"""Operations for clipping (gradient, weight) tensors to min/max values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import math_ops
def clip_by_value(t, clip_value_min, clip_value_max,
name=None):
"""Clips tensor values to a specified min and max.
Given a tensor `t`, this operation returns a tensor of the same type and
shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
Any values less than `clip_value_min` are set to `clip_value_min`. Any values
greater than `clip_value_max` are set to `clip_value_max`.
Args:
t: A `Tensor`.
clip_value_min: A 0-D (scalar) `Tensor`. The minimum value to clip by.
clip_value_max: A 0-D (scalar) `Tensor`. The maximum value to clip by.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.op_scope([t, clip_value_min, clip_value_max], name,
"clip_by_value") as name:
t = ops.convert_to_tensor(t, name="t")
# Go through list of tensors, for each value in each tensor clip
t_min = math_ops.minimum(
t, array_ops.fill(array_ops.shape(t), clip_value_max))
t_max = math_ops.maximum(
t_min, array_ops.fill(array_ops.shape(t), clip_value_min),
name=name)
return t_max
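# Illustrative usage (hypothetical values):
#   clip_by_value([-2.0, 0.5, 3.0], clip_value_min=-1.0, clip_value_max=1.0)
#   # -> [-1.0, 0.5, 1.0]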
def clip_by_norm(t, clip_norm, name=None):
"""Clips tensor values to a maximum L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
  normalizes `t` so that its L2-norm is less than or equal to `clip_norm`.
Specifically, if the L2-norm is already less than or equal to `clip_norm`,
then `t` is not modified. If the L2-norm is greater than `clip_norm`, then
this operation returns a tensor of the same type and shape as `t` with its
values set to:
`t * clip_norm / l2norm(t)`
In this case, the L2-norm of the output tensor is `clip_norm`.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.op_scope([t, clip_norm], name, "clip_by_norm") as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
l2norm_inv = math_ops.rsqrt(
math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
tclip = array_ops.identity(t * clip_norm * math_ops.minimum(
l2norm_inv, constant_op.constant(1.0 / clip_norm)), name=name)
return tclip
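# Worked example (hypothetical numbers): for t = [3.0, 4.0] the L2-norm is 5.0, so
# clip_by_norm(t, clip_norm=1.0) scales t by 1.0 / 5.0 and returns [0.6, 0.8];
# with clip_norm=10.0 the norm is already within the limit and t is returned unchanged.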
def global_norm(t_list, name=None):
"""Computes the global norm of multiple tensors.
Given a tuple or list of tensors `t_list`, this operation returns the
global norm of the elements in all tensors in `t_list`. The global norm is
computed as:
`global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`
Any entries in `t_list` that are of type None are ignored.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
name: A name for the operation (optional).
Returns:
A 0-D (scalar) `Tensor` of type `float`.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
with ops.op_scope(t_list, name, "global_norm") as name:
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
squared_norms = array_ops.pack(
[math_ops.reduce_sum(v * v) for v in values if v])
norm = math_ops.sqrt(
math_ops.reduce_sum(squared_norms), name="global_norm")
return norm
def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
"""Clips values of multiple tensors by the ratio of the sum of their norms.
Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,
this operation returns a list of clipped tensors `list_clipped`
and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,
if you've already computed the global norm for `t_list`, you can specify
the global norm with `use_norm`.
To perform the clipping, the values t_list[i] are set to:
`t_list[i] * clip_norm / max(global_norm, clip_norm)`
where:
`global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`
If `clip_norm > global_norm` then the entries in `t_list` remain as they are,
otherwise they're all shrunk by the global ratio.
Any of the entries of `t_list` that are of type None are ignored.
This is the correct way to perform gradient clipping (for example, see
R. Pascanu, T. Mikolov, and Y. Bengio, "On the difficulty of training
Recurrent Neural Networks". http://arxiv.org/abs/1211.5063)
However, it is slower than `clip_by_norm()` because all the parameters must be
ready before the clipping operation can be performed.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global
norm to use. If not provided, `global_norm()` is used to compute the norm.
name: A name for the operation (optional).
Returns:
    list_clipped: A list of `Tensors` of the same type as `t_list`.
global_norm: A 0-D (scalar) `Tensor` representing the global norm.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
if use_norm is None:
use_norm = global_norm(t_list, name)
with ops.op_scope(t_list + [clip_norm], name, "clip_by_global_norm") as name:
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale = clip_norm * math_ops.minimum(
1.0 / use_norm,
constant_op.constant(1.0 / clip_norm, dtype=use_norm.dtype))
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
values_clipped = [
array_ops.identity(v * scale, name="%s_%d" % (name, i))
if v is not None else None
for i, v in enumerate(values)]
list_clipped = [
ops.IndexedSlices(c_v, t.indices)
if isinstance(t, ops.IndexedSlices)
else c_v
for (c_v, t) in zip(values_clipped, t_list)]
return list_clipped, use_norm
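# Typical gradient-clipping sketch (`loss`, `params`, and `opt` are placeholders):
#   grads = tf.gradients(loss, params)
#   clipped_grads, global_norm = clip_by_global_norm(grads, clip_norm=5.0)
#   train_op = opt.apply_gradients(zip(clipped_grads, params))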
def clip_by_average_norm(t, clip_norm, name=None):
"""Clips tensor values to a maximum average L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its average L2-norm is less than or equal to
  `clip_norm`. Specifically, if the average L2-norm is already less than or
equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
greater than `clip_norm`, then this operation returns a tensor of the same
type and shape as `t` with its values set to:
`t * clip_norm / l2norm_avg(t)`
In this case, the average L2-norm of the output tensor is `clip_norm`.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.op_scope([t, clip_norm], name, "clip_by_average_norm") as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm per element, clip elements by ratio of clip_norm to
# L2-norm per element
n_element = math_ops.cast(array_ops.size(t), types.float32)
l2norm_inv = math_ops.rsqrt(
math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
tclip = array_ops.identity(
t * clip_norm * math_ops.minimum(
l2norm_inv * n_element, constant_op.constant(1.0 / clip_norm)),
name=name)
return tclip
|
import socket
HOST, PORT = '', 8888
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((HOST, PORT))
listen_socket.listen(1)
print 'Serving HTTP on port %s ...' % PORT
while True:
client_connection, client_address = listen_socket.accept()
request = client_connection.recv(1024)
print request
    http_response = """\
HTTP/1.1 200 OK

Hello, World!
"""
client_connection.sendall(http_response)
client_connection.close()
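# To try it out (assuming this script is saved as webserver.py):
#   $ python webserver.py
#   $ curl http://localhost:8888/
# The curl call should print "Hello, World!".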
|
import json
import logging
from typing import List
from typing import Optional
from click import Command
from click import Context
from click import Group
from click import argument
from click import group
from click import option
from click import pass_context
from repokid import CONFIG
from repokid import get_hooks
from repokid.commands.repo import _repo_all_roles
from repokid.commands.repo import _repo_role
from repokid.commands.repo import _repo_stats
from repokid.commands.repo import _rollback_role
from repokid.commands.role import _display_role
from repokid.commands.role import _display_roles
from repokid.commands.role import _find_roles_with_permissions
from repokid.commands.role import _remove_permissions_from_roles
from repokid.commands.role_cache import _update_role_cache
from repokid.commands.schedule import _cancel_scheduled_repo
from repokid.commands.schedule import _schedule_repo
from repokid.commands.schedule import _show_scheduled_roles
from repokid.types import RepokidConfig
logger = logging.getLogger("repokid")
def _generate_default_config(filename: str = "") -> RepokidConfig:
"""
Generate and return a config dict; will write the config to a file if a filename is provided
Args:
filename (string): Name of file to write the generated config (represented in JSON)
Returns:
dict: Template for Repokid config as a dictionary
"""
config_dict = {
"query_role_data_in_batch": False,
"batch_processing_size": 100,
"filter_config": {
"AgeFilter": {"minimum_age": 90},
"BlocklistFilter": {
"all": [],
"blocklist_bucket": {
"bucket": "<BLOCKLIST_BUCKET>",
"key": "<PATH/blocklist.json>",
"account_number": "<S3_blocklist_account>",
"region": "<S3_blocklist_region",
"assume_role": "<S3_blocklist_assume_role>",
},
},
"ExclusiveFilter": {
"all": ["<GLOB_PATTERN>"],
"<ACCOUNT_NUMBER>": ["<GLOB_PATTERN>"],
},
},
"active_filters": [
"repokid.filters.age:AgeFilter",
"repokid.filters.lambda:LambdaFilter",
"repokid.filters.blocklist:BlocklistFilter",
"repokid.filters.optout:OptOutFilter",
],
"aardvark_api_location": "<AARDVARK_API_LOCATION>",
"connection_iam": {
"assume_role": "RepokidRole",
"session_name": "repokid",
"region": "us-east-1",
},
"dynamo_db": {
"assume_role": "RepokidRole",
"account_number": "<DYNAMO_TABLE_ACCOUNT_NUMBER>",
"endpoint": "<DYNAMO_TABLE_ENDPOINT (http://localhost:8000 if using docker compose)>",
"region": "<DYNAMO_TABLE_REGION>",
"session_name": "repokid",
},
"hooks": ["repokid.hooks.loggers"],
"logging": {
"version": 1,
"disable_existing_loggers": "False",
"formatters": {
"standard": {
"format": "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
},
"json": {"class": "json_log_formatter.JSONFormatter"},
},
"handlers": {
"file": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "standard",
"filename": "repokid.log",
"maxBytes": 10485760,
"backupCount": 100,
"encoding": "utf8",
},
"json_file": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "json",
"filename": "repokid.json",
"maxBytes": 10485760,
"backupCount": 100,
"encoding": "utf8",
},
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"formatter": "standard",
"stream": "ext://sys.stdout",
},
},
"loggers": {
"repokid": {
"handlers": ["file", "json_file", "console"],
"level": "INFO",
}
},
},
"opt_out_period_days": 90,
"dispatcher": {
"session_name": "repokid",
"region": "us-west-2",
"to_rr_queue": "COMMAND_QUEUE_TO_REPOKID_URL",
"from_rr_sns": "RESPONSES_FROM_REPOKID_SNS_ARN",
},
"repo_requirements": {
"oldest_aa_data_days": 5,
"exclude_new_permissions_for_days": 14,
},
"repo_schedule_period_days": 7,
"warnings": {"unknown_permissions": False},
}
if filename:
try:
with open(filename, "w") as f:
json.dump(config_dict, f, indent=4, sort_keys=True)
except OSError as e:
print(f"Unable to open {filename} for writing: {e}")
else:
print(f"Successfully wrote sample config to {filename}")
return config_dict
class AliasedGroup(Group):
"""AliasedGroup provides backward compatibility with the previous Repokid CLI commands"""
def get_command(self, ctx: Context, cmd_name: str) -> Optional[Command]:
rv = Group.get_command(self, ctx, cmd_name)
if rv:
return rv
dashed = cmd_name.replace("_", "-")
for cmd in self.list_commands(ctx):
if cmd == dashed:
return Group.get_command(self, ctx, cmd)
return None
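# Example: the legacy spelling `repokid update_role_cache <account>` still resolves to the
# command registered as `update-role-cache` (assuming click's default naming, which replaces
# underscores with dashes): the exact lookup misses and the dashed form is tried next.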
@group(cls=AliasedGroup)
@pass_context
def cli(ctx: Context) -> None:
ctx.ensure_object(dict)
if not CONFIG:
config = _generate_default_config()
else:
config = CONFIG
ctx.obj["config"] = config
ctx.obj["hooks"] = get_hooks(config.get("hooks", ["repokid.hooks.loggers"]))
@cli.command()
@argument("filename")
@pass_context
def config(ctx: Context, filename: str) -> None:
_generate_default_config(filename=filename)
@cli.command()
@argument("account_number")
@pass_context
def update_role_cache(ctx: Context, account_number: str) -> None:
config = ctx.obj["config"]
hooks = ctx.obj["hooks"]
_update_role_cache(account_number, config, hooks)
@cli.command()
@argument("account_number")
@option("--inactive", is_flag=True, default=False, help="Include inactive roles")
@pass_context
def display_role_cache(ctx: Context, account_number: str, inactive: bool) -> None:
_display_roles(account_number, inactive=inactive)
@cli.command()
@argument("permissions", nargs=-1)
@option("--output", "-o", required=False, help="File to write results to")
@pass_context
def find_roles_with_permissions(
ctx: Context, permissions: List[str], output: str
) -> None:
_find_roles_with_permissions(permissions, output)
@cli.command()
@argument("permissions", nargs=-1)
@option("--role-file", "-f", required=True, help="File to read roles from")
@option("--commit", "-c", is_flag=True, default=False, help="Commit changes")
@pass_context
def remove_permissions_from_roles(
ctx: Context, permissions: List[str], role_file: str, commit: bool
) -> None:
config = ctx.obj["config"]
hooks = ctx.obj["hooks"]
_remove_permissions_from_roles(permissions, role_file, config, hooks, commit=commit)
@cli.command()
@argument("account_number")
@argument("role_name")
@pass_context
def display_role(ctx: Context, account_number: str, role_name: str) -> None:
config = ctx.obj["config"]
_display_role(account_number, role_name, config)
@cli.command()
@argument("account_number")
@argument("role_name")
@option("--commit", "-c", is_flag=True, default=False, help="Commit changes")
@pass_context
def repo_role(ctx: Context, account_number: str, role_name: str, commit: bool) -> None:
config = ctx.obj["config"]
hooks = ctx.obj["hooks"]
_repo_role(account_number, role_name, config, hooks, commit=commit)
@cli.command()
@argument("account_number")
@argument("role_name")
@option("--selection", "-s", required=True, type=int)
@option("--commit", "-c", is_flag=True, default=False, help="Commit changes")
@pass_context
def rollback_role(
ctx: Context,
account_number: str,
role_name: str,
selection: int,
commit: bool,
) -> None:
config = ctx.obj["config"]
hooks = ctx.obj["hooks"]
_rollback_role(
account_number, role_name, config, hooks, selection=selection, commit=commit
)
@cli.command()
@argument("account_number")
@option("--commit", "-c", is_flag=True, default=False, help="Commit changes")
@pass_context
def repo_all_roles(ctx: Context, account_number: str, commit: bool) -> None:
config = ctx.obj["config"]
hooks = ctx.obj["hooks"]
logger.info("Updating role data")
_update_role_cache(account_number, config, hooks)
_repo_all_roles(account_number, config, hooks, commit=commit, scheduled=False)
@cli.command()
@argument("account_number")
@pass_context
def schedule_repo(ctx: Context, account_number: str) -> None:
config = ctx.obj["config"]
hooks = ctx.obj["hooks"]
logger.info("Updating role data")
_update_role_cache(account_number, config, hooks)
_schedule_repo(account_number, config, hooks)
@cli.command()
@argument("account_number")
@pass_context
def show_scheduled_roles(ctx: Context, account_number: str) -> None:
_show_scheduled_roles(account_number)
@cli.command()
@argument("account_number")
@option("--role", "-r", required=False, type=str)
@option("--all", "-a", is_flag=True, default=False, help="cancel for all roles")
@pass_context
def cancel_scheduled_repo(
ctx: Context, account_number: str, role: str, all: bool
) -> None:
_cancel_scheduled_repo(account_number, role_name=role, is_all=all)
@cli.command()
@argument("account_number")
@option("--commit", "-c", is_flag=True, default=False, help="Commit changes")
@pass_context
def repo_scheduled_roles(ctx: Context, account_number: str, commit: bool) -> None:
config = ctx.obj["config"]
hooks = ctx.obj["hooks"]
_update_role_cache(account_number, config, hooks)
_repo_all_roles(account_number, config, hooks, commit=commit, scheduled=True)
@cli.command()
@argument("account_number")
@option("--output", "-o", required=True, help="File to write results to")
@pass_context
def repo_stats(ctx: Context, account_number: str, output: str) -> None:
_repo_stats(output, account_number=account_number)
if __name__ == "__main__":
cli()
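# Example invocations (sketch; the console-script name, account number, and filename are
# placeholders):
#   $ repokid config repokid_config.json
#   $ repokid update_role_cache 123456789012
#   $ repokid repo_all_roles 123456789012 --commit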
|
"""
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: contact/admin.py
Author: Jon Gunderson
"""
from __future__ import absolute_import
from django.contrib import admin
from contact.models import Contact
from contact.models import Announcement
class ContactAdmin(admin.ModelAdmin):
    list_display = ('user', 'topic', 'date', 'status')
    list_filter = ('user', 'date', 'topic', 'status')
admin.site.register(Contact, ContactAdmin)
class AnnouncementAdmin(admin.ModelAdmin):
    list_display = ('topic', 'date', 'scope', 'end_date', 'status', 'email', 'web')
    list_filter = ('topic', 'scope')
admin.site.register(Announcement, AnnouncementAdmin)
|
'''
cnt = sum_{x=1..width} sum_{y=1..length} (x * y)
    = (sum_{x=1..width} x) * (sum_{y=1..length} y)
= [(1 + width) * width / 2] * [(1 + length) * length / 2]
'''
def rect_counter(width, length):
cnt = 0
for x in range(1, width + 1):
for y in range(1, length + 1):
cnt += x * y
return cnt
def rect_counter_2(width, length):
return sum(range(1, width + 1)) * sum(range(1, length + 1))
def rect_counter_3(width, length):
return (1 + width) * width * (1 + length) * length // 4
print(rect_counter(2, 3))
print(rect_counter_2(2, 3)) # faster
print(rect_counter_3(2, 3)) # fastest
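# Worked check for (width, length) = (2, 3):
#   sum(1..2) * sum(1..3) = 3 * 6 = 18, and the closed form gives
#   (1 + 2) * 2 * (1 + 3) * 3 // 4 = 72 // 4 = 18 as well.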
def get_approximate_width(length, cnt):
a = 1
b = 1
c = -cnt * 4 // ((1 + length) * length)
delta = (b ** 2 - 4 * a * c) ** .5
return map(int, filter(lambda x: x > 2,
[(-b + delta) // (2 * a), (-b - delta) // (2 * a)]))
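# get_approximate_width solves rect_counter_3(width, length) ~= cnt for width:
# [(1 + width) * width / 2] * [(1 + length) * length / 2] = cnt reduces to
# width**2 + width - 4 * cnt / ((1 + length) * length) ~= 0, hence a = 1, b = 1 and the
# quadratic formula above; only roots greater than 2 are kept as candidates.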
def abs_diff(width, length, cnt):
return abs(cnt - rect_counter_3(width, length))
def get_width_with_min_cnt(length, cnt):
d = {}
for approx_width in get_approximate_width(length, cnt):
for width in range(approx_width - 3, approx_width + 4):
d[abs_diff(width, length, cnt)] = width
min_diff = min(d.keys())
return d[min_diff], min_diff
cnt = int(2e6)
d = {}
for length in range(1, 55):
width, min_diff = get_width_with_min_cnt(length, cnt)
d[min_diff] = [width, length]
min_diff = min(d.keys())
width, length = d[min_diff]
print(width, length, width * length)
|
import base
import requests
import unittest
import versa_plugin.operations
import get_configuration as get_conf
requests.packages.urllib3.disable_warnings()
pool = """
instance:
name: $pool_name
ip-address: 1.2.3.4
"""
organization = """
organization:
name: $cms_name
resource-pool:
instances:
- $pool_name
org-networks:
org-network:
- name: test_net1
subnet: 10.1.0.0
mask: 255.255.255.0
ipaddress-allocation-mode: manual
- name: test_net2
subnet: 10.2.0.0
mask: 255.255.255.0
ipaddress-allocation-mode: manual
"""
class ConnectorTestCase(base.BaseTest):
def add_pool(self, pool_name, **kwargs):
""" Add pool """
self.assertFalse(get_conf.pool(pool_name))
versa_plugin.operations.create_resource_pool()
self.assertTrue(get_conf.pool(pool_name))
def delete_pool(self, pool_name, **kwargs):
""" Delete pool """
self.assertTrue(get_conf.pool(pool_name))
versa_plugin.operations.delete_resource_pool()
self.assertFalse(get_conf.pool(pool_name))
def add_organization(self, cms_name, **kwargs):
""" Add organization """
self.assertFalse(get_conf.cms_organization(cms_name))
versa_plugin.operations.create_cms_local_organization()
self.assertTrue(get_conf.cms_organization(cms_name))
def delete_organization(self, cms_name, **kwargs):
""" Delete organization """
self.assertTrue(get_conf.cms_organization(cms_name))
versa_plugin.operations.delete_cms_local_organization()
self.assertFalse(get_conf.cms_organization(cms_name))
# @unittest.skip("")
def test_resource_pool(self):
pool_name = self.gen_name('pool')
self.add_to_sequence(self.add_pool,
self.delete_pool,
pool,
pool_name=pool_name)
self.run_sequence()
# @unittest.skip("")
def test_organization(self):
pool_name = self.gen_name('pool')
cms_name = self.gen_name('cms')
self.add_to_sequence(self.add_pool,
self.delete_pool,
pool,
pool_name=pool_name)
self.add_to_sequence(self.add_organization,
self.delete_organization,
organization,
cms_name=cms_name,
pool_name=pool_name)
self.run_sequence()
|
"""Test cases for eager execution using XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import convolutional
from tensorflow.python.layers import pooling
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
class EagerTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
def testGradientTape(self):
with self.test_scope():
x = constant_op.constant(1.0)
y = constant_op.constant(10.0)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
tape.watch(y)
a = x + y + x * y
da_dx = tape.gradient(a, x)
da_dy = tape.gradient(a, y)
self.assertEqual(11.0, da_dx.numpy())
self.assertEqual(2.0, da_dy.numpy())
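      # Sanity check on the asserted values: for a = x + y + x*y,
      # da/dx = 1 + y = 11.0 and da/dy = 1 + x = 2.0 at x = 1.0, y = 10.0.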
def testExecuteListOutputLen0(self):
with self.test_scope():
empty = constant_op.constant([], dtype=dtypes.float32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteListOutputLen1(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen3(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 3, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertAllEqual([[0], [3]], result[0])
self.assertAllEqual([[1], [4]], result[1])
self.assertAllEqual([[2], [5]], result[2])
def testBasicGraph(self):
# Run some ops eagerly
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
    # Run some ops in graph mode
with context.graph_mode(), self.test_session() as sess:
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, sess.run(product))
def testDegenerateSlices(self):
with self.test_scope():
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testIdentity(self):
with self.test_scope():
self.assertAllEqual(2, array_ops.identity(2))
def testIdentityOnVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(True)
i = array_ops.identity(v)
self.assertAllEqual(True, i.numpy())
def testAssignAddVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
v.assign_add(2.0)
self.assertEqual(3.0, v.numpy())
def testReadAssignRead(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
val1 = v.read_value()
v.assign_add(2.0)
val2 = v.read_value()
self.assertEqual(1.0, val1.numpy())
self.assertEqual(3.0, val2.numpy())
def testGradient(self):
def f(x):
return x
with self.test_scope():
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testVariableGradient(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(1.0)
def f():
x = v0 * v0
return x
grads = backprop.implicit_grad(f)()
self.assertEqual(2., grads[0][0].numpy())
def testMultipleVariableReads(self):
# This test makes sure consecutive variable reads don't copy
# the underlying memory.
with self.test_scope():
# Create 128MiB variables
var = resource_variable_ops.ResourceVariable(
array_ops.ones([32, 1024, 1024]))
# Read the same variable 100 times. If the underlying tensor
# is not copied, this is a trivial operation. If it is copied,
# this will eat over 13GB and OOM.
values = []
for _ in range(100):
values.append(var.value())
  # The shape, shape_n, size, and rank ops are tested here because their
  # execution kernels (as opposed to the compilation-only tf2xla kernels)
  # are distinct from the tf2xla kernels.
def testShape(self):
def const(value):
return array_ops.shape(
constant_op.constant(value)).numpy()
def ones(value):
return array_ops.shape(
array_ops.ones(value)).numpy()
with self.test_scope():
# Shapes of directly constructed tensors
self.assertAllEqual([], const(3))
self.assertAllEqual([3], const([1.0, 2.0, 3.0]))
self.assertAllEqual([2, 2], const([[1.0, 2.0], [3.0, 4.0]]))
self.assertAllEqual([2, 1, 2], const([[[1.0, 2.0]], [[3.0, 4.0]]]))
# Shapes of tensors created by op running on device
# We make this distinction because directly constructed tensors
# are treated differently in a few places that can influence shape:
# - they always have on_host_tensor
# - they and their shapes can be cached
# - they end up on device via a copy, instead of as program output
self.assertAllEqual([], ones([]))
self.assertAllEqual([3], ones([3]))
self.assertAllEqual([2, 2], ones([2, 2]))
self.assertAllEqual([2, 1, 2], ones([2, 1, 2]))
def testShapeN(self):
with self.test_scope():
# Shapes of directly constructed tensors
shapes = array_ops.shape_n([
constant_op.constant(1.0),
constant_op.constant([1.0, 2.0, 3.0]),
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
# Shapes of tensors created by op running on device
shapes = array_ops.shape_n([
array_ops.ones([]),
array_ops.ones([3]),
array_ops.ones([2, 2])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
def testSize(self):
with self.test_scope():
self.assertEqual(
1, array_ops.size(constant_op.constant(1.0)).numpy())
self.assertEqual(
3, array_ops.size(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
4, array_ops.size(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testRank(self):
with self.test_scope():
self.assertEqual(
0, array_ops.rank(constant_op.constant(1.0)).numpy())
self.assertEqual(
1, array_ops.rank(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
2, array_ops.rank(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testAdam(self):
with self.test_scope():
optimizer = adam.AdamOptimizer(0.1)
x = resource_variable_ops.ResourceVariable(10.0)
with backprop.GradientTape() as tape:
y = x * x
dy_dx = tape.gradient(y, x)
optimizer.apply_gradients([(dy_dx, x)])
self.assertAlmostEqual(9.9, x.numpy(), places=3)
def testAdamSparse(self):
with ops.device('/cpu:0'):
# Create 2-D embedding for 3 objects on CPU because sparse/sliced updates
# are not implemented on TPU.
embedding_matrix = resource_variable_ops.ResourceVariable(
array_ops.ones([3, 2]))
with self.test_scope():
with backprop.GradientTape() as tape:
embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
y = math_ops.reduce_sum(embedding)
dy_dx = tape.gradient(y, embedding_matrix)
self.assertIsInstance(dy_dx, ops.IndexedSlices)
optimizer = adam.AdamOptimizer(0.1)
# The gradient application operations will run on CPU because optimizer
# updates are always collocated with the variable.
optimizer.apply_gradients([(dy_dx, embedding_matrix)])
# This assign_add will run on CPU because when an input to an
# operation is a resource, this operation is placed on the resource's
# device by the eager runtime.
embedding_matrix.assign_add(array_ops.ones([3, 2]))
self.assertAllClose([[2.0, 2.0],
[1.9, 1.9],
[2.0, 2.0]], embedding_matrix.numpy())
class EagerFunctionTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
matmul = function.defun(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
def testConv(self):
if 'GPU' in self.device:
# TODO(b/32333178)
self.skipTest('Current implementation of RandomStandardNormal kernel '
'is very slow on GPU, and has been blacklisted.')
with self.test_scope():
data_format = 'channels_last'
conv = convolutional.Conv2D(
filters=1, kernel_size=2, padding='VALID',
data_format=data_format, activation=nn_ops.relu,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
pool = pooling.MaxPooling2D(2, 2, data_format=data_format)
def model(x):
x = conv(x)
return pool(x)
model = function.defun(model)
x = array_ops.ones([1, 4, 4, 1])
y = model(x)
self.assertAllEqual(y.numpy(), [[[[4.]]]])
def testReadVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v.read_value()
var = f()
self.assertEqual(1.0, var.numpy())
def testUpdateVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
def f(v):
v.assign_add(1.0)
return v
f = function.defun(f)
var = f(v)
self.assertEqual(2.0, var.numpy())
def testAllArgumentKinds(self):
"""Test a complex function that takes different argument kinds.
tf2xla machinery that translates, compiles, and runs defuns
classifies arguments into: compile-time constants, regular tensors,
and resources. This test creates a function with a mix of all these
kinds. Moreover, the order of function arguments is intentionally mixed up.
This also tests the case when the same argument is a compile-time constant
as well as used in an operation that normally expects its inputs to be
in device memory - addition in this case.
"""
with self.test_scope():
def foo(c1, r1, v1, c2, v2, r2):
# c1 and c2 are compile-time constants
# r1 and r2 are regular tensors
# v1 and v2 are resource variables
a = c1 + r1
b = math_ops.cast(c2, dtypes.float32) + v2
c = array_ops.slice(v1, c1, c2)
d = r2 * v2
return a, b, c, d
foo = function.defun(foo)
c1 = [0, 0]
c2 = array_ops.ones([2], dtype=dtypes.int32)
r1 = array_ops.ones([2])
r2 = [[2., 2.], [3., 3.]]
v1 = resource_variable_ops.ResourceVariable([[1., 2.], [3., 4.]])
v2 = resource_variable_ops.ResourceVariable([[10., 20.], [30., 40.]])
a, b, c, d = foo(c1, r1, v1, c2, v2, r2)
self.assertAllEqual([1, 1], a.numpy())
self.assertAllEqual([[11., 21.], [31., 41.]], b.numpy())
self.assertAllEqual([[1.]], c.numpy())
self.assertAllEqual([[20., 40.], [90., 120.]], d.numpy())
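      # Note on the expected values: c = slice(v1, begin=[0, 0], size=[1, 1]) is the
      # top-left 1x1 block [[1.]] of v1, and d = r2 * v2 is an elementwise product.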
def testDefunInGradientTape(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f(x):
x = v0 * v0 * x
return x
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = f(x)
dy = tape.gradient(y, v0)
self.assertEqual(75, y.numpy())
self.assertEqual(30, dy.numpy())
def testGradientTapeInDefun(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f():
x = constant_op.constant(1.0)
with backprop.GradientTape() as tape:
y = v0 * x
dy = tape.gradient(y, v0)
return dy
dy = f()
self.assertEqual(1.0, dy.numpy())
def testSliceInDefun(self):
with self.test_scope():
@function.defun
def f(x, y):
return x[0::2, y:, ...]
x = array_ops.ones([2, 3, 4])
y = array_ops.ones([], dtype=dtypes.int32)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = f(x, y)
dz = tape.gradient(z, x)
self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
self.assertAllEqual((2, 3, 4), dz.shape.as_list())
def testNestedDefun(self):
self.skipTest('Nested defuns do not work on TPU at the moment')
with self.test_scope():
@function.defun
def times_two(x):
return 2 * x
@function.defun
def two_x_plus_1(x):
return times_two(x) + 1
x = constant_op.constant([2, 3, 4])
y = two_x_plus_1(x)
self.assertAllEqual([5, 7, 9], y.numpy())
class ExcessivePaddingTest(xla_test.XLATestCase):
"""Test that eager execution works with TPU flattened tensors.
Tensors that would normally be excessively padded when written
to TPU memory are reshaped to 1-D flat tensors.
This test case verifies that such tensors work with eager execution.
The flattening currently only happens on TPU, but tests should work
fine with all backends as flattening is transparent.
"""
def testFromConstant(self):
with self.test_scope():
# Create constant of shape [100, 2, 1]. This tensor would be
# excessively padded on TPU.
tensor = constant_op.constant(100 * [[[10.0], [2.0]]])
# Use reduce_sum since it requires correctly working with
# a particular dimension.
reduced = math_ops.reduce_sum(tensor, axis=1)
self.assertAllEqual(100 * [[12.0]], reduced)
def testFromOperation(self):
with self.test_scope():
tensor = array_ops.ones([3, 100, 2, 2])
reduced = math_ops.reduce_sum(tensor, axis=[0, 2, 3])
self.assertAllEqual(100 * [12.0], reduced)
def testAsFunctionInput(self):
with self.test_scope():
@function.defun
def f(x):
return math_ops.reduce_sum(x, axis=2)
tensor = constant_op.constant(100 * [[[10.0, 2.0]]])
reduced = f(tensor)
self.assertAllEqual(100 * [[12.0]], reduced)
def testAsFunctionOutput(self):
with self.test_scope():
@function.defun
def f(x):
return x * constant_op.constant(100 * [[[10.0, 2.0]]])
y = f(3)
reduced = math_ops.reduce_sum(y, axis=2)
self.assertAllEqual(100 * [[36.0]], reduced)
def multiple_tpus():
devices = context.context().devices()
return len([d for d in devices if 'device:TPU:' in d]) > 1
class MultiDeviceTest(xla_test.XLATestCase):
"""Test running TPU computation on more than one core."""
def testBasic(self):
if not multiple_tpus():
self.skipTest('MultiDeviceTest requires multiple TPU devices.')
# Compute 10 on TPU core 0
with ops.device('device:TPU:0'):
two = constant_op.constant(2)
five = constant_op.constant(5)
ten = two * five
self.assertAllEqual(10, ten)
# Compute 6 on TPU core 1
with ops.device('device:TPU:1'):
two = constant_op.constant(2)
three = constant_op.constant(3)
six = two * three
self.assertAllEqual(6, six)
# Copy 10 and 6 to CPU and sum them
self.assertAllEqual(16, ten + six)
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(log_device_placement=True))
googletest.main()
|
import unittest
from unittest import mock
import time
import dbt.exceptions
from dbt.parser.partial import PartialParsing
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.graph.parsed import ParsedModelNode
from dbt.contracts.files import ParseFileType, SourceFile, SchemaSourceFile, FilePath, FileHash
from dbt.node_types import NodeType
from .utils import normalize
class TestPartialParsing(unittest.TestCase):
def setUp(self):
project_name = 'my_test'
project_root = '/users/root'
model_file = SourceFile(
path=FilePath(project_root=project_root, searched_path='models', relative_path='my_model.sql', modification_time=time.time()),
checksum=FileHash.from_contents('abcdef'),
project_name=project_name,
parse_file_type=ParseFileType.Model,
nodes=['model.my_test.my_model'],
env_vars=[],
)
schema_file = SchemaSourceFile(
path=FilePath(project_root=project_root, searched_path='models', relative_path='schema.yml', modification_time=time.time()),
checksum=FileHash.from_contents('ghijkl'),
project_name=project_name,
parse_file_type=ParseFileType.Schema,
dfy={'version': 2, 'models': [{'name': 'my_model', 'description': 'Test model'}]},
ndp=['model.my_test.my_model'],
env_vars={},
)
self.saved_files = {model_file.file_id: model_file, schema_file.file_id: schema_file}
model_node = self.get_model('my_model')
nodes = { model_node.unique_id: model_node }
self.saved_manifest = Manifest(files=self.saved_files, nodes=nodes)
self.new_files = {
model_file.file_id: SourceFile.from_dict(model_file.to_dict()),
schema_file.file_id: SchemaSourceFile.from_dict(schema_file.to_dict()),
}
self.partial_parsing = PartialParsing(self.saved_manifest, self.new_files)
def get_model(self, name):
return ParsedModelNode(
package_name='my_test',
root_path='/users/root/',
path=f'{name}.sql',
original_file_path=f'models/{name}.sql',
raw_sql='select * from wherever',
name=name,
resource_type=NodeType.Model,
unique_id=f'model.my_test.{name}',
fqn=['my_test', 'models', name],
database='test_db',
schema='test_schema',
alias='bar',
checksum=FileHash.from_contents(''),
patch_path='my_test://' + normalize('models/schema.yml'),
)
def test_simple(self):
# Nothing has changed
self.assertIsNotNone(self.partial_parsing)
self.assertTrue(self.partial_parsing.skip_parsing())
# Change a model file
model_file_id = 'my_test://' + normalize('models/my_model.sql')
self.partial_parsing.new_files[model_file_id].checksum = FileHash.from_contents('xyzabc')
self.partial_parsing.build_file_diff()
self.assertFalse(self.partial_parsing.skip_parsing())
pp_files = self.partial_parsing.get_parsing_files()
# models has 'patch_path' so we expect to see a SchemaParser file listed
schema_file_id = 'my_test://' + normalize('models/schema.yml')
expected_pp_files = {'my_test': {'ModelParser': [model_file_id], 'SchemaParser': [schema_file_id]}}
self.assertEqual(pp_files, expected_pp_files)
expected_pp_dict = {'version': 2, 'models': [{'name': 'my_model', 'description': 'Test model'}]}
schema_file = self.saved_files[schema_file_id]
self.assertEqual(schema_file.pp_dict, expected_pp_dict)
|
from model.film import Film
def test_login_empty(app):
app.session.login(username="", password="")
def test_login_valid(app):
app.session.login(username="admin", password="admin")
app.session.logout()
def test_add_new_movie(app):
app.session.login(username="admin", password="admin")
app.film.click_on_add_movie()
app.film.fill_movie_details(Film(imdbid="10", title="My_Film1", year="1990", duration="94", rating="100", format="strange format", known_as="Also known as brbrbr"))
    # app.session.logout()  # logout still doesn't work
|
"""
The goal of this module is making dvc functional tests setup a breeze. This
includes a temporary dir, initializing git and DVC repos and bootstrapping some
file structure.
The cornerstone of these fixtures is `tmp_dir`, which creates a temporary dir
and changes the working directory to it; it might be combined with `scm` and `dvc` to initialize
empty git and DVC repos. `tmp_dir` returns a Path instance, which should save
you from using `open()`, `os` and `os.path` utils many times:
(tmp_dir / "some_file").write_text("some text")
# ...
assert "some text" == (tmp_dir / "some_file").read_text()
assert (tmp_dir / "some_file").exists()
Additionally it provides `.gen()`, `.scm_gen()` and `.dvc_gen()` methods to
bootstrap a required file structure in a single call:
# Generate a dir with files
tmp_dir.gen({"dir": {"file": "file text", "second_file": "..."}})
# Generate a single file, dirs will be created along the way
tmp_dir.gen("dir/file", "file text")
# Generate + git add
tmp_dir.scm_gen({"file1": "...", ...})
# Generate + git add + git commit
tmp_dir.scm_gen({"file1": "...", ...}, commit="add files")
# Generate + dvc add
tmp_dir.dvc_gen({"file1": "...", ...})
# Generate + dvc add + git commit -am "..."
# This commits stages to git not the generated files.
tmp_dir.dvc_gen({"file1": "...", ...}, commit="add files")
Making it easier to bootstrap things has the larger goal of incentivizing a move
from the global repo template to creating everything in place, which:
- makes all path references local to test, enhancing readability
- allows using telling filenames, e.g. "git_tracked_file" instead of "foo"
- does not create unnecessary files
"""
import os
import pathlib
from textwrap import dedent
import pytest
from dvc.logger import disable_other_loggers
from dvc.testing.tmp_dir import git_init
__all__ = [
"run_copy",
"run_head",
"erepo_dir",
"git_dir",
"git_init",
"git_upstream",
"git_downstream",
]
disable_other_loggers()
@pytest.fixture
def run_copy(tmp_dir, dvc):
tmp_dir.gen(
"copy.py",
(
"import sys, shutil, os\n"
"shutil.copyfile(sys.argv[1], sys.argv[2]) "
"if os.path.isfile(sys.argv[1]) "
"else shutil.copytree(sys.argv[1], sys.argv[2])"
),
)
def run_copy(src, dst, **run_kwargs):
wdir = pathlib.Path(run_kwargs.get("wdir", "."))
wdir = pathlib.Path("../" * len(wdir.parts))
script_path = wdir / "copy.py"
return dvc.run(
cmd=f"python {script_path} {src} {dst}",
outs=[dst],
deps=[src, f"{script_path}"],
**run_kwargs,
)
return run_copy
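# Example use inside a test (sketch; file names and the stage name are illustrative):
#   tmp_dir.gen("foo", "foo contents")
#   stage = run_copy("foo", "bar", name="copy-foo-bar")
# This creates a stage whose command copies foo to bar, with foo as a dep and bar as an out.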
@pytest.fixture
def run_head(tmp_dir, dvc):
"""Output first line of each file to different file with '-1' appended.
Useful for tracking multiple outputs/dependencies which are not a copy
of each others.
"""
tmp_dir.gen(
{
"head.py": dedent(
"""
import sys
for file in sys.argv[1:]:
with open(file) as f, open(file +"-1","w+") as w:
w.write(f.readline())
"""
)
}
)
script = os.path.abspath(tmp_dir / "head.py")
def run(*args, **run_kwargs):
return dvc.run(
**{
"cmd": "python {} {}".format(script, " ".join(args)),
"outs": [dep + "-1" for dep in args],
"deps": list(args),
**run_kwargs,
}
)
return run
@pytest.fixture
def erepo_dir(make_tmp_dir):
return make_tmp_dir("erepo", scm=True, dvc=True)
@pytest.fixture
def git_dir(make_tmp_dir):
path = make_tmp_dir("git-erepo", scm=True)
path.scm.commit("init repo")
return path
class GitRemote:
def __init__(self, tmp_dir, name, url):
self.tmp_dir = tmp_dir
self.remote = name
self.url = url
@pytest.fixture
def git_upstream(tmp_dir, erepo_dir, git_dir, request):
remote = erepo_dir if "dvc" in request.fixturenames else git_dir
url = "file://{}".format(remote.resolve().as_posix())
tmp_dir.scm.gitpython.repo.create_remote("upstream", url)
return GitRemote(remote, "upstream", url)
@pytest.fixture
def git_downstream(tmp_dir, erepo_dir, git_dir, request):
remote = erepo_dir if "dvc" in request.fixturenames else git_dir
url = "file://{}".format(tmp_dir.resolve().as_posix())
remote.scm.gitpython.repo.create_remote("upstream", url)
return GitRemote(remote, "upstream", url)
|
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.congress import dashboard
class Overview(horizon.Panel):
name = _("Overview")
slug = "overview"
dashboard.Congress.register(Overview)
|
class Solution:
def minCostClimbingStairs(self, cost):
ans = [0]*(len(cost)+1)
for i in range(2, len(cost)+1):
ans[i] = min(ans[i-1]+cost[i-1], ans[i-2]+cost[i-2])
return ans[len(cost)]
print(Solution().minCostClimbingStairs([10, 15, 20]))
print(Solution().minCostClimbingStairs([1, 100, 1, 1, 1, 100, 1, 1, 100, 1]))
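# ans[i] is the minimum cost to reach step i, entering from step i-1 or i-2 after paying
# that step's cost; the answer is ans[len(cost)], the cost to step past the top.
# Expected output: 15 for the first example and 6 for the second.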
|
from __future__ import print_function
from collections import OrderedDict
import sys
from catkin_pkg.package import parse_package_string
from ros_buildfarm.common import get_default_node_label
from ros_buildfarm.common import get_devel_job_name
from ros_buildfarm.common import get_devel_view_name
from ros_buildfarm.common import get_github_project_url
from ros_buildfarm.common import get_node_label
from ros_buildfarm.common \
import get_repositories_and_script_generating_key_files
from ros_buildfarm.common import git_github_orgunit
from ros_buildfarm.common import JobValidationError
from ros_buildfarm.common import write_groovy_script_and_configs
from ros_buildfarm.config import get_distribution_file
from ros_buildfarm.config import get_index as get_config_index
from ros_buildfarm.config import get_source_build_files
from ros_buildfarm.git import get_repository
from ros_buildfarm.templates import expand_template
from rosdistro import get_distribution_cache
from rosdistro import get_index
def configure_devel_jobs(
config_url, rosdistro_name, source_build_name, groovy_script=None,
dry_run=False, whitelist_repository_names=None):
"""
Configure all Jenkins devel jobs.
    L{configure_devel_job} will be invoked for each source repository and target
    that matches the build file criteria.
"""
config = get_config_index(config_url)
build_files = get_source_build_files(config, rosdistro_name)
build_file = build_files[source_build_name]
index = get_index(config.rosdistro_index_url)
dist_cache = None
if build_file.notify_maintainers:
dist_cache = get_distribution_cache(index, rosdistro_name)
# get targets
targets = []
for os_name in build_file.targets.keys():
for os_code_name in build_file.targets[os_name].keys():
for arch in build_file.targets[os_name][os_code_name]:
targets.append((os_name, os_code_name, arch))
print('The build file contains the following targets:')
for os_name, os_code_name, arch in targets:
print(' -', os_name, os_code_name, arch)
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
print('No distribution file matches the build file')
return
devel_view_name = get_devel_view_name(
rosdistro_name, source_build_name, pull_request=False)
pull_request_view_name = get_devel_view_name(
rosdistro_name, source_build_name, pull_request=True)
# all further configuration will be handled by either the Jenkins API
# or by a generated groovy script
from ros_buildfarm.jenkins import connect
jenkins = connect(config.jenkins_url) if groovy_script is None else False
view_configs = {}
views = {}
if build_file.test_commits_force is not False:
views[devel_view_name] = configure_devel_view(
jenkins, devel_view_name, dry_run=dry_run)
if build_file.test_pull_requests_force is not False:
views[pull_request_view_name] = configure_devel_view(
jenkins, pull_request_view_name, dry_run=dry_run)
if not jenkins:
view_configs.update(views)
groovy_data = {
'dry_run': dry_run,
'expected_num_views': len(view_configs),
}
repo_names = dist_file.repositories.keys()
filtered_repo_names = build_file.filter_repositories(repo_names)
devel_job_names = []
pull_request_job_names = []
job_configs = OrderedDict()
for repo_name in sorted(repo_names):
if whitelist_repository_names:
if repo_name not in whitelist_repository_names:
print(
"Skipping repository '%s' not in explicitly passed list" %
repo_name, file=sys.stderr)
continue
is_disabled = repo_name not in filtered_repo_names
if is_disabled and build_file.skip_ignored_repositories:
print("Skipping ignored repository '%s'" % repo_name,
file=sys.stderr)
continue
repo = dist_file.repositories[repo_name]
if not repo.source_repository:
print("Skipping repository '%s': no source section" % repo_name)
continue
if not repo.source_repository.version:
print("Skipping repository '%s': no source version" % repo_name)
continue
job_types = []
# check for testing commits
if build_file.test_commits_force is False:
print(("Skipping repository '%s': 'test_commits' is forced to " +
"false in the build file") % repo_name)
elif repo.source_repository.test_commits is False:
print(("Skipping repository '%s': 'test_commits' of the " +
"repository set to false") % repo_name)
elif repo.source_repository.test_commits is None and \
not build_file.test_commits_default:
print(("Skipping repository '%s': 'test_commits' defaults to " +
"false in the build file") % repo_name)
else:
job_types.append('commit')
if not is_disabled:
# check for testing pull requests
if build_file.test_pull_requests_force is False:
# print(("Skipping repository '%s': 'test_pull_requests' " +
# "is forced to false in the build file") % repo_name)
pass
elif repo.source_repository.test_pull_requests is False:
# print(("Skipping repository '%s': 'test_pull_requests' of " +
# "the repository set to false") % repo_name)
pass
elif repo.source_repository.test_pull_requests is None and \
not build_file.test_pull_requests_default:
# print(("Skipping repository '%s': 'test_pull_requests' " +
# "defaults to false in the build file") % repo_name)
pass
else:
print("Pull request job for repository '%s'" % repo_name)
job_types.append('pull_request')
for job_type in job_types:
pull_request = job_type == 'pull_request'
for os_name, os_code_name, arch in targets:
try:
job_name, job_config = configure_devel_job(
config_url, rosdistro_name, source_build_name,
repo_name, os_name, os_code_name, arch, pull_request,
config=config, build_file=build_file,
index=index, dist_file=dist_file,
dist_cache=dist_cache, jenkins=jenkins, views=views,
is_disabled=is_disabled,
groovy_script=groovy_script,
dry_run=dry_run)
if not pull_request:
devel_job_names.append(job_name)
else:
pull_request_job_names.append(job_name)
if groovy_script is not None:
print("Configuration for job '%s'" % job_name)
job_configs[job_name] = job_config
except JobValidationError as e:
print(e.message, file=sys.stderr)
groovy_data['expected_num_jobs'] = len(job_configs)
groovy_data['job_prefixes_and_names'] = {}
devel_job_prefix = '%s__' % devel_view_name
pull_request_job_prefix = '%s__' % pull_request_view_name
if not whitelist_repository_names:
groovy_data['job_prefixes_and_names']['devel'] = \
(devel_job_prefix, devel_job_names)
groovy_data['job_prefixes_and_names']['pull_request'] = \
(pull_request_job_prefix, pull_request_job_names)
if groovy_script is None:
# delete obsolete jobs in these views
from ros_buildfarm.jenkins import remove_jobs
print('Removing obsolete devel jobs')
remove_jobs(
jenkins, devel_job_prefix, devel_job_names, dry_run=dry_run)
print('Removing obsolete pull request jobs')
remove_jobs(
jenkins, pull_request_job_prefix, pull_request_job_names,
dry_run=dry_run)
if groovy_script is not None:
print(
"Writing groovy script '%s' to reconfigure %d views and %d jobs" %
(groovy_script, len(view_configs), len(job_configs)))
content = expand_template(
'snippet/reconfigure_jobs.groovy.em', groovy_data)
write_groovy_script_and_configs(
groovy_script, content, job_configs, view_configs=view_configs)
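# Example call (sketch; the config URL, distro name, and build name are placeholders):
#   configure_devel_jobs(
#       'https://example.com/ros_buildfarm_config/index.yaml',
#       'melodic', 'default', dry_run=True)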
def configure_devel_job(
config_url, rosdistro_name, source_build_name,
repo_name, os_name, os_code_name, arch,
pull_request=False,
config=None, build_file=None,
index=None, dist_file=None, dist_cache=None,
jenkins=None, views=None,
is_disabled=False,
groovy_script=None,
source_repository=None,
build_targets=None,
dry_run=False):
"""
Configure a single Jenkins devel job.
This includes the following steps:
- clone the source repository to use
- clone the ros_buildfarm repository
- write the distribution repository keys into files
- invoke the release/run_devel_job.py script
"""
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_source_build_files(config, rosdistro_name)
build_file = build_files[source_build_name]
# Overwrite build_file.targets if build_targets is specified
if build_targets is not None:
build_file.targets = build_targets
if index is None:
index = get_index(config.rosdistro_index_url)
if dist_file is None:
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
raise JobValidationError(
'No distribution file matches the build file')
repo_names = dist_file.repositories.keys()
if repo_name is not None:
if repo_name not in repo_names:
raise JobValidationError(
"Invalid repository name '%s' " % repo_name +
'choose one of the following: %s' %
', '.join(sorted(repo_names)))
repo = dist_file.repositories[repo_name]
if not repo.source_repository:
raise JobValidationError(
"Repository '%s' has no source section" % repo_name)
if not repo.source_repository.version:
raise JobValidationError(
"Repository '%s' has no source version" % repo_name)
source_repository = repo.source_repository
if os_name not in build_file.targets.keys():
raise JobValidationError(
"Invalid OS name '%s' " % os_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets.keys())))
if os_code_name not in build_file.targets[os_name].keys():
raise JobValidationError(
"Invalid OS code name '%s' " % os_code_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets[os_name].keys())))
if arch not in build_file.targets[os_name][os_code_name]:
raise JobValidationError(
"Invalid architecture '%s' " % arch +
'choose one of the following: %s' % ', '.join(sorted(
build_file.targets[os_name][os_code_name])))
if dist_cache is None and build_file.notify_maintainers:
dist_cache = get_distribution_cache(index, rosdistro_name)
if jenkins is None:
from ros_buildfarm.jenkins import connect
jenkins = connect(config.jenkins_url)
if views is None:
view_name = get_devel_view_name(
rosdistro_name, source_build_name, pull_request=pull_request)
configure_devel_view(jenkins, view_name, dry_run=dry_run)
job_name = get_devel_job_name(
rosdistro_name, source_build_name,
repo_name, os_name, os_code_name, arch, pull_request)
job_config = _get_devel_job_config(
config, rosdistro_name, source_build_name,
build_file, os_name, os_code_name, arch, source_repository,
repo_name, pull_request, job_name, dist_cache=dist_cache,
is_disabled=is_disabled)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
from ros_buildfarm.jenkins import configure_job
configure_job(jenkins, job_name, job_config, dry_run=dry_run)
return job_name, job_config
def configure_devel_view(jenkins, view_name, dry_run=False):
from ros_buildfarm.jenkins import configure_view
return configure_view(
jenkins, view_name, include_regex='%s__.+' % view_name,
template_name='dashboard_view_devel_jobs.xml.em', dry_run=dry_run)
def _get_devel_job_config(
config, rosdistro_name, source_build_name,
build_file, os_name, os_code_name, arch, source_repo_spec,
repo_name, pull_request, job_name, dist_cache=None,
is_disabled=False):
template_name = 'devel/devel_job.xml.em'
repository_args, script_generating_key_files = \
get_repositories_and_script_generating_key_files(build_file=build_file)
maintainer_emails = set([])
if build_file.notify_maintainers and dist_cache and repo_name and \
repo_name in dist_cache.distribution_file.repositories:
# add maintainers listed in latest release to recipients
repo = dist_cache.distribution_file.repositories[repo_name]
if repo.release_repository:
for pkg_name in repo.release_repository.package_names:
if pkg_name not in dist_cache.release_package_xmls:
continue
pkg_xml = dist_cache.release_package_xmls[pkg_name]
pkg = parse_package_string(pkg_xml)
for m in pkg.maintainers:
maintainer_emails.add(m.email)
job_priority = \
build_file.jenkins_commit_job_priority \
if not pull_request \
else build_file.jenkins_pull_request_job_priority
job_data = {
'github_url': get_github_project_url(source_repo_spec.url),
'job_priority': job_priority,
'node_label': get_node_label(
build_file.jenkins_job_label,
get_default_node_label('%s_%s_%s' % (
rosdistro_name, 'devel', source_build_name))),
'pull_request': pull_request,
'source_repo_spec': source_repo_spec,
'disabled': is_disabled,
# this should not be necessary
'job_name': job_name,
'github_orgunit': git_github_orgunit(source_repo_spec.url),
'ros_buildfarm_repository': get_repository(),
'script_generating_key_files': script_generating_key_files,
'rosdistro_index_url': config.rosdistro_index_url,
'rosdistro_name': rosdistro_name,
'source_build_name': source_build_name,
'os_name': os_name,
'os_code_name': os_code_name,
'arch': arch,
'repository_args': repository_args,
'notify_compiler_warnings': build_file.notify_compiler_warnings,
'notify_emails': build_file.notify_emails,
'maintainer_emails': maintainer_emails,
'notify_maintainers': build_file.notify_maintainers,
'notify_committers': build_file.notify_committers,
'notify_pull_requests': build_file.notify_pull_requests,
'timeout_minutes': build_file.jenkins_job_timeout,
'git_ssh_credential_id': config.git_ssh_credential_id,
}
job_config = expand_template(template_name, job_data)
return job_config
|
from flask import Flask
from flask.ext.restful import Api
from werkzeug.serving import WSGIRequestHandler
from mongoengine import connect
from models import Tweet, Topic
from models.routing import register_api_model
from collecting.routes import (CollectorListResource,
CollectorResource, CollectorPollingResource)
import config
app = Flask(__name__, static_url_path='')
api = Api(app, prefix='/api')
connect(config.db_name, host=config.db_host, port=config.db_port,
username=config.db_user, password=config.db_pass)
@app.route('/')
def index():
return app.send_static_file('index.html')
register_api_model(api, Topic)
register_api_model(api, Tweet)
api.add_resource(CollectorListResource, '/collectors')
api.add_resource(CollectorResource, '/collectors/<topic_pk>')
api.add_resource(CollectorPollingResource, '/collector_polling/<topic_pk>')
class SafeRequestHandler(WSGIRequestHandler):
def connection_dropped(self, error, environ=None):
print 'connection dropped'
if __name__ == '__main__':
import sys
app.run(debug='--debug' in sys.argv, request_handler=SafeRequestHandler)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create an appraiser based on IDF scores of Matterport3D categories."""
from absl import app
from absl import flags
from crafty import appraise
from crafty import mp3d
from crafty import vln_data
flags.DEFINE_integer('notable_boost', 0,
'Extra credit for hand curated notable items.')
flags.DEFINE_integer('boring_penalty', 0,
'Penalty to apply to hand curated boring items.')
flags.DEFINE_float(
'occlusion_threshold', -1,
    'The threshold for whether one object is occluded by another. Default (-1) '
    'indicates no occlusion. 0.3 is a good value for occluding objects.')
flags.DEFINE_string('r2r_basedir', '/path/to/matterport_data',
'Path to Room-to-Room data.')
flags.DEFINE_string('output_file', None,
'Output file to save Riveter information.')
flags.mark_flag_as_required('output_file')
FLAGS = flags.FLAGS
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
basedir = FLAGS.r2r_basedir
mp_data = mp3d.MatterportData(basedir, FLAGS.occlusion_threshold)
print('Loading R2R training dataset.')
dataset = vln_data.load_split(basedir, 'train')
print('Collecting pano objects.')
all_scans = set([ex.scan for ex in dataset])
pano_objects = mp_data.get_per_pano_objects(all_scans)
print('Calculating IDF scores.')
appraiser = appraise.create_idf_appraiser(pano_objects, FLAGS.notable_boost,
FLAGS.boring_penalty,
FLAGS.occlusion_threshold)
print('Saving appraiser.')
appraiser.to_file(FLAGS.output_file)
if __name__ == '__main__':
app.run(main)
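# Example invocation (the script path below is a placeholder; --r2r_basedir and
# --output_file are the flags defined above):
#   python idf_appraiser.py --r2r_basedir=/path/to/matterport_data \
#       --output_file=/tmp/appraiser_output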
|
'''
Created on October 25th, 2014
Subscribe to a resource, connect to the notification channel of an mDS instance and receive
notifications from the subscribed resource.
Process the notifications and filter a set of endpoints and a particular resource path. Index the
resource value from the notification and use it to actuate an indicator.
@author: mjkoster
'''
if __name__ == '__main__' :
import httplib
import json
from urlparse import urlparse
import base64
httpServer = 'http://smartobjectservice.com:8080'
httpDomain = 'domain'
resourcePathBase = '/' + httpDomain + '/endpoints'
subscribeURI = '/3302/0/5500'
#actuateURI = '/11101/0/5901'
actuateURI = '/11100/0/5900' # use the chainable LED or LED strip
baseURL = httpServer + resourcePathBase
username = 'connected-home'
password = 'secret'
auth = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
ep_names = []
def discoverEndpoints(basePath):
uriObject = urlparse(basePath)
print 'discoverEP : ' + basePath
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('GET', uriObject.path, headers= \
{"Accept" : "application/json", "Authorization": ("Basic %s" % auth) })
response = httpConnection.getresponse()
print response.status, response.reason
if response.status == 200:
endpoints = json.loads(response.read())
httpConnection.close()
for endpoint in endpoints:
if endpoint['type'] == 'DEMO' and discoverResources(endpoint['name'], subscribeURI):
ep_names.append(endpoint['name'])
print 'endpoint: ' + endpoint['name']
return ep_names
def discoverResources(endpoint, uri_path):
resources = []
uriObject = urlparse(baseURL + '/' + endpoint)
print 'discoverRES : ' + endpoint
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('GET', uriObject.path, headers= \
{"Accept" : "application/json", "Authorization": ("Basic %s" % auth) })
response = httpConnection.getresponse()
print response.status, response.reason
if response.status == 200:
resources = json.loads(response.read())
httpConnection.close()
        for resource in resources:
            if resource['uri'] == uri_path:
                print 'resource: ' + resource['uri']
                return resource['uri']
        # no match found among this endpoint's resources
        return 0
def subscribe(resourceURI):
for ep in ep_names:
path = httpServer + '/' + httpDomain + '/subscriptions' + '/' + ep + subscribeURI + '?sync=true'
print "subscribe: " + path
uriObject = urlparse(path)
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('PUT', uriObject.path + '?' + uriObject.query, "", \
{"Content-Type" : "application/json", "Authorization": ("Basic %s" % auth)})
response = httpConnection.getresponse()
print response.status, response.reason
httpConnection.close()
def longPoll(channelPath):
print 'poll: ' + channelPath
uriObject = urlparse(channelPath)
httpConnection = httplib.HTTPConnection(uriObject.netloc)
while 1:
httpConnection.request('GET', uriObject.path, headers= \
{"Accept" : "application/json", "Authorization": ("Basic %s" % auth) })
response = httpConnection.getresponse()
print response.status, response.reason
if response.status == 200:
httpBody = response.read()
if len(httpBody) > 0:
handleNotifications(json.loads(httpBody))
def handleNotifications(events):
if 'notifications' in events:
for notification in events['notifications']:
if (notification['ep'] in ep_names) and (notification['path'] == subscribeURI):
process_payload(notification)
def process_payload(notification):
value = base64.b64decode(notification['payload']) #notification payloads are base64 encoded
print "value: ", value
"""
ledString = ""
for led in range(10):
if float(value)/10 > led:
ledString += '1'
else:
ledString += '0'
actuateLED(ledString)
"""
if value == '1':
actuateLED('FF000000')
else:
actuateLED('00000000')
def actuateLED(ledString = ''):
for ep in ep_names:
path = baseURL + '/' + ep + actuateURI
print "actuating: " + path + ", value=" + ledString
uriObject = urlparse(path)
httpConnection = httplib.HTTPConnection(uriObject.netloc)
httpConnection.request('PUT', uriObject.path + '?' + uriObject.query, ledString, \
{"Content-Type" : "application/json", "Authorization": ("Basic %s" % auth)})
response = httpConnection.getresponse()
print response.status, response.reason
httpConnection.close()
"""
Start
"""
print "Started"
discoverEndpoints(baseURL)
subscribe(subscribeURI)
try:
longPoll(httpServer + '/' + httpDomain + '/notification/pull')
    except KeyboardInterrupt:
        print 'got KeyboardInterrupt'
print 'closed'
|
"""
programme: musique_bd.py
- Le programme qui va afficher à l'écran la liste numérotée des artistes dans la BD.
- Le programme va afficher la liste des albums d'un artiste en particulier (en saisissant son numéro au clavier)
- Le fichier input_data.txt contient des données sur des albums de musique. Chaque ligne correspond à un album, avec 3 champs :
le premier champ est le nom de l'artiste;
le deuxième champ est le nom de l'album;
le troisième champ est l'année de publication de l'album. Les champs sont séparés par un |.
- Le programme qui va lire le fichier et ajouter ces albums à la base de données.
- Si l'artiste existe déjà, le programme utlise l'id de l'artiste existant, sinon crée un nouvel artiste.
"""
import sqlite3
connect_db = sqlite3.connect("musique.db")
cursor = connect_db.cursor()
def get_nom_artiste(artiste_id):
sql = "SELECT * FROM artiste WHERE id=?"
cursor.execute(sql, [(artiste_id)])
return cursor.fetchone()[1]
def get_id_artiste(nom):
sql = "SELECT * FROM artiste WHERE nom=?"
cursor.execute(sql, [(nom)])
return cursor.fetchone()[0]
def verifier_existence_artiste(nom):
sql = "SELECT 1 FROM artiste WHERE nom=?"
if cursor.execute(sql, [nom]).fetchone() == None:
return False
else:
return True
def ajouter_album(titre, annee, artiste_id, maison_disque_id):
sql = "INSERT INTO album(titre, annee, artiste_id, maison_disque_id) VALUES(?, ?, ?, ?)"
cursor.executemany(sql, [(titre, annee, artiste_id, maison_disque_id)])
connect_db.commit()
def ajouter_artiste(nom, est_solo, nombre_individus):
if not verifier_existence_artiste(nom):
sql = "INSERT INTO artiste(nom, est_solo, nombre_individus) VALUES(?, ?, ?)"
cursor.executemany(sql, [(nom, est_solo, nombre_individus)])
connect_db.commit()
def lire_input_data():
fichier_input_data = open("input/input_data")
for ligne in fichier_input_data:
ligne = ligne.rstrip().split("|")
        ajouter_artiste(ligne[0], 1, 1)  # make up the values you do not know
        ajouter_album(ligne[1], ligne[2], get_id_artiste(ligne[0]), 1)  # use any record label id
fichier_input_data.close()
def afficher_liste_artistes():
cursor.execute("SELECT * FROM artiste")
print "Voici la liste des artistes dans notre Base de Données\n"
for row in cursor:
        identifier, nom, est_solo, nombre_individus = row
        print "%d - %s" % (identifier, nom)
def afficher_albums_artiste(artiste_id):
print "\nLa liste des albums de %s est:\n" % (get_nom_artiste(artiste_id))
sql1 = "SELECT * FROM album WHERE artiste_id=?"
cursor.execute(sql1, [(artiste_id)])
for row in cursor:
album_id, titre, annee, artiste_id, maison_disque_id = row
print "%s - %d" % (titre, annee)
def menu_bd():
selection= raw_input("""
- 'v' pour consulter les artistes de notre Base de Données
- 'a' pour consulter les albums d'un artiste en particulier
- 'c' pour charger des données d'un fichier - input.txt
- 'q' pour quiter le programme\n
Entrez votre sélection:
""")
while selection != 'q':
if selection == 'v':
afficher_liste_artistes()
elif selection == 'a':
artiste_id = int(raw_input("Entrez le numéro de l'artiste:"))
afficher_albums_artiste(artiste_id)
elif selection == 'c':
lire_input_data()
print "Les données ont été chargés avec succès\n"
selection = raw_input("""
- 'v' pour consulter les artistes de notre Base de Données
- 'a' pour consulter les albums d'un artiste en particulier
- 'c' pour charger des données d'un fichier - input_data.txt
- 'q' pour quiter le programme\n
Entrez votre sélection:
""")
print "FIN NORMAL DU PROGRAMME"
menu_bd()
|
from random import Random
class BloomFilter(object):
def __init__(self, num_bytes, num_probes, iterable=()):
"""Bloom filter implementation
Example: Check if number in set
>>> bf = BloomFilter(8, 2, (123, 321, 213, 3123))
>>> 123 in bf
True
>>> 456 in bf
False
:param num_bytes: Number of bytes in the filter
:type num_bytes: int
:param num_probes: Number of bloom filter probes
:type num_probes: int
:param iterable: Iterable set of items to create the filter from
"""
self._array = bytearray(num_bytes)
self._num_probes = num_probes
self._num_bits = num_bytes * 8
self.update(iterable)
@property
def array(self):
return self._array
def _get_probes(self, key):
random = Random(key).random
return (int(random() * self._num_bits)
for _probe in range(self._num_probes))
def update(self, keys):
"""Add keys to the current filter
:param keys: iterable set of keys
"""
for key in keys:
for i in self._get_probes(key):
self._array[i // 8] |= 2 ** (i % 8)
def __contains__(self, key):
return all(self._array[i // 8] & (2 ** (i % 8))
for i in self._get_probes(key))
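# Illustrative usage sketch (not part of the original class): parameter values
# below are arbitrary; with 32 bytes and 3 probes a false positive on a key
# that was never added is possible but very unlikely.
if __name__ == '__main__':
    bf = BloomFilter(num_bytes=32, num_probes=3)
    bf.update(['alpha', 'beta'])
    print('alpha' in bf)  # expected: True
    print('gamma' in bf)  # expected: False (barring a rare false positive)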
|
from setuptools import setup, find_packages
import sys, os
version = '1.0'
setup(name='cephprimarystorage',
version=version,
description="ZStack ceph primary storage",
long_description="""\
ZStack ceph primary storage""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='zstack ceph',
author='Frank Zhang',
author_email='xing5820@gmail.com',
url='http://zstack.org',
license='Apache License 2',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
'''
'''
Test.Summary = "Test start up of Traffic server with configuration modification of starting port"
Test.SkipUnless(Condition.HasProgram("curl",
"Curl needs to be installed on your system for this test to work"))
ts1 = Test.MakeATSProcess("ts1",select_ports=False)
ts1.Setup.ts.CopyConfig('config/records_8090.config',"records.config")
t = Test.AddTestRun("Test traffic server started properly")
t.StillRunningAfter = ts1
p = t.Processes.Default
p.Command = "curl 127.0.0.1:8090"
p.ReturnCode = 0
p.StartBefore(Test.Processes.ts1, ready = When.PortOpen(8090))
|
"""Copyright (c) <2016> <Yazan Obeidi>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to
do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import pandas as pd
def kalman_filter(zk, xk, A=np.matrix(1), B=np.matrix(0), Pk=1,
uk=np.array(0), wk=0, Q=0.1, R=1, H=np.matrix(1)):
"""Performs Kalman Filtering on pandas timeseries data.
:param: zk (pandas timeseries): input data
:param: xk (np.array): a priori state estimate vector
:param: A (np.matrix): state transition coefficient matrix
:param: B (np.matrix): control coefficient matrix
:param: Pk (np.matrix): prediction covariance matrix
:param: uk (np.array): control signal vector
:param: wk (float): process noise (has covariance Q)
:param: Q (float): process covariance
:param: R (float): measurement covariance
:param: H (np.matrix): transformation matrix
:return: output (np.array): kalman filtered data
"""
output = np.zeros(len(zk))
for k, t in enumerate(zk.index):
# time update (prediction)
xk = A*xk + B*uk + wk # Predict state
zk_pred = H*xk # Predict measurement
Pk = A*Pk*A.T + Q # Predict error covariance
# measurement update (correction)
vk = zk[t] - zk_pred # Measurement residual
        S = (H*Pk*H.T) + R              # Innovation (residual) covariance
Kk = (Pk*H.T) / S # Compute Kalman Gain
xk = xk + (Kk * vk) # Update estimate with gain * residual
Pk = (1 - Kk*H)*Pk # Update error covariance
output[k] = xk.item()
return output
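# Illustrative usage sketch (not part of the original module): filter a short,
# noisy, roughly constant signal. The dates and measurement values are made up.
if __name__ == '__main__':
    zk = pd.Series([1.10, 0.92, 1.05, 0.97, 1.02],
                   index=pd.date_range('2016-01-01', periods=5, freq='D'))
    smoothed = kalman_filter(zk, xk=np.matrix(0.0))
    print(smoothed)  # estimates move toward the underlying level near 1.0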
|
from pathlib import Path
import yaml
__all__ = ("load_config", )
def load_config(settings, user_settings=None, src_files=None):
"""
Negotiate a group of settings from the global defaults, the user-provided
values and config files. The keys in the `settings` argument are the only
settings that are considered valid.
The priority of the values is:
user_settings > src_files > settings
Arguments:
settings (dict):
All the valid settings with its default values.
user_settings (dict):
The settings provided by the user. These overwrite the others.
Any key that is not in the `settings` dict is ignored.
src_files (list of str/Path):
List of YAML files (with extensions) from where to try to read the
default values of the settings provided by the user.
The first one found is the one read, the others are ignored.
These have priority over the global defaults but are overwritten
by the user_settings.
Any key that is not in the `settings` dict is ignored.
Examples:
>>> load_config({"a": 1}, {"a": 2})
{'a': 2}
>>> load_config({"a": 1, "b": 2}, {"b": 3})
{'a': 1, 'b': 3}
>>> load_config({"a": 1}, {"b": 2})
{'a': 1}
"""
assert user_settings or src_files
user_settings = user_settings or {}
user_defaults = _load_user_defaults(src_files)
config = {}
for key, default in settings.items():
value = user_settings.get(key)
if value is not None:
config[key] = value
continue
value = user_defaults.get(key)
if value is not None:
config[key] = value
continue
config[key] = default
return config
def _load_user_defaults(src_files):
for path in src_files or []:
path = Path(path)
if not path.exists():
continue
return yaml.safe_load(path.read_text()) or {}
return {}
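# Illustrative sketch (the file name "defaults.yml" is hypothetical): values
# read from a src_file override the built-in defaults but are still overridden
# by explicit user_settings, per the priority documented above.
#
#     config = load_config(
#         {"host": "localhost", "port": 8080},
#         user_settings={"port": 9090},
#         src_files=["defaults.yml"])
#     # port comes from user_settings; host comes from defaults.yml if the key
#     # is present there, otherwise from the built-in default.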
|
import logging
import google.auth
from google.auth import iam
from google.auth.credentials import with_scopes_if_required
from google.auth._default import _load_credentials_from_file
from google.auth.transport import requests
from google.oauth2 import service_account
from googleapiclient import discovery
logger = logging.getLogger(__name__)
_TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
_TOKEN_SCOPE = frozenset(['https://www.googleapis.com/auth/iam'])
def build_service(api, version, credentials_path=None, user_email=None, scopes=None):
"""Build and returns a service object.
Allows delegation of GSuite permissions to the service account when the `user_email` argument is passed.
Args:
api (str): The Admin SDK API to use.
version (str): The Admin SDK API version to use.
credentials_path (str, optional): The path to the service account credentials.
user_email (str): The email of the user. Needs permissions to access the Admin APIs.
scopes (list, optional): A list of scopes to authenticate the request with.
Returns:
Google Service object.
"""
if credentials_path is not None:
logger.info("Getting credentials from file '%s' ...", credentials_path)
credentials, _ = _load_credentials_from_file(credentials_path)
else:
logger.info("Getting default application credentials ...")
credentials, _ = google.auth.default()
if user_email is not None: # make delegated credentials
credentials = _make_delegated_credentials(
credentials,
user_email,
scopes)
return discovery.build(api, version, credentials=credentials)
def _make_delegated_credentials(credentials, user_email, scopes):
"""Make delegated credentials.
Allows a service account to impersonate the user passed in `user_email`,
using a restricted set of scopes.
Args:
credentials (service_account.Credentials): The service account credentials.
user_email (str): The email for the user to impersonate.
scopes (list): A list of scopes.
Returns:
service_account.Credentials: The delegated credentials
"""
request = requests.Request()
credentials = with_scopes_if_required(credentials, _TOKEN_SCOPE)
credentials.refresh(request)
email = credentials.service_account_email
signer = iam.Signer(
request,
credentials,
email)
return service_account.Credentials(
signer,
email,
_TOKEN_URI,
scopes=scopes,
subject=user_email)
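# Illustrative usage sketch (the API name, credentials path, email and scope
# below are placeholders, not values defined in this module):
#
#     service = build_service(
#         'admin', 'directory_v1',
#         credentials_path='/path/to/service-account.json',
#         user_email='admin@example.com',
#         scopes=['https://www.googleapis.com/auth/admin.directory.user.readonly'])
#     users = service.users().list(customer='my_customer').execute()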
|
"""
Sponge Knowledge Base
Triggers - Event pattern
"""
from java.util.concurrent.atomic import AtomicInteger
def onInit():
# Variables for assertions only
sponge.setVariable("countA", AtomicInteger(0))
sponge.setVariable("countAPattern", AtomicInteger(0))
class TriggerA(Trigger):
def onConfigure(self):
self.withEvent("a")
def onRun(self, event):
sponge.getVariable("countA").incrementAndGet()
class TriggerAPattern(Trigger):
def onConfigure(self):
self.withEvent("a.*")
def onRun(self, event):
self.logger.debug("Received matching event {}", event.name)
sponge.getVariable("countAPattern").incrementAndGet()
def onStartup():
for name in ["a", "a1", "a2", "aTest", "b1", "b2", "bTest", "a", "A", "A1" ]:
sponge.event(name).send()
|
"""Unit tests for types module."""
from __future__ import absolute_import
import datetime
import logging
import unittest
import future.tests.base # pylint: disable=unused-import
import mock
try:
from google.cloud.datastore import client
from google.cloud.datastore.helpers import GeoPoint
from apache_beam.io.gcp.datastore.v1new.types import Entity
from apache_beam.io.gcp.datastore.v1new.types import Key
from apache_beam.io.gcp.datastore.v1new.types import Query
from apache_beam.options.value_provider import StaticValueProvider
except ImportError:
client = None
_LOGGER = logging.getLogger(__name__)
@unittest.skipIf(client is None, 'Datastore dependencies are not installed')
class TypesTest(unittest.TestCase):
_PROJECT = 'project'
_NAMESPACE = 'namespace'
def setUp(self):
self._test_client = client.Client(
project=self._PROJECT,
namespace=self._NAMESPACE,
# Don't do any network requests.
_http=mock.MagicMock())
def _assert_keys_equal(self, beam_type, client_type, expected_project):
self.assertEqual(beam_type.path_elements[0], client_type.kind)
self.assertEqual(beam_type.path_elements[1], client_type.id)
self.assertEqual(expected_project, client_type.project)
def testEntityToClientEntity(self):
# Test conversion from Beam type to client type.
k = Key(['kind', 1234], project=self._PROJECT)
kc = k.to_client_key()
exclude_from_indexes = ('datetime', 'key')
e = Entity(k, exclude_from_indexes=exclude_from_indexes)
properties = {
'datetime': datetime.datetime.utcnow(),
'key_ref': Key(['kind2', 1235]),
'bool': True,
'float': 1.21,
'int': 1337,
'unicode': 'text',
'bytes': b'bytes',
'geopoint': GeoPoint(0.123, 0.456),
'none': None,
'list': [1, 2, 3],
'entity': Entity(Key(['kind', 111])),
'dict': {
'property': 5
},
}
e.set_properties(properties)
ec = e.to_client_entity()
self.assertEqual(kc, ec.key)
self.assertSetEqual(set(exclude_from_indexes), ec.exclude_from_indexes)
self.assertEqual('kind', ec.kind)
self.assertEqual(1234, ec.id)
for name, unconverted in properties.items():
converted = ec[name]
if name == 'key_ref':
self.assertNotIsInstance(converted, Key)
self._assert_keys_equal(unconverted, converted, self._PROJECT)
elif name == 'entity':
self.assertNotIsInstance(converted, Entity)
self.assertNotIsInstance(converted.key, Key)
self._assert_keys_equal(unconverted.key, converted.key, self._PROJECT)
else:
self.assertEqual(unconverted, converted)
# Test reverse conversion.
entity_from_client_entity = Entity.from_client_entity(ec)
self.assertEqual(e, entity_from_client_entity)
def testKeyToClientKey(self):
k = Key(['kind1', 'parent'],
project=self._PROJECT,
namespace=self._NAMESPACE)
ck = k.to_client_key()
self.assertEqual(self._PROJECT, ck.project)
self.assertEqual(self._NAMESPACE, ck.namespace)
self.assertEqual(('kind1', 'parent'), ck.flat_path)
self.assertEqual('kind1', ck.kind)
self.assertEqual('parent', ck.id_or_name)
self.assertEqual(None, ck.parent)
k2 = Key(['kind2', 1234], parent=k)
ck2 = k2.to_client_key()
self.assertEqual(self._PROJECT, ck2.project)
self.assertEqual(self._NAMESPACE, ck2.namespace)
self.assertEqual(('kind1', 'parent', 'kind2', 1234), ck2.flat_path)
self.assertEqual('kind2', ck2.kind)
self.assertEqual(1234, ck2.id_or_name)
self.assertEqual(ck, ck2.parent)
def testKeyFromClientKey(self):
k = Key(['k1', 1234], project=self._PROJECT, namespace=self._NAMESPACE)
kfc = Key.from_client_key(k.to_client_key())
self.assertEqual(k, kfc)
k2 = Key(['k2', 'adsf'], parent=k)
kfc2 = Key.from_client_key(k2.to_client_key())
# Converting a key with a parent to a client_key and back loses the parent:
self.assertNotEqual(k2, kfc2)
self.assertTupleEqual(('k1', 1234, 'k2', 'adsf'), kfc2.path_elements)
self.assertIsNone(kfc2.parent)
kfc3 = Key.from_client_key(kfc2.to_client_key())
self.assertEqual(kfc2, kfc3)
kfc4 = Key.from_client_key(kfc2.to_client_key())
kfc4.project = 'other'
self.assertNotEqual(kfc2, kfc4)
def testKeyFromClientKeyNoNamespace(self):
k = Key(['k1', 1234], project=self._PROJECT)
ck = k.to_client_key()
self.assertEqual(None, ck.namespace) # Test that getter doesn't croak.
kfc = Key.from_client_key(ck)
self.assertEqual(k, kfc)
def testKeyToClientKeyMissingProject(self):
k = Key(['k1', 1234], namespace=self._NAMESPACE)
with self.assertRaisesRegex(ValueError, r'project'):
_ = Key.from_client_key(k.to_client_key())
def testQuery(self):
filters = [('property_name', '=', 'value')]
projection = ['f1', 'f2']
order = projection
distinct_on = projection
ancestor_key = Key(['kind', 'id'], project=self._PROJECT)
q = Query(
kind='kind',
project=self._PROJECT,
namespace=self._NAMESPACE,
ancestor=ancestor_key,
filters=filters,
projection=projection,
order=order,
distinct_on=distinct_on)
cq = q._to_client_query(self._test_client)
self.assertEqual(self._PROJECT, cq.project)
self.assertEqual(self._NAMESPACE, cq.namespace)
self.assertEqual('kind', cq.kind)
self.assertEqual(ancestor_key.to_client_key(), cq.ancestor)
self.assertEqual(filters, cq.filters)
self.assertEqual(projection, cq.projection)
self.assertEqual(order, cq.order)
self.assertEqual(distinct_on, cq.distinct_on)
_LOGGER.info('query: %s', q) # Test __repr__()
def testValueProviderFilters(self):
self.vp_filters = [
[(
StaticValueProvider(str, 'property_name'),
StaticValueProvider(str, '='),
StaticValueProvider(str, 'value'))],
[(
StaticValueProvider(str, 'property_name'),
StaticValueProvider(str, '='),
StaticValueProvider(str, 'value')),
('property_name', '=', 'value')],
]
self.expected_filters = [
[('property_name', '=', 'value')],
[('property_name', '=', 'value'), ('property_name', '=', 'value')],
]
for vp_filter, exp_filter in zip(self.vp_filters, self.expected_filters):
q = Query(
kind='kind',
project=self._PROJECT,
namespace=self._NAMESPACE,
filters=vp_filter)
cq = q._to_client_query(self._test_client)
self.assertEqual(exp_filter, cq.filters)
_LOGGER.info('query: %s', q) # Test __repr__()
def testValueProviderNamespace(self):
self.vp_namespace = StaticValueProvider(str, 'vp_namespace')
self.expected_namespace = 'vp_namespace'
q = Query(kind='kind', project=self._PROJECT, namespace=self.vp_namespace)
cq = q._to_client_query(self._test_client)
self.assertEqual(self.expected_namespace, cq.namespace)
_LOGGER.info('query: %s', q) # Test __repr__()
def testQueryEmptyNamespace(self):
# Test that we can pass a namespace of None.
self._test_client.namespace = None
q = Query(project=self._PROJECT, namespace=None)
cq = q._to_client_query(self._test_client)
self.assertEqual(self._test_client.project, cq.project)
self.assertEqual(None, cq.namespace)
if __name__ == '__main__':
unittest.main()
|
'''
Created on Oct 9, 2015
@author: akshah
'''
from bgpDataEngine.bgpDataEngine import bgpDataEngine
from customUtilities.helperFunctions import *
# explicit imports for the path helpers used below
from os import listdir
from os.path import isfile, join
if __name__ == '__main__':
start_time,_=currentTime()
bde=bgpDataEngine()
#Just load files no fetching
mrtFiles = [join('mrtFiles/', f) for f in listdir('mrtFiles/') if isfile(join('mrtFiles/', f)) if join('mrtFiles/',f)[-4:]=='.mrt']
bde.load2DB(mrtFiles)
end_time,_=currentTime()
print('Finished processing in '+str(int((end_time-start_time)/60))+' minutes and '+str(int((end_time-start_time)%60))+' seconds.')
|
"""An overview window showing thumbnails of the image set.
"""
import sys
import math
from PySide import QtCore, QtGui
class ThumbnailWidget(QtGui.QLabel):
def __init__(self, image):
super().__init__()
self.setFrameStyle(self.Box | self.Plain)
self.setLineWidth(4)
palette = self.palette()
frameColor = palette.color(self.backgroundRole())
palette.setColor(self.foregroundRole(), frameColor)
self.setPalette(palette)
self.image = image
self.setImagePixmap()
def _getOverviewWindow(self):
w = self
while not isinstance(w, OverviewWindow):
w = w.parent()
return w
def setImagePixmap(self):
self.setPixmap(self.image.getThumbPixmap())
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
ovwin = self._getOverviewWindow()
ovwin.imageViewer.moveCurrentTo(self.image)
def markActive(self, isActive):
palette = self.palette()
if isActive:
frameColor = QtCore.Qt.blue
else:
frameColor = palette.color(self.backgroundRole())
palette.setColor(self.foregroundRole(), frameColor)
self.setPalette(palette)
class OverviewWindow(QtGui.QMainWindow):
def __init__(self, imageViewer):
super().__init__()
self.imageViewer = imageViewer
self.numcolumns = 4
self.setWindowTitle("Overview")
self.mainLayout = QtGui.QGridLayout()
self._populate()
centralWidget = QtGui.QWidget()
centralWidget.setLayout(self.mainLayout)
scrollArea = QtGui.QScrollArea()
scrollArea.setWidget(centralWidget)
scrollArea.setAlignment(QtCore.Qt.AlignCenter)
self.setCentralWidget(scrollArea)
self.closeAct = QtGui.QAction("&Close", self,
triggered=self.close)
self.fileMenu = QtGui.QMenu("&File", self)
self.fileMenu.addAction(self.closeAct)
self.menuBar().addMenu(self.fileMenu)
# Set the width of the window such that the scrollArea just
# fits. We need to add 24 to the central widget, 20 for the
# vertical scroll bar and 4 for the border.
width = centralWidget.size().width() + 24
size = self.size()
size.setWidth(width)
self.resize(size)
self.activeWidget = None
try:
image = self.imageViewer.selection[self.imageViewer.cur]
self.markActive(image)
except IndexError:
pass
def _populate(self):
"""Populate the mainLayout with thumbnail images.
"""
images = self.imageViewer.selection
ncol = self.numcolumns
c = 0
for i in images:
try:
thumb = ThumbnailWidget(i)
except Exception as e:
print(str(e), file=sys.stderr)
else:
self.mainLayout.addWidget(thumb, c // ncol, c % ncol,
QtCore.Qt.AlignCenter)
c += 1
def updateThumbs(self):
"""Update the mainLayout with thumbnail images.
"""
# Note: this code is based on the assumption that no image
# will ever be added to self.imageViewer.selection and thus we
# only need to consider removing ThumbnailWidgets, but not to
# add any. Furthermore, we assume the order given by
        # self.mainLayout.itemAt() is the order that the widgets
# have been added to self.mainLayout and thus the same as
# self.imageViewer.selection.
numImages = len(self.imageViewer.selection)
for i in range(numImages):
widget = self.mainLayout.itemAt(i).widget()
image = self.imageViewer.selection[i]
if widget.image is not image:
widget.image = image
widget.setImagePixmap()
while self.mainLayout.count() > numImages:
item = self.mainLayout.takeAt(numImages)
item.widget().deleteLater()
def getThumbnailWidget(self, image):
for i in range(self.mainLayout.count()):
w = self.mainLayout.itemAt(i).widget()
if w.image is image:
return w
else:
return None
def markActive(self, image):
if self.activeWidget:
self.activeWidget.markActive(False)
self.activeWidget = self.getThumbnailWidget(image)
if self.activeWidget:
self.activeWidget.markActive(True)
|
import factory
import factory.fuzzy
from .models import BreadLabelValueTestModel, BreadTestModel, BreadTestModel2
class BreadTestModel2Factory(factory.DjangoModelFactory):
FACTORY_FOR = BreadTestModel2
text = factory.fuzzy.FuzzyText(length=10)
class BreadTestModelFactory(factory.DjangoModelFactory):
FACTORY_FOR = BreadTestModel
name = factory.fuzzy.FuzzyText(length=10)
age = factory.fuzzy.FuzzyInteger(low=1, high=99)
other = factory.SubFactory(BreadTestModel2Factory)
class BreadLabelValueTestModelFactory(factory.DjangoModelFactory):
FACTORY_FOR = BreadLabelValueTestModel
name = factory.fuzzy.FuzzyText(length=10)
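# Illustrative usage sketch (requires a configured Django test database; the
# checks only restate the fuzzy bounds declared above):
#
#     instance = BreadTestModelFactory()
#     assert len(instance.name) == 10
#     assert 1 <= instance.age <= 99
#     assert instance.other is not None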
|
"""Module defines a base class for Pulsar managers using DRMAA."""
import logging
try:
from drmaa import JobState
except (OSError, ImportError, RuntimeError):
JobState = None
from .external import ExternalBaseManager
from ..util.drmaa import DrmaaSessionFactory
from pulsar.managers import status
log = logging.getLogger(__name__)
IGNORE_SUBMISSION_SPEC_MESSAGE = "Submission received native_specification but being overridden by manager specification."
class BaseDrmaaManager(ExternalBaseManager):
"""Base class for Pulsar managers using DRMAA."""
def __init__(self, name, app, **kwds):
"""Setup native specification and drmaa session factory."""
super(BaseDrmaaManager, self).__init__(name, app, **kwds)
self.native_specification = kwds.get('native_specification', None)
drmaa_session_factory_class = kwds.get('drmaa_session_factory_class', DrmaaSessionFactory)
drmaa_session_factory = drmaa_session_factory_class()
self.drmaa_session = drmaa_session_factory.get()
def shutdown(self, timeout=None):
"""Cleanup DRMAA session and call shutdown of parent."""
try:
super(BaseDrmaaManager, self).shutdown(timeout)
except Exception:
pass
self.drmaa_session.close()
def _get_status_external(self, external_id):
drmaa_state = self.drmaa_session.job_status(external_id)
return {
JobState.UNDETERMINED: status.COMPLETE,
JobState.QUEUED_ACTIVE: status.QUEUED,
JobState.SYSTEM_ON_HOLD: status.QUEUED,
JobState.USER_ON_HOLD: status.QUEUED,
JobState.USER_SYSTEM_ON_HOLD: status.QUEUED,
JobState.RUNNING: status.RUNNING,
JobState.SYSTEM_SUSPENDED: status.QUEUED,
JobState.USER_SUSPENDED: status.QUEUED,
JobState.DONE: status.COMPLETE,
JobState.FAILED: status.COMPLETE, # Should be a FAILED state here as well
}[drmaa_state]
def _build_template_attributes(self, job_id, command_line, dependencies_description=None, env=[], submit_params={}, setup_params=None):
stdout_path = self._stdout_path(job_id)
stderr_path = self._stderr_path(job_id)
working_directory = self.job_directory(job_id).working_directory()
attributes = {
"remoteCommand": self._setup_job_file(
job_id,
command_line,
dependencies_description=dependencies_description,
env=env,
setup_params=setup_params
),
"jobName": self._job_name(job_id),
"outputPath": ":%s" % stdout_path,
"errorPath": ":%s" % stderr_path,
"workingDirectory": working_directory,
}
submit_native_specification = submit_params.get("native_specification", None)
native_specification = None
if self.native_specification:
native_specification = self.native_specification
if submit_native_specification is not None:
log.warn(IGNORE_SUBMISSION_SPEC_MESSAGE)
elif submit_native_specification:
native_specification = submit_params["native_specification"]
if native_specification is not None:
attributes["nativeSpecification"] = native_specification
log.info("Submitting DRMAA job with nativeSpecification [%s]" % native_specification)
else:
log.debug("No native specification supplied, DRMAA job will be submitted with default parameters.")
return attributes
__all__ = ("BaseDrmaaManager",)
|
from models import logger
from base_service import BaseServiceHandler
import requests
import json
CLIENT_ID = "bt58rttvtqj7f85qqbk752mt"
CLIENT_SECRET = "yHgCNwx2NaXuhqyB73cJ6mcK"
HTTP_METADATA_URL = "https://api.cisco.com/software/v2.0/metadata/"
HTTP_DOWNLOAD_URL = "https://api.cisco.com/software/v2.0/downloads/urls/"
HTTP_EULA_URL = "https://api.cisco.com/software/v2.0/compliance/forms/eula"
HTTP_K9_URL = "https://api.cisco.com/software/v2.0/compliance/forms/k9"
HTTP_DOWNLOAD_STATISTICS_URL = "https://api.cisco.com/software/841/downloads/statistics"
BSD_EXCEPTION_MESSAGE = "exception_message"
BSD_METADATA_TRANS_ID = "metadata_trans_id"
BSD_IMAGE_DETAILS = "image_details"
BSD_IMAGE_NAME = "image_name"
BSD_IMAGE_GUID = "image_guid"
BSD_IMAGE_SIZE = "image_size"
BSD_DOWNLOAD_SESSION_ID = "download_session_id"
BSD_DOWNLOAD_URL = "download_url"
BSD_EULA_FORM = "eula_form_details"
BSD_K9_FORM = "k9_form_details_response"
BSD_FIELD_DETAILS = "field_details"
BSD_FIELD_ID = "field_id"
BSD_FIELD_VALUE = "field_value"
class BSDServiceHandler(BaseServiceHandler):
def __init__(self, username, password, MDF_ID, software_type_ID, PID, image_name):
BaseServiceHandler.__init__(self, username, password)
self.image_name = image_name
self.PID = PID
self.MDF_ID = MDF_ID
self.software_type_ID = software_type_ID
@classmethod
def get_access_token(cls, username, password):
return BaseServiceHandler.get_access_token(username, password, CLIENT_ID, CLIENT_SECRET)
def download(self, output_file_path, callback=None):
access_token = self.get_access_token(self.username, self.password)
UDI = "PID: " + self.PID + " VID: V01 SN: FOX1316G5R5"
response = self.send_meta_data_request(access_token, UDI)
if response is not None:
self.debug_print('response', response.text)
json_text = response.json()
metadata_trans_ID = self.get_json_value(json_text, BSD_METADATA_TRANS_ID)
image_GUID = self.get_json_value(json_text, BSD_IMAGE_GUID)
image_size = self.get_json_value(json_text, BSD_IMAGE_SIZE)
exception_message = self.get_json_value(json_text, BSD_EXCEPTION_MESSAGE)
if exception_message is None:
if metadata_trans_ID is not None and image_GUID is not None:
response = self.send_download_request(access_token, UDI, self.MDF_ID, metadata_trans_ID, image_GUID)
if response is not None:
self.debug_print('response', response.text)
json_text = response.json()
download_url = self.get_json_value(json_text, BSD_DOWNLOAD_URL)
download_session_ID = self.get_json_value(json_text, BSD_DOWNLOAD_SESSION_ID)
# When download_url is null, it may be that the user needs to
# acknowledge the EULA or K9 agreement.
if download_url is None:
eula = self.get_json_value(json_text, BSD_EULA_FORM)
k9 = self.get_json_value(json_text, BSD_K9_FORM)
if eula is not None:
response = self.send_EULA_request(access_token, download_session_ID)
self.debug_print('EULA response', response.text)
elif k9 is not None:
response = self.send_K9_request(access_token, download_session_ID)
self.debug_print('K9 response', response.text)
response = self.send_download_request(access_token, UDI, self.MDF_ID,
metadata_trans_ID, image_GUID)
if response is not None:
self.debug_print('After accepting EULA or K9', response.text)
json_text = response.json()
download_url = self.get_json_value(json_text, BSD_DOWNLOAD_URL)
download_session_ID = self.get_json_value(json_text, BSD_DOWNLOAD_SESSION_ID)
self.debug_print('download_url', download_url)
self.debug_print('download_session', download_session_ID)
if download_url is not None and download_session_ID is not None:
self.send_get_image(access_token, download_url, output_file_path,
self.image_name, image_size, callback)
else:
message = 'User "' + self.username + '" may not have software download privilege on cisco.com.'
raise Exception(message)
else:
logger.error('bsd_service hit exception %s', exception_message)
raise Exception(exception_message)
def send_EULA_request(self, access_token, download_session_ID):
headers = {'Authorization': 'Bearer ' + access_token}
return requests.post(HTTP_EULA_URL + "?download_session_id=" + download_session_ID +
"&user_action=Accepted", headers=headers)
def send_K9_request(self, access_token, download_session_ID):
headers = {'Authorization': 'Bearer ' + access_token}
return requests.post(HTTP_K9_URL + "?download_session_id=" + download_session_ID +
"&user_action=Accepted", headers=headers)
def send_download_statistics(self, access_token, download_session_ID, image_guid, image_size):
headers = {'Authorization': 'Bearer ' + access_token}
return requests.post(HTTP_DOWNLOAD_STATISTICS_URL +
"?download_session_id=" + download_session_ID +
"&image_guid=" + image_guid +
"&download_status=Success" +
"&download_file_size=" + image_size, headers=headers)
def send_get_image(self, access_token, url_string, output_file_path, image_name, image_size, callback=None):
# Segment is 1 MB. For 40 MB files, there will be about 40 updates (i.e. database writes)
        chunk_list = get_chunks(int(image_size), int(image_size) / 1048576)
headers = {'Authorization': 'Bearer ' + access_token}
r = requests.get(url_string, headers=headers, stream=True)
total_byte_count = 0
with open(output_file_path, 'wb') as fd:
for chunk in r.iter_content(8192):
fd.write(chunk)
total_byte_count += 8192
if len(chunk_list) > 0 and total_byte_count > chunk_list[0]:
if callback is not None:
callback('Downloading ' + str(total_byte_count) + ' of ' + image_size + ' bytes.')
# Pop the first entry out
del chunk_list[0]
# Create a file which contains the size of the image file.
size_file = open(output_file_path + '.size', 'w')
size_file.write(image_size)
size_file.close()
def send_meta_data_request(self, access_token, UDI):
url_string = HTTP_METADATA_URL + \
"udi/" + UDI + "/" + \
"mdf_id/" + self.MDF_ID + "/" + \
"software_type_id/" + self.software_type_ID + "/" + \
"image_names/" + self.image_name
headers = {'Authorization': 'Bearer ' + access_token}
return requests.get(url_string, headers=headers)
def send_download_request(self, access_token, UDI, MDF_ID, metadata_trans_ID, image_GUID):
url_string = HTTP_DOWNLOAD_URL + \
"udi/" + UDI + "/" + \
"mdf_id/" + MDF_ID + "/" + \
"metadata_trans_id/" + metadata_trans_ID + "/" + \
"image_guids/" + image_GUID;
headers = {'Authorization': 'Bearer ' + access_token}
return requests.get(url_string, headers=headers)
def get_chunks(image_size, segments):
chunk_list = []
if segments == 0:
chunk_list.append(image_size)
else:
chunk = (int)(image_size / segments)
for i in range(int(segments)):
chunk_list.append(chunk * (i + 1))
return chunk_list
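# Example (illustrative): get_chunks(4096, 4) -> [1024, 2048, 3072, 4096];
# send_get_image pops entries off this list to throttle its progress callbacks.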
if __name__ == '__main__':
print(BSDServiceHandler.get_sn_2_info(BSDServiceHandler.get_access_token("alextang", "xx"), "FOX1316G5R5"))
|
import os
import random
import sys
import time
import conf
import g
from gwis import command
from util_ import misc
log = g.log.getLogger('cmd.lmrk.plog')
class Op_Handler(command.Op_Handler):
__slots__ = (
'trial_num',
'node_id',
'p_num',
)
# *** Constructor
def __init__(self, req):
command.Op_Handler.__init__(self, req)
self.trial_num = None
self.node_id = None
self.p_num = None
# ***
#
def __str__(self):
selfie = (
'landmark_prompt_log: trial_num: %s / node_id: %s / p_num: %s'
% (self.trial_num,
self.node_id,
self.p_num,))
return selfie
# *** Public Interface
#
def decode_request(self):
command.Op_Handler.decode_request(self)
self.trial_num = self.decode_key('trial_num')
self.node_id = self.decode_key('nid')
self.p_num = self.decode_key('p_num')
#
def fetch_n_save(self):
command.Op_Handler.fetch_n_save(self)
if ((self.req.client.username)
and (self.req.client.username != conf.anonymous_username)):
sql = (
"""
INSERT INTO landmark_prompt
(username, trial_num, prompt_num, prompt_time, node_id)
VALUES
(%s, %s, %s, now(), %s)
""") % (self.req.db.quoted(self.req.client.username),
self.trial_num,
self.p_num,
self.node_id,)
self.req.db.transaction_begin_rw()
self.req.db.sql(sql)
self.req.db.transaction_commit()
# ***
|
""":synopsis: Webrecorder session access class."""
from bottle import template, request, HTTPError
from webrecorder.models.user import SessionUser
from webrecorder.models.base import BaseAccess
class SessionAccessCache(BaseAccess):
"""Webrecorder session access.
:cvar str READ_PREFIX: Redis key prefix (read access)
:cvar str WRITE_PREFIX: Redis key prefix (write access)
:ivar Session sesh: session
:ivar StrictRedis redis: Redis interface
:ivar SessionUser _session_user: logged-in user
"""
READ_PREFIX = 'r:'
WRITE_PREFIX = 'w:'
def __init__(self, session, redis):
"""Initialize Webrecorder session access.
:param Session session: Webrecorder session
:param StrictRedis redis: Redis interface
"""
self.sesh = session
self.redis = redis
self._session_user = None
@property
def session_user(self):
"""Read-only attribute session user."""
return self.init_session_user(persist=False)
def init_session_user(self, persist=True, reset=False):
"""Initialize session user.
:param bool persist: whether to persist session user
:returns: session user
:rtype: SessionUser
"""
if not self._session_user or reset:
self._session_user = SessionUser(sesh=self.sesh,
redis=self.redis,
access=self,
persist=persist)
return self._session_user
def get_anon_ttl(self):
"""Get session TTL.
:returns: TTL
:rtype: int
"""
return self.sesh.ttl
def log_in(self, username, remember_me):
"""Log user in.
:param str username: username
:param bool remember_me: whether to extend session TTL
"""
# log in session!
self.sesh.log_in(username, remember_me)
# force session user reinit
self._session_user = None
def is_anon(self, user=None):
"""Return whether current (or given) user is anonymous user.
:param User user: user
:returns: whether user is anonymous user
:rtype: bool
"""
if not user:
user = self.session_user
#return self.sesh.is_anon(user.my_id)
return user.is_anon()
def is_logged_in_user(self, user):
"""Return whether given user is logged in.
:param User user: user
:returns: whether given user is logged in
:rtype: bool
"""
if self.sesh.is_restricted:
return False
return self.session_user == user
def is_superuser(self):
"""Return whether current user is superuser, i.e. has the role
'admin' (level 100).
:returns: whether current user is superuser
:rtype: bool
"""
return self.sesh.curr_role == 'admin'
def assert_is_superuser(self):
"""Assert current user is superuser, i.e. has the role 'admin'
(level 100)."""
if not self.is_superuser():
raise HTTPError(404, 'No Access')
def is_coll_owner(self, collection):
"""Return whether current user is also owner of given collection.
:param Collection collection: collection
:returns: whether current user is owner
:rtype: bool
"""
return self.session_user.is_owner(collection.get_owner())
def check_write_access(self, collection):
"""Return whether current user has right to modify collection.
:param Collection collection: collection
:returns: whether user has right to modify collection
:rtype: bool
"""
if not collection:
return False
if self.is_coll_owner(collection):
return True
        return collection.get_prop(self.WRITE_PREFIX + self.session_user.my_id) is not None
def check_read_access_public(self, collection, allow_superuser=True):
"""Return whether current user has right to read collection (either
because it is public, user is also owner or optionally
user is superuser).
:param Collection collection: collection
:param bool allow_superuser: whether superuser has right to read
:returns: whether current user has right to read or whether collection
is public
:rtype: bool or str
"""
if not collection:
return False
if collection.is_public():
return 'public'
# if superuser is allowed, then can read
if allow_superuser and self.is_superuser():
return True
if self.is_coll_owner(collection):
return True
if self.is_anon():
return False
        return collection.get_prop(self.READ_PREFIX + self.session_user.my_id) is not None
def can_read_coll(self, collection, allow_superuser=True):
"""Return whether current user has right to read collection.
:param Collection collection: collection
:param bool allow_superuser: whether superuser has right to read
:returns: whether current user has right to read collection
:rtype: bool
"""
return bool(self.check_read_access_public(collection, allow_superuser=allow_superuser))
def assert_can_read_coll(self, collection):
"""Assert current user has right to read collection.
:param Collection collection: collection
"""
if not self.can_read_coll(collection):
raise HTTPError(404, 'No Read Access')
def can_write_coll(self, collection):
"""Return whether current user has right to modify collection.
:param Collection collection: collection
:returns: whether current user has right
:rtype: bool
"""
return self.check_write_access(collection)
def assert_can_write_coll(self, collection):
"""Assert current user has right to modify collection.
:param Collection collection: collection
"""
if not self.can_write_coll(collection):
raise HTTPError(404, 'No Write Access')
# for now, equivalent to is_owner(), but a different
# permission, and may change
def can_admin_coll(self, collection):
"""Return whether current user has right to administrate collection.
:param Collection collection: collection
:returns: whether current user has right
:rtype: bool
"""
if self.sesh.is_restricted or not collection:
return False
return self.is_coll_owner(collection)
def assert_can_admin_coll(self, collection):
"""Assert currrent user has right to administrate collection.
:param Collection collection: collection
"""
if not self.can_admin_coll(collection):
raise HTTPError(404, 'No Admin Access')
def is_curr_user(self, user):
"""Return whether given user is logged-in user.
:param User user: user
:returns: whether given user is logged-in user
:rtype: bool
"""
return self.session_user == user
def assert_is_curr_user(self, user):
"""Assert given user is current user or current user is superuser.
:param User user: user
"""
if not self.is_curr_user(user) and not self.is_superuser():
raise HTTPError(404, 'Only Valid for Current User')
def assert_is_logged_in(self):
"""Assert current user is logged in."""
if self.session_user.is_anon():
raise HTTPError(404, 'Not Logged In')
def can_read_list(self, blist):
"""Return whether current user has right to read list of bookmarks.
:param BookmarkList blist: list of bookmarks
:returns: whether current user has right to read list
:rtype: bool
"""
if not blist:
return False
coll = blist.get_owner()
if self.is_coll_owner(coll):
return True
if coll.is_public() and blist.is_public():
return True
return False
def assert_can_read_list(self, blist):
"""Assert current user has right to read list.
:param BookmarkList blist: list of bookmarks
"""
if not self.can_read_list(blist):
raise HTTPError(404, 'No List Access')
|
"""Tests for madi.datasets.smart_buildings_dataset."""
from madi.datasets import smart_buildings_dataset
import pandas as pd
from pandas.util.testing import assert_series_equal
import pytest
class TestSmartBuildingsDataset:
def test_smart_buildings_dataset(self):
pd.set_option('display.max_rows', None, 'display.max_columns', None)
ds = smart_buildings_dataset.SmartBuildingsDataset()
assert len(ds.sample) == 60425
assert ds.sample['data:zone_air_heating_temperature_setpoint'].mean(
) == pytest.approx(
290.627693, abs=1e-4)
assert ds.sample['data:zone_air_heating_temperature_setpoint'].std(
) == pytest.approx(
3.703542, abs=1e-4)
assert ds.sample['data:zone_air_temperature_sensor'].mean(
) == pytest.approx(
295.475594, abs=1e-4)
assert ds.sample['data:zone_air_temperature_sensor'].std() == pytest.approx(
0.971860, abs=1e-4)
assert ds.sample['data:zone_air_cooling_temperature_setpoint'].mean(
) == pytest.approx(
299.942589, abs=1e-4)
assert ds.sample['data:zone_air_cooling_temperature_setpoint'].std(
) == pytest.approx(
2.773154, abs=1e-4)
assert ds.sample['data:supply_air_flowrate_sensor'].mean() == pytest.approx(
0.077608, abs=1e-4)
assert ds.sample['data:supply_air_flowrate_sensor'].std() == pytest.approx(
0.100314, abs=1e-4)
assert ds.sample['data:supply_air_damper_percentage_command'].mean(
) == pytest.approx(
45.299588, abs=1e-4)
assert ds.sample['data:supply_air_damper_percentage_command'].std(
) == pytest.approx(
39.005507, abs=1e-4)
assert ds.sample['data:supply_air_flowrate_setpoint'].mean(
) == pytest.approx(
0.079952, abs=1e-4)
assert ds.sample['data:supply_air_flowrate_setpoint'].std(
) == pytest.approx(
0.089611, abs=1e-4)
assert_series_equal(
ds.sample['class_label'].value_counts(),
pd.Series([58504, 1921], name='class_label', index=[1, 0]))
def test_readme(self):
ds = smart_buildings_dataset.SmartBuildingsDataset()
assert len(ds.description) == 1627
|
"""Key bindings for the emacs-like editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import os
import re
from app.curses_util import *
import app.controller
import app.log
import app.text_buffer
def parse_int(str):
i = 0
k = 0
if len(str) > i and str[i] in ("+", "-"):
i += 1
k = i
while len(str) > k and str[k].isdigit():
k += 1
if k > i:
return int(str[:k])
return 0
def test_parse_int():
assert parse_int("0") == 0
assert parse_int("0e") == 0
assert parse_int("qwee") == 0
assert parse_int("10") == 10
assert parse_int("+10") == 10
assert parse_int("-10") == -10
assert parse_int("--10") == 0
assert parse_int("--10") == 0
class EditText(app.controller.Controller):
"""An EditText is a base class for one-line controllers."""
def __init__(self, view):
app.controller.Controller.__init__(self, view, "EditText")
self.document = None
def set_text_buffer(self, textBuffer):
textBuffer.lines = [u""]
self.commandSet = {
KEY_F1: self.info,
CTRL_A: textBuffer.selection_all,
CTRL_C: textBuffer.edit_copy,
CTRL_H: textBuffer.backspace,
KEY_BACKSPACE1: textBuffer.backspace,
KEY_BACKSPACE2: textBuffer.backspace,
KEY_BACKSPACE3: textBuffer.backspace,
CTRL_Q: self.prg.quit,
CTRL_S: self.save_document,
CTRL_V: textBuffer.edit_paste,
CTRL_X: textBuffer.edit_cut,
CTRL_Y: textBuffer.redo,
CTRL_Z: textBuffer.undo,
# KEY_DOWN: textBuffer.cursor_down,
KEY_LEFT: textBuffer.cursor_left,
KEY_RIGHT: textBuffer.cursor_right,
# KEY_UP: textBuffer.cursor_up,
}
def focus(self):
app.log.info("EditText.focus", repr(self))
self.commandDefault = self.textBuffer.insert_printable
self.commandSet = self.commandSet
def info(self):
app.log.info("EditText command set")
def save_document(self):
app.log.info("save_document", self.document)
if self.document and self.document.textBuffer:
self.document.textBuffer.file_write()
def unfocus(self):
pass
class InteractiveOpener(EditText):
"""Open a file to edit."""
def __init__(self, prg, view, textBuffer):
EditText.__init__(self, prg, view, textBuffer)
self.document = view.host
app.log.info("xxxxx", self.document)
commandSet = self.commandSet.copy()
commandSet.update(
{
KEY_ESCAPE: self.change_to_input_window,
KEY_F1: self.info,
CTRL_I: self.tab_complete_extend,
CTRL_J: self.create_or_open,
CTRL_N: self.create_or_open,
CTRL_O: self.create_or_open,
CTRL_Q: self.prg.quit,
}
)
self.commandSet = commandSet
def focus(self):
app.log.info("InteractiveOpener.focus")
EditText.focus(self)
# Create a new text buffer to display dir listing.
        self.view.host.set_text_buffer(app.text_buffer.TextBuffer(self.prg))
def info(self):
app.log.info("InteractiveOpener command set")
def create_or_open(self):
app.log.info("create_or_open")
expandedPath = os.path.abspath(os.path.expanduser(self.textBuffer.lines[0]))
if not os.path.isdir(expandedPath):
self.view.host.set_text_buffer(
self.prg.bufferManager.load_text_buffer(expandedPath), self.view.host
)
self.change_to_input_window()
def maybe_slash(self, expandedPath):
if (
self.textBuffer.lines[0]
and self.textBuffer.lines[0][-1] != "/"
and os.path.isdir(expandedPath)
):
self.textBuffer.insert("/")
def tab_complete_first(self):
"""Find the first file that starts with the pattern."""
dirPath, fileName = os.path.split(self.lines[0])
foundOnce = ""
for i in os.listdir(os.path.expandvars(os.path.expanduser(dirPath)) or "."):
if i.startswith(fileName):
if foundOnce:
# Found more than one match.
return
fileName = os.path.join(dirPath, i)
if os.path.isdir(fileName):
fileName += "/"
self.lines[0] = fileName
self.on_change()
return
def tab_complete_extend(self):
"""Extend the selection to match characters in common."""
dirPath, fileName = os.path.split(self.textBuffer.lines[0])
expandedDir = os.path.expandvars(os.path.expanduser(dirPath)) or "."
matches = []
if not os.path.isdir(expandedDir):
return
for i in os.listdir(expandedDir):
if i.startswith(fileName):
matches.append(i)
else:
app.log.info("not", i)
if len(matches) <= 0:
self.maybe_slash(expandedDir)
self.on_change()
return
if len(matches) == 1:
self.textBuffer.insert(matches[0][len(fileName) :])
self.maybe_slash(os.path.join(expandedDir, matches[0]))
self.on_change()
return
def find_common_prefix_length(prefixLen):
count = 0
ch = None
for match in matches:
if len(match) <= prefixLen:
return prefixLen
if not ch:
ch = match[prefixLen]
if match[prefixLen] == ch:
count += 1
if count and count == len(matches):
return find_common_prefix_length(prefixLen + 1)
return prefixLen
prefixLen = find_common_prefix_length(len(fileName))
self.textBuffer.insert(matches[0][len(fileName) : prefixLen])
self.on_change()
def set_file_name(self, path):
self.textBuffer.lines = [path]
self.textBuffer.penCol = len(path)
self.textBuffer.goalCol = self.textBuffer.penCol
def on_change(self):
path = os.path.expanduser(os.path.expandvars(self.textBuffer.lines[0]))
dirPath, fileName = os.path.split(path)
dirPath = dirPath or "."
app.log.info("O.on_change", dirPath, fileName)
if os.path.isdir(dirPath):
lines = []
for i in os.listdir(dirPath):
if i.startswith(fileName):
lines.append(i)
if len(lines) == 1 and os.path.isfile(os.path.join(dirPath, fileName)):
self.view.host.set_text_buffer(
self.view.program.bufferManager.load_text_buffer(
os.path.join(dirPath, fileName), self.view.host
)
)
else:
self.view.host.textBuffer.lines = [
os.path.abspath(os.path.expanduser(dirPath)) + ":"
] + lines
else:
self.view.host.textBuffer.lines = [
os.path.abspath(os.path.expanduser(dirPath)) + ": not found"
]
class InteractiveFind(EditText):
"""Find text within the current document."""
def __init__(self, prg, view, textBuffer):
EditText.__init__(self, prg, view, textBuffer)
self.document = view.host
self.commandSet.update(
{
KEY_ESCAPE: self.change_to_input_window,
KEY_F1: self.info,
CTRL_F: self.find_next,
CTRL_J: self.change_to_input_window,
CTRL_R: self.find_prior,
# CTRL_S: self.replacement_text_edit,
KEY_DOWN: self.find_next,
KEY_MOUSE: self.save_event_change_to_host_window,
KEY_UP: self.find_prior,
}
)
self.height = 1
def find_next(self):
self.findCmd = self.document.textBuffer.find_next
def find_prior(self):
self.findCmd = self.document.textBuffer.find_prior
def focus(self):
# self.document.statusLine.hide()
# self.document.resize_by(-self.height, 0)
# self.view.host.move_by(-self.height, 0)
# self.view.host.resize_by(self.height-1, 0)
EditText.focus(self)
self.findCmd = self.document.textBuffer.find
selection = self.document.textBuffer.get_selected_text()
if selection:
self.textBuffer.selection_all()
self.textBuffer.insert_lines(selection)
self.textBuffer.selection_all()
app.log.info("find tb", self.textBuffer.penCol)
def info(self):
app.log.info("InteractiveFind command set")
def on_change(self):
app.log.info("InteractiveFind.on_change")
searchFor = self.textBuffer.lines[0]
try:
self.findCmd(searchFor)
except re.error as e:
self.error = e.message
self.findCmd = self.document.textBuffer.find
# def replacement_text_edit(self):
# pass
def unfocus(self):
app.log.info("unfocus Find")
# self.hide()
class InteractiveGoto(EditText):
"""Jump to a particular line number."""
def __init__(self, prg, view, textBuffer):
EditText.__init__(self, prg, view, textBuffer)
self.document = view.host
commandSet = self.commandSet.copy()
commandSet.update(
{
KEY_ESCAPE: self.change_to_input_window,
KEY_F1: self.info,
CTRL_J: self.change_to_input_window,
KEY_MOUSE: self.save_event_change_to_host_window,
ord("b"): self.goto_bottom,
ord("h"): self.goto_halfway,
ord("t"): self.goto_top,
}
)
self.commandSet = commandSet
def focus(self):
app.log.info("InteractiveGoto.focus")
self.textBuffer.selection_all()
self.textBuffer.insert(str(self.document.textBuffer.penRow + 1))
self.textBuffer.selection_all()
EditText.focus(self)
def info(self):
app.log.info("InteractiveGoto command set")
def goto_bottom(self):
self.cursor_move_to(len(self.document.textBuffer.lines), 0)
self.change_to_input_window()
def goto_halfway(self):
self.cursor_move_to(len(self.document.textBuffer.lines) // 2 + 1, 0)
self.change_to_input_window()
def goto_top(self):
self.cursor_move_to(1, 0)
self.change_to_input_window()
def cursor_move_to(self, row, col):
textBuffer = self.document.textBuffer
penRow = min(max(row - 1, 0), len(textBuffer.lines) - 1)
app.log.info("cursor_move_to row", row, penRow)
textBuffer.cursor_move(
penRow - textBuffer.penRow,
col - textBuffer.penCol,
col - textBuffer.goalCol,
)
def on_change(self):
gotoLine = 0
line = self.textBuffer.parser.row_text(0)
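# Accept "row" or "row,col"; padding with "0" lets a bare row number
# default the column to 0.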
gotoLine, gotoCol = (line.split(",") + ["0", "0"])[:2]
self.cursor_move_to(parse_int(gotoLine), parse_int(gotoCol))
# def unfocus(self):
# self.hide()
class CiEdit(app.controller.Controller):
"""Keyboard mappings for ci."""
def __init__(self, prg, textBuffer):
app.controller.Controller.__init__(self, prg, None, "CiEdit")
app.log.info("CiEdit.__init__")
self.textBuffer = textBuffer
self.commandSet_Main = {
CTRL_SPACE: self.switch_to_command_set_cmd,
CTRL_A: textBuffer.cursor_start_of_line,
CTRL_B: textBuffer.cursor_left,
KEY_LEFT: self.cursor_left,
CTRL_C: self.edit_copy,
CTRL_D: self.delete,
CTRL_E: self.cursor_end_of_line,
CTRL_F: self.cursor_right,
KEY_RIGHT: self.cursor_right,
CTRL_H: self.backspace,
KEY_BACKSPACE1: self.backspace,
KEY_BACKSPACE2: self.backspace,
KEY_BACKSPACE3: self.backspace,
CTRL_J: self.carriage_return,
CTRL_K: self.delete_to_end_of_line,
CTRL_L: self.win.refresh,
CTRL_N: self.cursor_down,
KEY_DOWN: self.cursor_down,
CTRL_O: self.split_line,
CTRL_P: self.cursor_up,
KEY_UP: self.cursor_up,
CTRL_V: self.edit_paste,
CTRL_X: self.edit_cut,
CTRL_Y: self.redo,
CTRL_Z: self.undo,
CTRL_BACKSLASH: self.changeToCmdMode,
# ord('/'): self.switch_to_command_set_cmd,
}
self.commandSet_Cmd = {
ord("a"): self.switch_to_command_set_application,
ord("f"): self.switch_to_command_set_file,
ord("s"): self.switch_to_command_set_select,
ord(";"): self.switch_to_command_set_main,
ord("'"): self.marker_place,
}
self.commandSet_Application = {
ord("q"): self.prg.quit,
ord("t"): self.test,
ord("w"): self.file_write,
ord(";"): self.switch_to_command_set_main,
}
self.commandSet_File = {
ord("o"): self.switch_to_command_set_file_open,
ord("w"): self.file_write,
ord(";"): self.switch_to_command_set_main,
}
self.commandSet_FileOpen = {
ord(";"): self.switch_to_command_set_main,
}
self.commandSet_Select = {
ord("a"): self.selection_all,
ord("b"): self.selection_block,
ord("c"): self.selection_character,
ord("l"): self.selection_line,
ord("x"): self.selection_none,
ord(";"): self.switch_to_command_set_main,
}
self.commandDefault = self.insert_printable
self.commandSet = self.commandSet_Main
def switch_to_command_set_main(self, ignored=1):
self.log("ci main", repr(self.prg))
self.commandDefault = self.insert_printable
self.commandSet = self.commandSet_Main
def switch_to_command_set_cmd(self):
self.log("ci cmd")
self.commandDefault = self.textBuffer.no_op
self.commandSet = self.commandSet_Cmd
def switch_to_command_set_application(self):
self.log("ci application")
self.commandDefault = self.textBuffer.no_op
self.commandSet = self.commandSet_Application
def switch_to_command_set_file(self):
self.commandDefault = self.textBuffer.no_op
self.commandSet = self.commandSet_File
def switch_to_command_set_file_open(self):
self.log("switch_to_command_set_file_open")
self.commandDefault = self.pathInsertPrintable
self.commandSet = self.commandSet_FileOpen
def switch_to_main_and_do_command(self, ch):
self.log("switch_to_main_and_do_command")
self.switch_to_command_set_main()
self.do_command(ch)
def switch_to_command_set_select(self):
self.log("ci select")
self.commandDefault = self.switch_to_main_and_do_command
self.commandSet = self.commandSet_Select
self.selection_character()
class EmacsEdit(app.controller.Controller):
"""Emacs is a common Unix based text editor. This keyboard mapping is
similar to basic Emacs commands."""
def __init__(self, view):
app.controller.Controller.__init__(self, view, "EditText")
def focus(self):
app.log.info("EmacsEdit.focus")
self.commandDefault = self.textBuffer.insert_printable
self.commandSet = self.commandSet_Main
def on_change(self):
pass
def set_text_buffer(self, textBuffer):
app.log.info("EmacsEdit.set_text_buffer")
self.textBuffer = textBuffer
self.commandSet_Main = {
KEY_F1: self.info,
CTRL_A: textBuffer.cursor_start_of_line,
CTRL_B: textBuffer.cursor_left,
KEY_LEFT: textBuffer.cursor_left,
CTRL_D: textBuffer.delete,
CTRL_E: textBuffer.cursor_end_of_line,
CTRL_F: textBuffer.cursor_right,
KEY_RIGHT: textBuffer.cursor_right,
# CTRL_H: textBuffer.backspace,
KEY_BACKSPACE1: textBuffer.backspace,
KEY_BACKSPACE2: textBuffer.backspace,
KEY_BACKSPACE3: textBuffer.backspace,
CTRL_J: textBuffer.carriage_return,
CTRL_K: textBuffer.delete_to_end_of_line,
CTRL_L: self.view.host.refresh,
CTRL_N: textBuffer.cursor_down,
KEY_DOWN: textBuffer.cursor_down,
CTRL_O: textBuffer.split_line,
CTRL_P: textBuffer.cursor_up,
KEY_UP: textBuffer.cursor_up,
CTRL_X: self.switch_to_command_set_x,
CTRL_Y: textBuffer.redo,
CTRL_Z: textBuffer.undo,
}
self.commandSet = self.commandSet_Main
self.commandSet_X = {
CTRL_C: self.prg.quit,
}
def info(self):
app.log.info("EmacsEdit Command set main")
app.log.info(repr(self))
def switch_to_command_set_x(self):
self.log("emacs x")
self.commandSet = self.commandSet_X
|
import requests
class GeonamesException(Exception):
pass
class GeonamesClient(object):
'''Simple GeoNames.org client for searching and geocoding terms.
:param username: GeoNames.org username to use for all API requests
'''
base_url = 'http://api.geonames.org'
def __init__(self, username):
self.username = username
def geocode(self, query=None, name=None, name_equals=None,
exactly_one=True, country_bias=None,
country=None, feature_code=None,
feature_class=None, admin_code1=None):
'''Implements the `GeoNames.org search API`_. Generally, you should
supply (only) one of query, name, or name_equals, but that is not strictly
required (e.g., if you want to look up a state or region by country code and
admin code).
.. _GeoNames.org search API: http://www.geonames.org/export/geonames-search.html
:param query: search over all attributes of a place
:param name: search by place name only
:param name_equals: search by exact place name
:param exactly_one: return only the first match (defaults to true);
if false, returns the full list of results
:param country_bias: list matches from the specified country first;
countries should be specified by two-letter codes
:param country: only return matches from the country;
countries should be specified by two-letter codes
:param feature_code: restrict results to one or more GeoNames feature codes
:param feature_class: restrict results to one or more GeoNames feature classes
:param admin_code1: restrict results by the specified admin code (generally
should be used with country bias)
'''
api_url = '%s/searchJSON' % self.base_url
params = {'username': self.username, 'orderBy': 'relevance'}
# query term (really only expect one of these)
if query:
# the GeoNames search API expects the full-text query parameter as 'q'
params['q'] = query
if name:
params['name'] = name
if name_equals:
params['name_equals'] = name_equals
if exactly_one:
params['maxRows'] = 1
if country_bias:
params['countryBias'] = country_bias
if country:
params['country'] = country
if admin_code1:
params['adminCode1'] = admin_code1
if feature_code:
# TODO: check that this works correctly for list of params
params['featureCode'] = feature_code
if feature_class:
params['featureClass'] = feature_class
r = requests.get(api_url, params=params)
result = r.json()
if result['totalResultsCount']:
if exactly_one:
return GeonamesResult(result['geonames'][0])
else:
return [GeonamesResult(res) for res in result['geonames']]
def get_by_id(self, geonames_id):
'''Get information about a specific GeoNames ID.
:param geonames_id: geonames identifier to lookup
:returns: :class:`GeonamesResult`
'''
params = {'username': self.username, 'geonameId': geonames_id}
api_url = '%s/getJSON' % self.base_url
resp = requests.get(api_url, params=params)
if resp.status_code != requests.codes.ok:
raise GeonamesException('Error retrieving GeoNames %s: %s' % \
(geonames_id, resp.content))
# geonames returns 200 for not found, have to check contents
data = resp.json()
if 'status' in data and data['status']['value'] == 15:
raise GeonamesException('Error retrieving GeoNames %s: %s' % \
(geonames_id, data['status']['message']))
return GeonamesResult(data)
class GeonamesResult(object):
'''Simple result class for locations returned by geonames search,
compatible with :mod:`geopy` results.'''
def __init__(self, data):
self.latitude = data['lat']
self.longitude = data['lng']
self.raw = data
def __unicode__(self):
return self.raw['name']
def __repr__(self):
return u'<GeonamesResult %s>' % unicode(self)
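# Minimal usage sketch (not part of the original module); 'demo' below is only
# a placeholder for a valid GeoNames.org username.
if __name__ == '__main__':
    client = GeonamesClient(username='demo')
    result = client.geocode(name_equals='Atlanta', country_bias='US')
    if result is not None:
        print '%s: %s, %s' % (result.raw['name'], result.latitude, result.longitude)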
|
import errno
import logging
import os.path
from hadoop import confparse
from desktop.lib import security_util
from libsentry.conf import SENTRY_CONF_DIR, HOSTNAME
LOG = logging.getLogger(__name__)
_SITE_DICT = None
_CONF_HIVE_PROVIDER = 'hive.sentry.server'
_CONF_SENTRY_SERVER_PRINCIPAL = 'sentry.service.server.principal'
_CONF_SENTRY_SERVER_SECURITY_MODE = 'sentry.service.security.mode'
_CONF_SENTRY_SERVER_ADMIN_GROUP = 'sentry.service.admin.group'
def reset():
global _SITE_DICT
_SITE_DICT = None
def get_conf(name='sentry'):
if _SITE_DICT is None:
_parse_sites()
return _SITE_DICT[name]
def get_hive_sentry_provider():
return get_conf(name='hive').get(_CONF_HIVE_PROVIDER, 'server1')
def get_sentry_server_principal():
# Get kerberos principal and replace host pattern
principal = get_conf().get(_CONF_SENTRY_SERVER_PRINCIPAL, None)
if principal:
fqdn = security_util.get_fqdn(HOSTNAME.get())
return security_util.get_kerberos_principal(principal, fqdn)
else:
return None
def get_sentry_server_authentication():
return get_conf().get(_CONF_SENTRY_SERVER_SECURITY_MODE, 'NOSASL').upper()
def get_sentry_server_admin_groups():
return get_conf().get(_CONF_SENTRY_SERVER_ADMIN_GROUP, '').split(',')
def _parse_sites():
global _SITE_DICT
_SITE_DICT = {}
paths = [
('sentry', os.path.join(SENTRY_CONF_DIR.get(), 'sentry-site.xml')),
]
try:
from beeswax.conf import HIVE_CONF_DIR
paths.append(('hive', os.path.join(HIVE_CONF_DIR.get(), 'sentry-site.xml')))
except Exception, e:
LOG.error('Cannot read Hive sentry site: %s' % e)
for name, path in paths:
_SITE_DICT[name] = _parse_site(path)
def _parse_site(site_path):
try:
data = file(site_path, 'r').read()
except IOError, err:
if err.errno != errno.ENOENT:
LOG.error('Cannot read from "%s": %s' % (site_path, err))
return
data = ""
return confparse.ConfParse(data)
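# Usage sketch (illustration only): once SENTRY_CONF_DIR points at a directory
# containing sentry-site.xml, callers typically do:
#
#   principal = get_sentry_server_principal()
#   admin_groups = get_sentry_server_admin_groups()
#   reset()  # force the site files to be re-parsed after they change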
|
"""
Copyright 2012 Pontiflex, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ca.models import DBSession
from ca.security.authority.config import Secrets, RevokeDB
from ca.security.authz.policy import capability_finder
from ca.security.authz.policy import CapabilityAuthorizationPolicy
import pyramid.tweens
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.config import Configurator
from pyramid_beaker import session_factory_from_settings
from sqlalchemy import engine_from_config
AUTH_SECRET = 'thisisasecret'
AUTH_SECURE = False
AUTH_COOKIE = 'AUTH_TICKET'
AUTH_TIMEOUT = 600
AUTH_REISSUE = AUTH_TIMEOUT // 10
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
# Parse the CA settings (must occur before creating the Configurator)
Secrets.parse_config(settings)
RevokeDB.parse_config(settings)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
config = Configurator(settings=settings)
session_factory = session_factory_from_settings(settings)
config.set_session_factory(session_factory)
authn_policy = AuthTktAuthenticationPolicy(AUTH_SECRET, secure=AUTH_SECURE,
http_only=True, include_ip=True, cookie_name=AUTH_COOKIE, wild_domain=False,
timeout=AUTH_TIMEOUT, reissue_time=AUTH_REISSUE, callback=capability_finder)
authz_policy = CapabilityAuthorizationPolicy()
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('home', '/')
config.add_route('test', '/test')
config.add_route('login', '/login')
config.add_route('logout', '/logout')
config.add_route('crl', '/crl')
config.add_route('request', '/{type}/request')
config.add_route('check', '/{type}/check')
config.add_route('review', '/{type}/review')
config.add_route('revoke', '/{type}/revoke')
config.scan()
return config.make_wsgi_app()
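# Deployment note (not part of the original module): `main` is a standard
# PasteDeploy entry point, so the application is normally served from an .ini
# file, e.g. `pserve development.ini` once the package is installed. The .ini
# must supply the sqlalchemy.* settings plus the CA-specific settings consumed
# by Secrets.parse_config and RevokeDB.parse_config (not shown here).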
|
"""
Patternfly user interface module.
This module contains models and page classes for UI elements from the
Patternfly library, see: https://www.patternfly.org/
"""
|
from nltk.book import *
# Frequency distribution of word lengths in each text.
fdist1 = FreqDist([len(w) for w in text1])
fdist2 = FreqDist([len(w) for w in text2])
print fdist1['that']
print fdist2['that']
print
print fdist1.freq('that')
print fdist2.freq('that')
print
print fdist1.N()
print fdist2.N()
print
print fdist1.keys()
print fdist2.keys()
print
print fdist1.max()
print fdist2.max()
print
print fdist1.tabulate()
print fdist2.tabulate()
print
print fdist1.plot()
print fdist2.plot()
print
print fdist1.plot(cumulative=True)
print fdist2.plot(cumulative=True)
print
print fdist1 < fdist2
|
import os
import sys
import time
from optparse import OptionParser
from pyami import mem
from appionlib import apParam
from appionlib import apDisplay
class BasicScript(object):
#=====================
def __init__(self,optargs=sys.argv[1:],quiet=False):
"""
Starts a new function and gets all the parameters
"""
### setup some expected values
self.startmem = mem.active()
self.t0 = time.time()
self.createDefaultStats()
self.quiet = quiet
self.timestamp = apParam.makeTimestamp()
if not self.quiet:
apDisplay.printMsg("Time stamp: "+self.timestamp)
self.functionname = apParam.getFunctionName(sys.argv[0])
if not self.quiet:
apDisplay.printMsg("Function name: "+self.functionname)
apParam.setUmask()
self.parsePythonPath()
loadavg = os.getloadavg()[0]
if loadavg > 2.0:
apDisplay.printMsg("Load average is %.2f, wait for %.1f second " % (round(loadavg,2),loadavg**2))
time.sleep(loadavg**2)
apDisplay.printMsg("Load average is high "+str(round(loadavg,2)))
### setup default parser: run directory, etc.
self.setParams(optargs)
self.checkConflicts()
### write function log
self.logfile = apParam.writeFunctionLog(sys.argv, msg=(not self.quiet))
### any custom init functions go here
self.onInit()
def setParams(self,optargs=sys.argv[1:]):
self.parser = OptionParser()
self.setupParserOptions()
self.params = apParam.convertParserToParams(self.parser)
self.checkForDuplicateCommandLineInputs(optargs)
#=====================
def checkForDuplicateCommandLineInputs(self,optargs=sys.argv[1:]):
args = optargs
argmdict = {}
for arg in args:
elements=arg.split('=')
opt = elements[0].lower()
if opt[0] == "-":
## if action='append', then opt is allowed multiple times
option = self.parser.get_option(opt)
if option is not None and option.action == 'append':
multiple_ok = True
else:
multiple_ok = False
if opt in argmdict and not multiple_ok:
apDisplay.printError("Multiple arguments were supplied for argument: "+str(opt))
argmdict[opt] = True
#=====================
def createDefaultStats(self):
self.stats = {}
self.stats['starttime'] = time.time()
self.stats['count'] = 1
self.stats['lastcount'] = 0
self.stats['startmem'] = mem.active()
self.stats['memleak'] = 0
self.stats['peaksum'] = 0
self.stats['lastpeaks'] = None
self.stats['imagesleft'] = 1
self.stats['peaksumsq'] = 0
self.stats['timesum'] = 0
self.stats['timesumsq'] = 0
self.stats['skipcount'] = 0
self.stats['waittime'] = 0
self.stats['lastimageskipped'] = False
self.stats['notpair'] = 0
self.stats['memlist'] = [mem.active()]
#=====================
def close(self):
self.onClose()
loadavg = os.getloadavg()[0]
if loadavg > 2.0:
apDisplay.printMsg("Load average is high "+str(round(loadavg,2)))
time.sleep(loadavg**2)
apParam.closeFunctionLog(functionname=self.functionname,
logfile=self.logfile, msg=(not self.quiet))
if self.quiet is False:
apDisplay.printMsg("Ended at "+time.strftime("%a, %d %b %Y %H:%M:%S"))
apDisplay.printMsg("Memory increase during run: %.3f MB"%((mem.active()-self.startmem)/1024.0))
apDisplay.printColor("Total run time:\t"+apDisplay.timeString(time.time()-self.t0),"green")
#=====================
def parsePythonPath(self):
pythonpath = os.environ.get("PYTHONPATH")
if pythonpath is None:
return
paths = pythonpath.split(":")
leginons = {}
appions = {}
for p in paths:
if "appion" in p:
appions[p] = None
if "leginon" in p:
leginons[p] = None
leginons = leginons.keys()
appions = appions.keys()
if len(appions) > 1:
apDisplay.printWarning("There is more than one appion directory in your PYTHONPATH")
print appions
if len(leginons) > 1:
apDisplay.printWarning("There is more than one leginon directory in your PYTHONPATH")
print leginons
#######################################################
#### ITEMS BELOW CAN BE SPECIFIED IN A NEW PROGRAM ####
#######################################################
#=====================
def setupParserOptions(self):
"""
set the input parameters
this function should be rewritten in each program
"""
apDisplay.printError("you did not create a 'setupParserOptions' function in your script")
self.parser.set_usage("Usage: %prog --commit --description='<text>' [options]")
self.parser.add_option("--stackid", dest="stackid", type="int",
help="ID for particle stack (optional)", metavar="INT")
#=====================
def checkConflicts(self):
"""
make sure the necessary parameters are set correctly
"""
apDisplay.printError("you did not create a 'checkConflicts' function in your script")
if self.params['runname'] is None:
apDisplay.printError("enter a run name ID, e.g. --runname=run1")
if self.params['description'] is None:
apDisplay.printError("enter a description, e.g. --description='awesome data'")
#=====================
def start(self):
"""
this is the main component of the script
where all the processing is done
"""
raise NotImplementedError()
#=====================
def onInit(self):
return
#=====================
def onClose(self):
return
class BasicScriptInstanceRun(object):
'''
Create an instance of a subclass of BasicScript
according to the jobtype and then run it
'''
def __init__(self):
command = sys.argv[1:]
self.jobtype = self.getJobType(command)
self.app = self.createInst(self.jobtype,command)
if self.app is None:
apDisplay.printError('No BasicScript subclass instance created')
else:
self.app.start()
self.run()
self.app.close()
def getJobType(self, command):
jobtype = None
#Search for the command option that specified the job type
for option in command:
if option.startswith(r'--jobtype='):
#We only need the part after the '='
jobtype = option.split('=')[1]
# Don't process any more of the list than needed
break
return jobtype
def createInst(self, jobtype, command):
'''
Create an instance of BasicScript or one of its subclasses according to the
jobtype. (This base implementation ignores jobtype and always returns a
plain BasicScript.)
'''
jobInstance = BasicScript()
return jobInstance
def run(self):
'''
Do something
'''
pass
class TestScript(BasicScript):
#------------
def setupParserOptions(self):
apDisplay.printMsg("Parser options")
#------------
def checkConflicts(self):
apDisplay.printMsg("Conflicts")
#------------
def start(self):
apDisplay.printMsg("Hey this works")
if __name__ == '__main__':
testscript = TestScript()
testscript.start()
testscript.close()
|
import json
import urllib
import urlparse
import traceback
import sys
import log
import re
ESCAPE_PATTERN = re.compile('%|{')
def prepareParams(request):
if request.method == 'POST':
params = dict(urlparse.parse_qsl(request.data))
elif request.method == 'GET':
params = dict(request.args.items())
else:
return None
return params if len(params) > 0 else None
def loadJson(data):
try:
result = json.loads(urllib.unquote_plus(data))
except Exception, e:
result = json.loads(data)
return result
def parseData(reqUrl, request):
if reqUrl.strip() == '':
return None
# check encryption mode
encMode = 'enc.apg' in reqUrl
parsedData = prepareParams(request)
try:
if parsedData:
if 'eData' in parsedData.keys():
if encMode and not re.search(ESCAPE_PATTERN, parsedData['eData']):
log.error("Encryption mode is not yet supported!")
extData = {}
else:
extData = parsedData['eData']
if len(extData) == 0:
parsedData['eData'] = "truncated"
else:
extDataDict = loadJson(extData)
parsedData['eData'] = True
parsedData.update(extDataDict)
else:
parsedData['eData'] = False
if 'pdata' in parsedData.keys():
log.debug("pdata %s" % parsedData['pdata'])
extPdata = loadJson(parsedData['pdata'])
parsedData['pdata'] = True
parsedData.update(extPdata)
except Exception, e:
log.error("POST parsing ERROR: %s" % e)
log.error(parsedData)
log.error('\n'.join(traceback.format_exception(*sys.exc_info())))
if parsedData is not None and 'rip' not in parsedData and 'X-Forwarded-For' in request.headers:
parsedData['rip'] = request.headers['X-Forwarded-For']
log.debug("last parsedData \n")
log.debug(parsedData)
return parsedData
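# Minimal sketch (not part of the original module) of what prepareParams
# expects from the framework request object; FakeGetRequest is a hypothetical
# stand-in used only for illustration.
if __name__ == '__main__':
    class FakeGetRequest(object):
        method = 'GET'
        args = {'uid': '42', 'pdata': '{"k": "v"}'}
    print prepareParams(FakeGetRequest())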
|
__author__ = "Simone Campagna"
__all__ = [
'DbTable',
]
import collections
class DbTable(object):
def __init__(self, fields, dict_type=None, singleton=False):
self.fields = collections.OrderedDict(fields)
if dict_type is None:
dict_type = collections.namedtuple('DbTable_dict_type', self.fields.keys())
self.dict_type = dict_type
if hasattr(self.dict_type, '_fields'):
self.field_names = self.dict_type._fields
else:
self.field_names = tuple(self.fields.keys())
self.singleton = singleton
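# Usage sketch (not part of the original module): field names map to column
# metadata, and the generated namedtuple dict_type carries one row.
if __name__ == '__main__':
    table = DbTable([('id', 'INTEGER'), ('name', 'TEXT')])
    print table.field_names  # -> ('id', 'name')
    row = table.dict_type(id=1, name='example')
    print row.name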
|
import os
import sys
from os.path import relpath, join
from setuptools import find_packages, setup
from setuptools.command.install import install
import versioneer
assert sys.version_info[:2] == (2, 7), "Sorry, this package requires Python 2.7."
PACKAGE_NAME = 'moldesign'
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
Intended Audience :: Education
License :: OSI Approved :: Apache Software License
Programming Language :: Python :: 2.7
Programming Language :: Python :: 2 :: Only
Topic :: Scientific/Engineering :: Chemistry
Topic :: Scientific/Engineering :: Physics
Topic :: Scientific/Engineering :: Visualization
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
HOME = os.environ['HOME']
CONFIG_DIR = os.path.join(HOME, '.moldesign')
PYEXT = set('.py .pyc .pyo'.split())
with open('requirements.txt', 'r') as reqfile:
requirements = [x.strip() for x in reqfile if x.strip()]
def find_package_data(pkgdir):
""" Just include all files that won't be included as package modules.
"""
files = []
for root, dirnames, filenames in os.walk(pkgdir):
not_a_package = '__init__.py' not in filenames
for fn in filenames:
basename, fext = os.path.splitext(fn)
if not_a_package or (fext not in PYEXT) or ('static' in fn):
files.append(relpath(join(root, fn), pkgdir))
return files
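# Illustration: returns package-data paths relative to pkgdir, i.e. any file
# that is not a .py/.pyc/.pyo module, anything under a directory without an
# __init__.py, and anything with 'static' in its filename.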
class PostInstall(install):
def run(self):
install.run(self)
self.prompt_intro()
def prompt_intro(self): # this doesn't actually display - print statements don't work?
print('Thank you for installing the Molecular Design Toolkit!!!')
print('For help, documentation, and any questions, visit us at ')
print(' http://moldesign.bionano.autodesk.com/')
print('\nTo get started, please run:')
print(' >>> python -m moldesign intro')
cmdclass = versioneer.get_cmdclass()
cmdclass['install'] = PostInstall
setup(
name=PACKAGE_NAME,
version=versioneer.get_version(),
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
package_data={PACKAGE_NAME: find_package_data(PACKAGE_NAME)},
install_requires=requirements,
url='http://moldesign.bionano.autodesk.com',
cmdclass=cmdclass,
license='Apache 2.0',
author='Aaron Virshup, BioNano Research at Autodesk',
author_email='moleculardesigntoolkit@autodesk.com',
description='The Molecular Design Toolkit: Dead-simple chemical simulation, visualization, '
'and cloud computing in a notebook'
)
|
import errno
import logging
import mimetypes
import operator
import os
import parquet
import posixpath
import re
import shutil
import stat as stat_module
import urllib
from datetime import datetime
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.template.defaultfilters import stringformat, filesizeformat
from django.http import Http404, HttpResponse, HttpResponseNotModified
from django.views.decorators.http import require_http_methods
from django.views.static import was_modified_since
from django.shortcuts import redirect
from django.utils.functional import curry
from django.utils.http import http_date
from django.utils.html import escape
from django.utils.translation import ugettext as _
from cStringIO import StringIO
from gzip import GzipFile
from avro import datafile, io
from aws.s3.s3fs import S3FileSystemException
from desktop import appmanager
from desktop.lib import i18n, paginator
from desktop.lib.conf import coerce_bool
from desktop.lib.django_util import make_absolute, render, format_preserving_redirect
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.fs import splitpath
from hadoop.fs.hadoopfs import Hdfs
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.fsutils import do_overwrite_save
from filebrowser.conf import MAX_SNAPPY_DECOMPRESSION_SIZE
from filebrowser.conf import SHOW_DOWNLOAD_BUTTON
from filebrowser.conf import SHOW_UPLOAD_BUTTON
from filebrowser.lib.archives import archive_factory
from filebrowser.lib.rwx import filetype, rwx
from filebrowser.lib import xxd
from filebrowser.forms import RenameForm, UploadFileForm, UploadArchiveForm, MkDirForm, EditorForm, TouchForm,\
RenameFormSet, RmTreeFormSet, ChmodFormSet, ChownFormSet, CopyFormSet, RestoreFormSet,\
TrashPurgeForm
DEFAULT_CHUNK_SIZE_BYTES = 1024 * 4 # 4KB
MAX_CHUNK_SIZE_BYTES = 1024 * 1024 # 1MB
DOWNLOAD_CHUNK_SIZE = 64 * 1024 * 1024 # 64MB
BYTES_PER_LINE = 16
BYTES_PER_SENTENCE = 2
MAX_FILEEDITOR_SIZE = 256 * 1024
INLINE_DISPLAY_MIMETYPE = re.compile('video/|image/|audio/|application/pdf|application/msword|application/excel|'
'application/vnd\.ms|'
'application/vnd\.openxmlformats')
logger = logging.getLogger(__name__)
class ParquetOptions(object):
def __init__(self, col=None, format='json', no_headers=True, limit=-1):
self.col = col
self.format = format
self.no_headers = no_headers
self.limit = limit
def index(request):
# Redirect to home directory by default
path = request.user.get_home_directory()
try:
if not request.fs.isdir(path):
path = '/'
except Exception:
pass
return view(request, path)
def _file_reader(fh):
"""Generator that reads a file, chunk-by-chunk."""
while True:
chunk = fh.read(DOWNLOAD_CHUNK_SIZE)
if chunk == '':
fh.close()
break
yield chunk
def download(request, path):
"""
Downloads a file.
This is inspired by django.views.static.serve.
?disposition={attachment, inline}
"""
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s.") % {'path': escape(path)})
if not request.fs.isfile(path):
raise PopupException(_("'%(path)s' is not a file.") % {'path': path})
content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
stats = request.fs.stats(path)
mtime = stats['mtime']
size = stats['size']
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), mtime, size):
return HttpResponseNotModified()
# TODO(philip): Ideally a with statement would protect from leaks,
# but tricky to do here.
fh = request.fs.open(path)
response = HttpResponse(_file_reader(fh), content_type=content_type)
response["Last-Modified"] = http_date(stats['mtime'])
response["Content-Length"] = stats['size']
response['Content-Disposition'] = request.GET.get('disposition', 'attachment')
return response
def view(request, path):
"""Dispatches viewing of a path to either index() or fileview(), depending on type."""
# default_to_home is set in bootstrap.js
if 'default_to_home' in request.GET:
home_dir_path = request.user.get_home_directory()
if request.fs.isdir(home_dir_path):
return format_preserving_redirect(request, reverse(view, kwargs=dict(path=home_dir_path)))
# default_to_home is set in bootstrap.js
if 'default_to_trash' in request.GET:
home_trash = request.fs.join(request.fs.trash_path, 'Current', request.user.get_home_directory()[1:])
if request.fs.isdir(home_trash):
return format_preserving_redirect(request, reverse(view, kwargs=dict(path=home_trash)))
if request.fs.isdir(request.fs.trash_path):
return format_preserving_redirect(request, reverse(view, kwargs=dict(path=request.fs.trash_path)))
try:
decoded_path = urllib.unquote(path)
if path != decoded_path:
path = decoded_path
stats = request.fs.stats(path)
if stats.isDir:
return listdir_paged(request, path)
else:
return display(request, path)
except (IOError, WebHdfsException), e:
msg = _("Cannot access: %(path)s. ") % {'path': escape(path)}
if "Connection refused" in e.message:
msg += _(" The HDFS REST service is not available. ")
if request.user.is_superuser and not _is_hdfs_superuser(request):
msg += _(' Note: you are a Hue admin but not a HDFS superuser, "%(superuser)s" or part of HDFS supergroup, "%(supergroup)s".') \
% {'superuser': request.fs.superuser, 'supergroup': request.fs.supergroup}
if request.is_ajax():
exception = {
'error': msg
}
return JsonResponse(exception)
else:
raise PopupException(msg , detail=e)
def home_relative_view(request, path):
home_dir_path = request.user.get_home_directory()
if request.fs.exists(home_dir_path):
path = '%s%s' % (home_dir_path, path)
return view(request, path)
def edit(request, path, form=None):
"""Shows an edit form for the given path. Path does not necessarily have to exist."""
try:
stats = request.fs.stats(path)
except IOError, ioe:
# A file not found is OK, otherwise re-raise
if ioe.errno == errno.ENOENT:
stats = None
else:
raise
# Can't edit a directory
if stats and stats['mode'] & stat_module.S_IFDIR:
raise PopupException(_("Cannot edit a directory: %(path)s") % {'path': path})
# Maximum size of edit
if stats and stats['size'] > MAX_FILEEDITOR_SIZE:
raise PopupException(_("File too big to edit: %(path)s") % {'path': path})
if not form:
encoding = request.REQUEST.get('encoding') or i18n.get_site_encoding()
if stats:
f = request.fs.open(path)
try:
try:
current_contents = unicode(f.read(), encoding)
except UnicodeDecodeError:
raise PopupException(_("File is not encoded in %(encoding)s; cannot be edited: %(path)s.") % {'encoding': encoding, 'path': path})
finally:
f.close()
else:
current_contents = u""
form = EditorForm(dict(path=path, contents=current_contents, encoding=encoding))
data = dict(
exists=(stats is not None),
stats=stats,
form=form,
path=path,
filename=os.path.basename(path),
dirname=os.path.dirname(path),
breadcrumbs = parse_breadcrumbs(path),
show_download_button = SHOW_DOWNLOAD_BUTTON.get())
return render("edit.mako", request, data)
def save_file(request):
"""
The POST endpoint to save a file in the file editor.
Does the save and then redirects back to the edit page.
"""
form = EditorForm(request.POST)
is_valid = form.is_valid()
path = form.cleaned_data.get('path')
if request.POST.get('save') == "Save As":
if not is_valid:
return edit(request, path, form=form)
else:
return render("saveas.mako", request, {'form': form})
if not path:
raise PopupException(_("No path specified"))
if not is_valid:
return edit(request, path, form=form)
encoding = form.cleaned_data['encoding']
data = form.cleaned_data['contents'].encode(encoding)
try:
if request.fs.exists(path):
do_overwrite_save(request.fs, path, data)
else:
request.fs.create(path, overwrite=False, data=data)
except WebHdfsException, e:
raise PopupException(_("The file could not be saved"), detail=e.message.splitlines()[0])
except Exception, e:
raise PopupException(_("The file could not be saved"), detail=e)
messages.info(request, _('Saved %(path)s.') % {'path': os.path.basename(path)})
request.path = reverse("filebrowser.views.edit", kwargs=dict(path=path))
return edit(request, path, form)
def parse_breadcrumbs(path):
parts = splitpath(path)
url, breadcrumbs = '', []
for part in parts:
if url and not url.endswith('/'):
url += '/'
url += part
breadcrumbs.append({'url': url, 'label': part})
return breadcrumbs
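# Illustration: each path component from splitpath() becomes one breadcrumb
# whose 'url' is the cumulative path up to and including that component and
# whose 'label' is the component itself.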
def listdir(request, path, chooser):
"""
Implements directory listing (or index).
Intended to be called via view().
TODO: Remove?
"""
if not request.fs.isdir(path):
raise PopupException(_("Not a directory: %(path)s") % {'path': path})
file_filter = request.REQUEST.get('file_filter', 'any')
assert file_filter in ['any', 'file', 'dir']
home_dir_path = request.user.get_home_directory()
breadcrumbs = parse_breadcrumbs(path)
data = {
'path': path,
'file_filter': file_filter,
'breadcrumbs': breadcrumbs,
'current_dir_path': path,
'current_request_path': request.path,
'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
'cwd_set': True,
'is_superuser': request.user.username == request.fs.superuser,
'groups': request.user.username == request.fs.superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
'users': request.user.username == request.fs.superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
'superuser': request.fs.superuser,
'show_upload': (request.REQUEST.get('show_upload') == 'false' and (False,) or (True,))[0],
'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
'show_upload_button': SHOW_UPLOAD_BUTTON.get()
}
stats = request.fs.listdir_stats(path)
# Include parent dir, unless at filesystem root.
if not request.fs.isroot(path):
parent_path = request.fs.parent_path(path)
parent_stat = request.fs.stats(parent_path)
# The 'path' field would be absolute, but we want its basename to be
# actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
parent_stat['path'] = parent_path
parent_stat['name'] = ".."
stats.insert(0, parent_stat)
data['files'] = [_massage_stats(request, stat) for stat in stats]
if chooser:
return render('chooser.mako', request, data)
else:
return render('listdir.mako', request, data)
def _massage_page(page):
return {
'number': page.number,
'num_pages': page.num_pages(),
'previous_page_number': page.previous_page_number(),
'next_page_number': page.next_page_number(),
'start_index': page.start_index(),
'end_index': page.end_index(),
'total_count': page.total_count()
}
def listdir_paged(request, path):
"""
A paginated version of listdir.
Query parameters:
pagenum - The page number to show. Defaults to 1.
pagesize - How many to show on a page. Defaults to 15.
sortby=? - Specify attribute to sort by. Accepts:
(type, name, atime, mtime, size, user, group)
Defaults to name.
descending - Specify a descending sort order.
Default to false.
filter=? - Specify a substring filter to search for in
the filename field.
"""
if not request.fs.isdir(path):
raise PopupException("Not a directory: %s" % (path,))
pagenum = int(request.GET.get('pagenum', 1))
pagesize = int(request.GET.get('pagesize', 30))
do_as = None
if request.user.is_superuser or request.user.has_hue_permission(action="impersonate", app="security"):
do_as = request.GET.get('doas', request.user.username)
if hasattr(request, 'doas'):
do_as = request.doas
home_dir_path = request.user.get_home_directory()
breadcrumbs = parse_breadcrumbs(path)
if do_as:
all_stats = request.fs.do_as_user(do_as, request.fs.listdir_stats, path)
else:
all_stats = request.fs.listdir_stats(path)
# Filter first
filter_str = request.GET.get('filter', None)
if filter_str:
filtered_stats = filter(lambda sb: filter_str in sb['name'], all_stats)
all_stats = filtered_stats
# Sort next
sortby = request.GET.get('sortby', None)
descending_param = request.GET.get('descending', None)
if sortby is not None:
if sortby not in ('type', 'name', 'atime', 'mtime', 'user', 'group', 'size'):
logger.info("Invalid sort attribute '%s' for listdir." %
(sortby,))
else:
all_stats = sorted(all_stats,
key=operator.attrgetter(sortby),
reverse=coerce_bool(descending_param))
# Do pagination
page = paginator.Paginator(all_stats, pagesize).page(pagenum)
shown_stats = page.object_list
# Include parent dir always as second option, unless at filesystem root.
if not request.fs.isroot(path):
parent_path = request.fs.parent_path(path)
parent_stat = request.fs.stats(parent_path)
# The 'path' field would be absolute, but we want its basename to be
# actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
parent_stat['path'] = parent_path
parent_stat['name'] = ".."
shown_stats.insert(0, parent_stat)
# Include same dir always as first option to see stats of the current folder
current_stat = request.fs.stats(path)
# The 'path' field would be absolute, but we want its basename to be
# actually '.' for display purposes. Encode it since _massage_stats expects byte strings.
current_stat['path'] = path
current_stat['name'] = "."
shown_stats.insert(1, current_stat)
page.object_list = [ _massage_stats(request, s) for s in shown_stats ]
is_trash_enabled = request.fs._get_scheme(path) == 'hdfs'
is_fs_superuser = _is_hdfs_superuser(request)
data = {
'path': path,
'breadcrumbs': breadcrumbs,
'current_request_path': request.path,
'is_trash_enabled': is_trash_enabled,
'files': page.object_list,
'page': _massage_page(page),
'pagesize': pagesize,
'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
'descending': descending_param,
# The following should probably be deprecated
'cwd_set': True,
'file_filter': 'any',
'current_dir_path': path,
'is_fs_superuser': is_fs_superuser,
'groups': is_fs_superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
'users': is_fs_superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
'superuser': request.fs.superuser,
'supergroup': request.fs.supergroup,
'is_sentry_managed': request.fs.is_sentry_managed(path),
'apps': appmanager.get_apps_dict(request.user).keys(),
'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
'show_upload_button': SHOW_UPLOAD_BUTTON.get()
}
return render('listdir.mako', request, data)
def chooser(request, path):
"""
Returns the HTML for a JFrame that will display a file prompt.
Dispatches viewing of a path to either index() or fileview(), depending on type.
"""
# default_to_home is set in bootstrap.js
home_dir_path = request.user.get_home_directory()
if 'default_to_home' in request.GET and request.fs.isdir(home_dir_path):
return listdir(request, home_dir_path, True)
if request.fs.isdir(path):
return listdir(request, path, True)
elif request.fs.isfile(path):
return display(request, path)
else:
raise Http404(_("File not found: %(path)s") % {'path': escape(path)})
def _massage_stats(request, stats):
"""
Massage a stats record as returned by the filesystem implementation
into the format that the views would like it in.
"""
path = stats['path']
normalized = request.fs.normpath(path)
return {
'path': normalized,
'name': stats['name'],
'stats': stats.to_json_dict(),
'mtime': datetime.fromtimestamp(stats['mtime']).strftime('%B %d, %Y %I:%M %p') if stats['mtime'] is not None else '',
'humansize': filesizeformat(stats['size']),
'type': filetype(stats['mode']),
'rwx': rwx(stats['mode'], stats['aclBit']),
'mode': stringformat(stats['mode'], "o"),
'url': make_absolute(request, "view", dict(path=normalized)),
'is_sentry_managed': request.fs.is_sentry_managed(path)
}
def stat(request, path):
"""
Returns just the generic stats of a file.
Intended for use via AJAX (and hence doesn't provide
an HTML view).
"""
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s") % {'path': escape(path)})
stats = request.fs.stats(path)
return JsonResponse(_massage_stats(request, stats))
def content_summary(request, path):
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s") % {'path': escape(path)})
response = {'status': -1, 'message': '', 'summary': None}
try:
stats = request.fs.get_content_summary(path)
response['status'] = 0
response['summary'] = stats.summary
except WebHdfsException, e:
response['message'] = _("The file could not be saved") + e.message.splitlines()[0]
return JsonResponse(response)
def display(request, path):
"""
Implements displaying part of a file.
GET arguments are length, offset, mode, compression and encoding
with reasonable defaults chosen.
Note that display by length and offset are on bytes, not on characters.
TODO(philip): Could easily build in file type detection
(perhaps using something similar to file(1)), as well
as more advanced binary-file viewing capability (de-serialize
sequence files, decompress gzipped text files, etc.).
There exists a python-magic package to interface with libmagic.
"""
if not request.fs.isfile(path):
raise PopupException(_("Not a file: '%(path)s'") % {'path': path})
# display files inline only if it's not an ajax request
if not request.is_ajax():
mimetype = mimetypes.guess_type(path)[0]
if mimetype is not None and INLINE_DISPLAY_MIMETYPE.search(mimetype):
return redirect(reverse('filebrowser.views.download', args=[path]) + '?disposition=inline')
stats = request.fs.stats(path)
encoding = request.GET.get('encoding') or i18n.get_site_encoding()
# I'm mixing URL-based parameters and traditional
# HTTP GET parameters, since URL-based parameters
# can't naturally be optional.
# Need to deal with possibility that length is not present
# because the offset came in via the toolbar manual byte entry.
end = request.GET.get("end")
if end:
end = int(end)
begin = request.GET.get("begin", 1)
if begin:
# Subtract one to zero index for file read
begin = int(begin) - 1
if end:
offset = begin
length = end - begin
if begin >= end:
raise PopupException(_("First byte to display must be before last byte to display."))
else:
length = int(request.GET.get("length", DEFAULT_CHUNK_SIZE_BYTES))
# Display first block by default.
offset = int(request.GET.get("offset", 0))
mode = request.GET.get("mode")
compression = request.GET.get("compression")
if mode and mode not in ["binary", "text"]:
raise PopupException(_("Mode must be one of 'binary' or 'text'."))
if offset < 0:
raise PopupException(_("Offset may not be less than zero."))
if length < 0:
raise PopupException(_("Length may not be less than zero."))
if length > MAX_CHUNK_SIZE_BYTES:
raise PopupException(_("Cannot request chunks greater than %(bytes)d bytes.") % {'bytes': MAX_CHUNK_SIZE_BYTES})
# Do not decompress in binary mode.
if mode == 'binary':
compression = 'none'
# Read out based on meta.
compression, offset, length, contents =\
read_contents(compression, path, request.fs, offset, length)
# Get contents as string for text mode, or at least try
uni_contents = None
if not mode or mode == 'text':
uni_contents = unicode(contents, encoding, errors='replace')
is_binary = uni_contents.find(i18n.REPLACEMENT_CHAR) != -1
# Auto-detect mode
if not mode:
mode = is_binary and 'binary' or 'text'
# Get contents as bytes
if mode == "binary":
xxd_out = list(xxd.xxd(offset, contents, BYTES_PER_LINE, BYTES_PER_SENTENCE))
dirname = posixpath.dirname(path)
# Start with index-like data:
data = _massage_stats(request, request.fs.stats(path))
# And add a view structure:
data["success"] = True
data["view"] = {
'offset': offset,
'length': length,
'end': offset + len(contents),
'dirname': dirname,
'mode': mode,
'compression': compression,
'size': stats['size'],
'max_chunk_size': str(MAX_CHUNK_SIZE_BYTES)
}
data["filename"] = os.path.basename(path)
data["editable"] = stats['size'] < MAX_FILEEDITOR_SIZE
if mode == "binary":
# This might be the wrong thing for ?format=json; doing the
# xxd'ing in javascript might be more compact, or sending a less
# intermediate representation...
logger.debug("xxd: " + str(xxd_out))
data['view']['xxd'] = xxd_out
data['view']['masked_binary_data'] = False
else:
data['view']['contents'] = uni_contents
data['view']['masked_binary_data'] = is_binary
data['breadcrumbs'] = parse_breadcrumbs(path)
data['show_download_button'] = SHOW_DOWNLOAD_BUTTON.get()
return render("display.mako", request, data)
def read_contents(codec_type, path, fs, offset, length):
"""
Reads contents of a passed path, by appropriately decoding the data.
Arguments:
codec_type - The type of codec to use to decode. (Auto-detected if None).
path - The path of the file to read.
fs - The FileSystem instance to use to read.
offset - Offset to seek to before read begins.
length - Amount of bytes to read after offset.
Returns: A tuple of codec_type, offset, length and contents read.
"""
contents = ''
fhandle = None
try:
fhandle = fs.open(path)
stats = fs.stats(path)
# Auto codec detection for [gzip, avro, snappy, none]
if not codec_type:
contents = fhandle.read(3)
fhandle.seek(0)
codec_type = 'none'
if path.endswith('.gz') and detect_gzip(contents):
codec_type = 'gzip'
offset = 0
elif path.endswith('.avro') and detect_avro(contents):
codec_type = 'avro'
elif detect_parquet(fhandle):
codec_type = 'parquet'
elif path.endswith('.snappy') and snappy_installed():
codec_type = 'snappy'
elif snappy_installed() and stats.size <= MAX_SNAPPY_DECOMPRESSION_SIZE.get():
fhandle.seek(0)
if detect_snappy(fhandle.read()):
codec_type = 'snappy'
fhandle.seek(0)
if codec_type == 'gzip':
contents = _read_gzip(fhandle, path, offset, length, stats)
elif codec_type == 'avro':
contents = _read_avro(fhandle, path, offset, length, stats)
elif codec_type == 'parquet':
contents = _read_parquet(fhandle, path, offset, length, stats)
elif codec_type == 'snappy':
contents = _read_snappy(fhandle, path, offset, length, stats)
else:
# for 'none' type.
contents = _read_simple(fhandle, path, offset, length, stats)
finally:
if fhandle:
fhandle.close()
return (codec_type, offset, length, contents)
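# Typical call (sketch, mirroring display() above): auto-detect the codec and
# read the first chunk of a file:
#   codec_type, offset, length, contents = read_contents(None, path, request.fs, 0, 4096)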
def _decompress_snappy(compressed_content):
try:
import snappy
return snappy.decompress(compressed_content)
except Exception, e:
raise PopupException(_('Failed to decompress snappy compressed file.'), detail=e)
def _read_snappy(fhandle, path, offset, length, stats):
if not snappy_installed():
raise PopupException(_('Failed to decompress snappy compressed file. Snappy is not installed.'))
if stats.size > MAX_SNAPPY_DECOMPRESSION_SIZE.get():
raise PopupException(_('Failed to decompress snappy compressed file. File size is greater than allowed max snappy decompression size of %d.') % MAX_SNAPPY_DECOMPRESSION_SIZE.get())
return _read_simple(StringIO(_decompress_snappy(fhandle.read())), path, offset, length, stats)
def _read_avro(fhandle, path, offset, length, stats):
contents = ''
try:
fhandle.seek(offset)
data_file_reader = datafile.DataFileReader(fhandle, io.DatumReader())
try:
contents_list = []
read_start = fhandle.tell()
# Iterate over the entire sought file.
for datum in data_file_reader:
read_length = fhandle.tell() - read_start
if read_length > length and len(contents_list) > 0:
break
else:
datum_str = str(datum) + "\n"
contents_list.append(datum_str)
finally:
data_file_reader.close()
contents = "".join(contents_list)
except:
logging.exception("Could not read avro file at %s" % path)
raise PopupException(_("Failed to read Avro file."))
return contents
def _read_parquet(fhandle, path, offset, length, stats):
try:
dumped_data = StringIO()
parquet._dump(fhandle, ParquetOptions(), out=dumped_data)
dumped_data.seek(offset)
return dumped_data.read()
except:
logging.exception("Could not read parquet file at %s" % path)
raise PopupException(_("Failed to read Parquet file."))
def _read_gzip(fhandle, path, offset, length, stats):
contents = ''
if offset and offset != 0:
raise PopupException(_("Offsets are not supported with Gzip compression."))
try:
contents = GzipFile('', 'r', 0, StringIO(fhandle.read())).read(length)
except:
logging.exception("Could not decompress file at %s" % path)
raise PopupException(_("Failed to decompress file."))
return contents
def _read_simple(fhandle, path, offset, length, stats):
contents = ''
try:
fhandle.seek(offset)
contents = fhandle.read(length)
except:
logging.exception("Could not read file at %s" % path)
raise PopupException(_("Failed to read file."))
return contents
def detect_gzip(contents):
'''This is a silly small function which checks to see if the file is Gzip'''
return contents[:2] == '\x1f\x8b'
def detect_avro(contents):
'''This is a silly small function which checks to see if the file is Avro'''
# Check if the first three bytes are 'O', 'b' and 'j'
return contents[:3] == '\x4F\x62\x6A'
def detect_snappy(contents):
'''
This is a silly small function which checks to see if the file is Snappy.
It requires the entire contents of the compressed file.
This will also return False if the snappy library needed for decompression is not available.
'''
try:
import snappy
return snappy.isValidCompressed(contents)
except:
logging.exception('failed to detect snappy')
return False
def detect_parquet(fhandle):
"""
Detect parquet from magic header bytes.
"""
return parquet._check_header_magic_bytes(fhandle)
def snappy_installed():
'''Snappy is a library that isn't supported by Python 2.4.'''
try:
import snappy
return True
except ImportError:
return False
except:
logging.exception('failed to verify if snappy is installed')
return False
def _calculate_navigation(offset, length, size):
"""
List of (offset, length, string) tuples for suggested navigation through the file.
If offset is -1, then this option is already "selected". (Whereas None would
be the natural pythonic way, Django's template syntax doesn't let us test
against None (since its truth value is the same as 0).)
By all means this logic ought to be in the template, but the template
language is too limiting.
"""
if offset == 0:
first, prev = (-1, None, _("First Block")), (-1, None, _("Previous Block"))
else:
first, prev = (0, length, _("First Block")), (max(0, offset - length), length, _("Previous Block"))
if offset + length >= size:
next, last = (-1, None, _("Next Block")), (-1, None, _("Last Block"))
else:
# 1-off Reasoning: if length is the same as size, you want to start at 0.
next, last = (offset + length, length, _("Next Block")), (max(0, size - length), length, _("Last Block"))
return first, prev, next, last
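# Illustration: for a 1000-byte file with offset=0 and length=100 this returns
# first/prev already "selected" (offset -1) and
# next=(100, 100, "Next Block"), last=(900, 100, "Last Block").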
def default_initial_value_extractor(request, parameter_names):
initial_values = {}
for p in parameter_names:
val = request.GET.get(p)
if val:
initial_values[p] = val
return initial_values
def formset_initial_value_extractor(request, parameter_names):
"""
Builds a list of data that formsets should use by extending some fields to every object,
whilst others are assumed to be received in order.
Formsets should receive data that looks like this: [{'param1': <something>,...}, ...].
The formsets should then handle construction on their own.
"""
def _initial_value_extractor(request):
if not submitted:
return []
# Build data with the list of in-order parameters received in POST data.
# Size can be inferred from the largest list returned in POST data.
data = []
for param in submitted:
i = 0
for val in request.POST.getlist(param):
if len(data) == i:
data.append({})
data[i][param] = val
i += 1
# Extend every data object with recurring params
for kwargs in data:
for recurrent in recurring:
kwargs[recurrent] = request.POST.get(recurrent)
initial_data = data
return {'initial': initial_data}
return _initial_value_extractor
def default_arg_extractor(request, form, parameter_names):
return [form.cleaned_data[p] for p in parameter_names]
def formset_arg_extractor(request, formset, parameter_names):
data = []
for form in formset.forms:
data_dict = {}
for p in parameter_names:
data_dict[p] = form.cleaned_data[p]
data.append(data_dict)
return data
def default_data_extractor(request):
return {'data': request.POST.copy()}
def formset_data_extractor(recurring=[], submitted=[]):
"""
Builds a list of data that formsets should use by extending some fields to every object,
whilst others are assumed to be received in order.
Formsets should receive data that looks like this: [{'param1': <something>,...}, ...].
The formsets should then handle construction on their own.
"""
def _data_extractor(request):
if not submitted:
return []
# Build data with the list of in-order parameters received in POST data.
# Size can be inferred from the largest list returned in POST data.
data = []
for param in submitted:
i = 0
for val in request.POST.getlist(param):
if len(data) == i:
data.append({})
data[i][param] = val
i += 1
# Extend every data object with recurring params
for kwargs in data:
for recurrent in recurring:
kwargs[recurrent] = request.POST.get(recurrent)
initial = list(data)
return {'initial': initial, 'data': data}
return _data_extractor
def generic_op(form_class, request, op, parameter_names, piggyback=None, template="fileop.mako", data_extractor=default_data_extractor, arg_extractor=default_arg_extractor, initial_value_extractor=default_initial_value_extractor, extra_params=None):
"""
Generic implementation for several operations.
@param form_class form to instantiate
@param request incoming request, used for parameters
@param op callable with the filesystem operation
@param parameter_names list of form parameters that are extracted and then passed to op
@param piggyback list of form parameters whose file stats to look up after the operation
@param data_extractor function that extracts POST data to be used by op
@param arg_extractor function that extracts args from a given form or formset
@param initial_value_extractor function that extracts the initial values of a form or formset
@param extra_params dictionary of extra parameters to send to the template for rendering
"""
# Use next for non-ajax requests, when available.
next = request.GET.get("next", request.POST.get("next", None))
ret = dict({
'next': next
})
if extra_params is not None:
ret['extra_params'] = extra_params
for p in parameter_names:
val = request.REQUEST.get(p)
if val:
ret[p] = val
if request.method == 'POST':
form = form_class(**data_extractor(request))
ret['form'] = form
if form.is_valid():
args = arg_extractor(request, form, parameter_names)
try:
op(*args)
except (IOError, WebHdfsException), e:
msg = _("Cannot perform operation.")
# TODO: Only apply this message for HDFS
if request.user.is_superuser and not _is_hdfs_superuser(request):
msg += _(' Note: you are a Hue admin but not a HDFS superuser, "%(superuser)s" or part of HDFS supergroup, "%(supergroup)s".') \
% {'superuser': request.fs.superuser, 'supergroup': request.fs.supergroup}
raise PopupException(msg, detail=e)
except S3FileSystemException, e:
msg = _("S3 filesystem exception.")
raise PopupException(msg, detail=e)
except NotImplementedError, e:
msg = _("Cannot perform operation.")
raise PopupException(msg, detail=e)
if next:
logging.debug("Next: %s" % next)
# Doesn't need to be quoted: quoting is done by HttpResponseRedirect.
return format_preserving_redirect(request, next)
ret["success"] = True
try:
if piggyback:
piggy_path = form.cleaned_data[piggyback]
ret["result"] = _massage_stats(request, request.fs.stats(piggy_path))
except Exception, e:
# Hard to report these more naturally here. These happen either
# because of a bug in the piggy-back code or because of a
# race condition.
logger.exception("Exception while processing piggyback data")
ret["result_error"] = True
ret['user'] = request.user
return render(template, request, ret)
else:
# Initial parameters may be specified with get with the default extractor
initial_values = initial_value_extractor(request, parameter_names)
formset = form_class(initial=initial_values)
ret['form'] = formset
return render(template, request, ret)
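# Minimal usage sketch for generic_op (illustrative only; "SomeForm" and
# do_something are hypothetical; the concrete views below follow this pattern):
#
#   def some_view(request):
#       def do_something(path):
#           request.fs.do_something(path)
#       return generic_op(SomeForm, request, do_something, ["path"], None)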
def rename(request):
def smart_rename(src_path, dest_path):
"""If dest_path doesn't have a directory specified, use same dir."""
if "#" in dest_path:
raise PopupException(_("Could not rename folder \"%s\" to \"%s\": Hashes are not allowed in filenames.") % (src_path, dest_path))
if "/" not in dest_path:
src_dir = os.path.dirname(src_path)
dest_path = request.fs.join(src_dir, dest_path)
if request.fs.exists(dest_path):
raise PopupException(_('The destination path "%s" already exists.') % dest_path)
request.fs.rename(src_path, dest_path)
return generic_op(RenameForm, request, smart_rename, ["src_path", "dest_path"], None)
def mkdir(request):
def smart_mkdir(path, name):
# Make sure only one directory is specified at a time.
# No absolute directory specification allowed.
if posixpath.sep in name or "#" in name:
raise PopupException(_("Could not name folder \"%s\": Slashes or hashes are not allowed in filenames.") % name)
request.fs.mkdir(request.fs.join(path, name))
return generic_op(MkDirForm, request, smart_mkdir, ["path", "name"], "path")
def touch(request):
def smart_touch(path, name):
# Make sure only the filename is specified.
# No absolute path specification allowed.
if posixpath.sep in name:
raise PopupException(_("Could not name file \"%s\": Slashes are not allowed in filenames.") % name)
request.fs.create(request.fs.join(path, name))
return generic_op(TouchForm, request, smart_touch, ["path", "name"], "path")
@require_http_methods(["POST"])
def rmtree(request):
recurring = []
params = ["path"]
def bulk_rmtree(*args, **kwargs):
for arg in args:
request.fs.do_as_user(request.user, request.fs.rmtree, arg['path'], 'skip_trash' in request.GET)
return generic_op(RmTreeFormSet, request, bulk_rmtree, ["path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def move(request):
recurring = ['dest_path']
params = ['src_path']
def bulk_move(*args, **kwargs):
for arg in args:
request.fs.rename(arg['src_path'], arg['dest_path'])
return generic_op(RenameFormSet, request, bulk_move, ["src_path", "dest_path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def copy(request):
recurring = ['dest_path']
params = ['src_path']
def bulk_copy(*args, **kwargs):
for arg in args:
request.fs.copy(arg['src_path'], arg['dest_path'], recursive=True, owner=request.user)
return generic_op(CopyFormSet, request, bulk_copy, ["src_path", "dest_path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def chmod(request):
recurring = ["sticky", "user_read", "user_write", "user_execute", "group_read", "group_write", "group_execute", "other_read", "other_write", "other_execute"]
params = ["path"]
def bulk_chmod(*args, **kwargs):
op = curry(request.fs.chmod, recursive=request.POST.get('recursive', False))
for arg in args:
op(arg['path'], arg['mode'])
# mode here is abused: on input, it's a string, but when retrieved,
# it's an int.
return generic_op(ChmodFormSet, request, bulk_chmod, ['path', 'mode'], "path",
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def chown(request):
# This is a bit clever: generic_op takes an argument (here, args), indicating
# which POST parameters to pick out and pass to the given function.
# We update that mapping based on whether or not the user selected "other".
param_names = ["path", "user", "group"]
if request.POST.get("user") == "__other__":
param_names[1] = "user_other"
if request.POST.get("group") == "__other__":
param_names[2] = "group_other"
recurring = ["user", "group", "user_other", "group_other"]
params = ["path"]
def bulk_chown(*args, **kwargs):
op = curry(request.fs.chown, recursive=request.POST.get('recursive', False))
for arg in args:
varg = [arg[param] for param in param_names]
op(*varg)
return generic_op(ChownFormSet, request, bulk_chown, param_names, "path",
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def trash_restore(request):
recurring = []
params = ["path"]
def bulk_restore(*args, **kwargs):
for arg in args:
request.fs.do_as_user(request.user, request.fs.restore, arg['path'])
return generic_op(RestoreFormSet, request, bulk_restore, ["path"], None,
data_extractor=formset_data_extractor(recurring, params),
arg_extractor=formset_arg_extractor,
initial_value_extractor=formset_initial_value_extractor)
@require_http_methods(["POST"])
def trash_purge(request):
return generic_op(TrashPurgeForm, request, request.fs.purge_trash, [], None)
@require_http_methods(["POST"])
def upload_file(request):
"""
A wrapper around the actual upload view function to clean up the temporary file afterwards if it fails.
Returns JSON.
e.g. {'status': 0/1, 'data': 'message', ...}
"""
response = {'status': -1, 'data': ''}
try:
resp = _upload_file(request)
response.update(resp)
except Exception, ex:
response['data'] = str(ex).split('\n', 1)[0]
hdfs_file = request.FILES.get('hdfs_file')
if hdfs_file and hasattr(hdfs_file, 'remove'): # TODO: Call from proxyFS
hdfs_file.remove()
return JsonResponse(response)
def _upload_file(request):
"""
Handles file uploaded by HDFSfileUploadHandler.
The uploaded file is stored in HDFS at its destination with a .tmp suffix.
We just need to rename it to the destination path.
"""
form = UploadFileForm(request.POST, request.FILES)
response = {'status': -1, 'data': ''}
if request.META.get('upload_failed'):
raise PopupException(request.META.get('upload_failed'))
if form.is_valid():
uploaded_file = request.FILES['hdfs_file']
dest = form.cleaned_data['dest']
filepath = request.fs.join(dest, uploaded_file.name)
if request.fs.isdir(dest) and posixpath.sep in uploaded_file.name:
raise PopupException(_('Sorry, no "%(sep)s" in the filename %(name)s.') % {'sep': posixpath.sep, 'name': uploaded_file.name})
try:
request.fs.upload(file=uploaded_file, path=dest, username=request.user.username)
response['status'] = 0
except IOError, ex:
already_exists = False
try:
already_exists = request.fs.exists(dest)
except Exception:
pass
if already_exists:
msg = _('Destination %(name)s already exists.') % {'name': dest}
else:
msg = _('Copy to %(name)s failed: %(error)s') % {'name': dest, 'error': ex}
raise PopupException(msg)
response.update({
'path': filepath,
'result': _massage_stats(request, request.fs.stats(filepath)),
'next': request.GET.get("next")
})
return response
else:
raise PopupException(_("Error in upload form: %s") % (form.errors,))
@require_http_methods(["POST"])
def upload_archive(request):
"""
A wrapper around the actual upload view function to clean up the temporary file afterwards.
Returns JSON.
e.g. {'status': 0/1, 'data': 'message', ...}
"""
response = {'status': -1, 'data': ''}
try:
try:
resp = _upload_archive(request)
response.update(resp)
except Exception, ex:
response['data'] = str(ex)
finally:
hdfs_file = request.FILES.get('hdfs_file')
if hdfs_file:
hdfs_file.remove()
return JsonResponse(response)
def _upload_archive(request):
"""
Handles archive upload.
The uploaded file is stored in memory.
We need to extract it and rename it.
"""
form = UploadArchiveForm(request.POST, request.FILES)
response = {'status': -1, 'data': ''}
if form.is_valid():
uploaded_file = request.FILES['archive']
# Always a dir
if request.fs.isdir(form.cleaned_data['dest']) and posixpath.sep in uploaded_file.name:
raise PopupException(_('No "%(sep)s" allowed in the filename %(name)s.') % {'sep': posixpath.sep, 'name': uploaded_file.name})
dest = request.fs.join(form.cleaned_data['dest'], uploaded_file.name)
try:
# Extract if necessary
# Make sure dest path is without the extension
if dest.lower().endswith('.zip'):
temp_path = archive_factory(uploaded_file, 'zip').extract()
if not temp_path:
raise PopupException(_('Could not extract contents of file.'))
# Move the file to where it belongs
dest = dest[:-4]
elif dest.lower().endswith('.tar.gz') or dest.lower().endswith('.tgz'):
temp_path = archive_factory(uploaded_file, 'tgz').extract()
if not temp_path:
raise PopupException(_('Could not extract contents of file.'))
# Move the file to where it belongs
dest = dest[:-7] if dest.lower().endswith('.tar.gz') else dest[:-4]
elif dest.lower().endswith('.bz2') or dest.lower().endswith('.bzip2'):
temp_path = archive_factory(uploaded_file, 'bz2').extract()
if not temp_path:
raise PopupException(_('Could not extract contents of file.'))
# Move the file to where it belongs
dest = dest[:-6] if dest.lower().endswith('.bzip2') else dest[:-4]
else:
raise PopupException(_('Could not interpret archive type.'))
request.fs.copyFromLocal(temp_path, dest)
shutil.rmtree(temp_path)
response['status'] = 0
except IOError, ex:
already_exists = False
try:
already_exists = request.fs.exists(dest)
except Exception:
pass
if already_exists:
msg = _('Destination %(name)s already exists.') % {'name': dest}
else:
msg = _('Copy to %(name)s failed: %(error)s') % {'name': dest, 'error': ex}
raise PopupException(msg)
response.update({
'path': dest,
'result': _massage_stats(request, request.fs.stats(dest)),
'next': request.GET.get("next")
})
return response
else:
raise PopupException(_("Error in upload form: %s") % (form.errors,))
def status(request):
status = request.fs.status()
data = {
# Beware: "messages" is special in the context browser.
'msgs': status.get_messages(),
'health': status.get_health(),
'datanode_report': status.get_datanode_report(),
'name': request.fs.name
}
return render("status.mako", request, data)
def location_to_url(location, strict=True):
"""
If possible, returns a file browser URL to the location.
Prunes HDFS URI to path.
Location is a URI, if strict is True.
Python doesn't seem to have a readily-available URI-comparison
library, so this is quite hacky.
"""
if location is None:
return None
split_path = Hdfs.urlsplit(location)
if (strict and not split_path[1]) or not split_path[2]:
# Not a full URL (no netloc), or no path component
return None
path = location
if split_path[0] == 'hdfs':
path = split_path[2]
return reverse("filebrowser.views.view", kwargs=dict(path=path))
def truncate(toTruncate, charsToKeep=50):
"""
Returns a string truncated to 'charsToKeep' length plus ellipses.
"""
if len(toTruncate) > charsToKeep:
truncated = toTruncate[:charsToKeep] + "..."
return truncated
else:
return toTruncate
def _is_hdfs_superuser(request):
return request.user.username == request.fs.superuser or request.user.groups.filter(name__exact=request.fs.supergroup).exists()
|
'''
The envi.memcanvas module is the home of the base MemoryRenderer object and
MemoryCanvas objects.
'''
import sys
import logging
import traceback
import envi.symstore.resolver as e_resolv
logger = logging.getLogger(__name__)
class MemoryRenderer(object):
"""
A top level object for all memory renderers
"""
def rendSymbol(self, mcanv, va):
"""
If there is a symbolic name for the current va, print it...
"""
sym = mcanv.syms.getSymByAddr(va)
if sym is not None:
mcanv.addVaText("%s:\n" % repr(sym), va)
def rendVa(self, mcanv, va):
tag = mcanv.getVaTag(va)
mcanv.addText("%.8x:" % va, tag=tag)
def rendChars(self, mcanv, bytez):
for b in bytez:
bstr = "%.2x" % b
if b < 0x20 or b > 0x7e:
b = "."
else:
b = chr(b)
mcanv.addNameText(b, bstr)
def render(self, mcanv, va):
"""
Render one "unit" and return the size you ate.
mcanv will be a MemoryCanvas extender and va
is the virtual address you are expected to render.
"""
raise Exception("Implement render!")
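# Example renderer, shown as a sketch (not part of the original module). It
# assumes the canvas's mem object exposes readMemory(va, size), as envi memory
# objects generally do:
#
#   class ExampleByteRenderer(MemoryRenderer):
#       def render(self, mcanv, va):
#           self.rendSymbol(mcanv, va)
#           self.rendVa(mcanv, va)
#           mcanv.addText(" ")
#           self.rendChars(mcanv, mcanv.mem.readMemory(va, 8))
#           mcanv.addText("\n")
#           return 8  # we "ate" 8 bytes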
class MemoryCanvas(object):
"""
A memory canvas is a place where the textual representation
of memory will be displayed. The methods implemented here show
how a memory canvas which simply prints would be implemented.
"""
def __init__(self, mem=None, syms=None):
if mem is None:
raise Exception("MemoryCanvas requires a mem argument")
if syms is None:
syms = e_resolv.SymbolResolver()
self.mem = mem
self.syms = syms
self.currend = None
self.renderers = {}
self._canv_scrolled = False
self._canv_navcallback = None
# A few things for tracking renders.
self._canv_beginva = None
self._canv_endva = None
self._canv_rendvas = []
def setScrolledCanvas(self, scroll):
self._canv_scrolled = scroll
def write(self, msg):
# So a canvas can act like simple standard out
self.addText(msg)
def setNavCallback(self, callback):
'''
Set a navigation "callback" that will be called with
a memory expression as its first argument any time the
canvas receives user input that requests navigation...
'''
self._canv_navcallback = callback
def addRenderer(self, name, rend):
self.renderers[name] = rend
self.currend = rend
def getRenderer(self, name):
return self.renderers.get(name)
def getRendererNames(self):
ret = list(self.renderers.keys())
ret.sort()
return ret
def setRenderer(self, name):
rend = self.renderers.get(name)
if rend is None:
raise Exception("Unknown renderer: %s" % name)
self.currend = rend
def getTag(self, typename):
"""
Retrieve a non-named tag (doesn't highlight or do
anything particularly special, but allows color
by typename).
"""
return None
def getNameTag(self, name, typename='name'):
"""
Retrieve a "tag" object for a name. "Name" tags will
(if possible) be highlighted in the rendered interface
"""
return None # No highlighting in plain text
def getVaTag(self, va):
"""
Retrieve a tag object suitable for showing that the text
added with this tag should link through to the specified
virtual address in the memory canvas.
"""
return None # No linking in plain text
def addText(self, text, tag=None):
"""
Add text to the canvas with a specified tag.
NOTE: Implementors should probably check _canv_scrolled to
decide if they should scroll to the end of the view...
"""
sys.stdout.write(text)
def addNameText(self, text, name=None, typename='name'):
if name is None:
name = bytes([ord(x) for x in text])
else:
name = bytes([ord(x) for x in name])
tag = self.getNameTag(name, typename=typename)
self.addText(text, tag=tag)
def addVaText(self, text, va):
tag = self.getVaTag(va)
self.addText(text, tag=tag)
def render(self, va, size, rend=None):
raise Exception('Deprecated! use renderMemory!')
def clearCanvas(self):
pass
# Subclass hooks; the signatures accept the callback/init arguments that the
# render methods below pass along.
def _beginRenderMemory(self, va, size, rend):
pass
def _endRenderMemory(self, va, size, rend, cb=None):
pass
def _beginRenderVa(self, va):
pass
def _endRenderVa(self, va):
pass
def _beginUpdateVas(self, valist, init=None):
raise Exception("Default canvas can't update!")
def _endUpdateVas(self, fini=None):
pass
def _beginRenderAppend(self):
raise Exception("Default canvas can't append!")
def _endRenderAppend(self, cb=None):
pass
def _beginRenderPrepend(self):
raise Exception("Default canvas can't prepend!")
def _endRenderPrepend(self, cb=None):
pass
def _isRendered(self, va, maxva):
'''
Returns true if any part of the current render overlaps
with the specified region.
'''
if self._canv_beginva is None:
return False
if self._canv_endva is None:
return False
if va > self._canv_endva:
return False
if maxva < self._canv_beginva:
return False
return True
def _loc_helper(self, va):
'''
Allows subclasses to adjust the starting VA so that it makes more contextual sense.
'''
return (va, 0)
def renderMemoryUpdate(self, va, size, init=None, fini=None):
maxva = va + size
if not self._isRendered(va, maxva):
return
# Find the index of the first and last change
iend = None
ibegin = None
for i, (rendva, rendsize) in enumerate(self._canv_rendvas):
if ibegin is None and va <= rendva:
ibegin = i
if iend is None and maxva <= rendva:
iend = i
if ibegin is not None and iend is not None:
break
saved_last = self._canv_rendvas[iend:]
saved_first = self._canv_rendvas[:ibegin]
updatedvas = self._canv_rendvas[ibegin:iend]
# We must actually start rendering from the beginning
# of the first updated VA index
startva = updatedvas[0][0]
endva = self._canv_endva
if saved_last:
endva = saved_last[0][0]
newrendvas = []
self._beginUpdateVas(updatedvas, init)
try:
while startva < endva:
self._beginRenderVa(startva)
rsize = self.currend.render(self, startva)
newrendvas.append((startva, rsize))
self._endRenderVa(startva)
startva += rsize
except Exception:
s = traceback.format_exc()
self.addText("\nException At %s: %s\n" % (hex(va), s))
self._canv_rendvas = saved_first + newrendvas + saved_last
self._endUpdateVas(fini)
def renderMemoryPrepend(self, size, cb=None):
firstva, firstsize = self._canv_rendvas[0]
va, szdiff = self._loc_helper(firstva - size)
size += szdiff
self._beginRenderPrepend()
savedrendvas = self._canv_rendvas
self._canv_rendvas = []
self._canv_beginva = va
rend = self.currend
try:
while va < firstva:
self._beginRenderVa(va)
rsize = rend.render(self, va)
self._canv_rendvas.append((va, rsize))
self._endRenderVa(va)
va += rsize
self._canv_rendvas.extend(savedrendvas)
except Exception:
s = traceback.format_exc()
self.addText("\nException At %s: %s\n" % (hex(va), s))
self._endRenderPrepend(cb)
def renderMemoryAppend(self, size, cb=None):
lastva, lastsize = self._canv_rendvas[-1]
va = lastva + lastsize
self._beginRenderAppend()
rend = self.currend
try:
maxva = va + size
while va < maxva:
self._beginRenderVa(va)
rsize = rend.render(self, va)
self._canv_rendvas.append((va, rsize))
self._endRenderVa(va)
va += rsize
self._canv_endva = maxva
except Exception:
s = traceback.format_exc()
self.addText("\nException At %s: %s\n" % (hex(va), s))
self._endRenderAppend(cb)
def renderMemory(self, va, size, rend=None, cb=None):
# if this is not a "scrolled" canvas, clear it.
if not self._canv_scrolled:
self.clearCanvas()
if rend is None:
rend = self.currend
self.currend = rend
# Set our canvas render tracking variables.
self._canv_beginva = va
self._canv_endva = va + size
self._canv_rendvas = []
# A callback for "bulk" rendering (let the canvas cache...)
self._beginRenderMemory(va, size, rend)
try:
maxva = va + size
while va < maxva:
self._beginRenderVa(va)
try:
rsize = rend.render(self, va)
self._canv_rendvas.append((va, rsize))
self._endRenderVa(va)
va += rsize
except Exception as e:
logger.error(traceback.format_exc())
self.addText("\nRender Exception At %s: %s\n" % (hex(va), str(e)))
self._endRenderVa(va)
break
except Exception as e:
self.addText("\nException At %s: %s\n" % (hex(va), str(e)))
# Canvas callback for render completion (or error...)
self._endRenderMemory(va, size, rend, cb)
class StringMemoryCanvas(MemoryCanvas):
def __init__(self, mem, syms=None):
MemoryCanvas.__init__(self, mem, syms=syms)
self.strval = ''
# we perform manual clearing of the canvas.
# we don't want it cleared every renderMemory call.
self.setScrolledCanvas(True)
def clearCanvas(self):
self.strval = ''
def addText(self, text, tag=None):
self.strval += text
def __str__(self):
return self.strval
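# Usage sketch (assumes an envi memory object "mem" and a renderer such as the
# ExampleByteRenderer sketched above; not part of the original module):
#
#   canv = StringMemoryCanvas(mem)
#   canv.addRenderer('bytes', ExampleByteRenderer())
#   canv.renderMemory(0x401000, 64)
#   print(str(canv))  # the text accumulated in canv.strval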
class CanvasMethodProxy(object):
'''
Target for teecanvas.
'''
def __init__(self, canvases, name):
self.canvases = canvases
self.name = name
def __call__(self, *args, **kwargs):
for canvas in self.canvases:
attr = getattr(canvas, self.name)
attr(*args, **kwargs)
class TeeCanvas(object):
'''
Replaces the canvas on an object (temporarily) with a proxy canvas that
forwards requests to other canvases.
Example usage:
with TeeCanvas(self, (self.canvas, canvas2)) as tc:
self.onecmd(command)
'''
def __init__(self, target, canvases):
self.target = target
self.ocanvas = None
self.canvases = canvases
def __getattr__(self, name):
return CanvasMethodProxy(self.canvases, name)
def __enter__(self):
'''
replace the canvas of the target with ourselves.
'''
self.ocanvas = self.target.canvas
self.target.canvas = self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
restore the canvas of the target.
'''
self.target.canvas = self.ocanvas
|
"""
WSGI config for web_reflectivity project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web_reflectivity.settings")
application = get_wsgi_application()
|
from setuptools import setup
setup(
setup_requires=['pbr'],
pbr=True,
)
|
import sys
import web
import MySQLdb
from DBUtils.PooledDB import PooledDB
from json import JSONEncoder, JSONDecoder
from log import initlog, logger
import config
from myfuntions import saveUrl, getUrl, toKey, batchSave
def is_ip_deny(thisweb):
'''Only the redirect endpoint skips this check.'''
# print "REMOTE_ADDR=%s" % thisweb.ctx.env.get('REMOTE_ADDR')
# logger.info("REMOTE_ADDR=%s" % thisweb.ctx.env.get('REMOTE_ADDR'))
if thisweb.ctx.env.get('REMOTE_ADDR') not in config.CONFIG['ip_white_set']:
logger.error("%s is deny." % thisweb.ctx.env.get('REMOTE_ADDR'))
return True
else:
return False
# return thisweb.ctx.env.get('REMOTE_ADDR') not in config.CONFIG['ip_white_set']
class url2key:
def GET(self):
return self.POST()
def POST(self):
logger.info("worker %s" % 'url2key')
if is_ip_deny(web):
raise web.forbidden()
ip = web.ctx.env.get('REMOTE_ADDR')
url = web.input().get('url')
logger.info('url2key IN: ip=%s url=%s' % (ip, url))
# print "url=%s" % url
if url is None or len(url.strip()) == 0:
logger.error('url is %s' % 'None')
return web.NotFound('url is null')
key = saveUrl(url.strip())
logger.info("url2key OUT: ip=%s key=%s" % (ip, key))
# print "key=%s" % key
# TODO: build a JSON response and return it
return key
class redirect_to:
def GET(self, key):
logger.info("worker %s" % 'redirect_to')
logger.info("key=%s" % key)
# print "key=%s" % key
if key is None or len(key.strip()) == 0:
logger.error('key is %s' % 'None')
return web.NotFound('key is null')
url = getUrl(key.strip())
if url is None:
logger.error('geturl url is %s' % 'None')
return web.NotFound(message='url not found')
# return web.seeother(url, absolute=True)  # TODO: check whether these semantics are appropriate
if not url.lower().startswith("http"):
url = "http://%s" % url
logger.info("url is %s, 302" % url)
return web.redirect(url, '302')
class url2key_without_save:
def GET(self):
logger.info("worker %s" % 'url2key_without_save')
if is_ip_deny(web):
raise web.forbidden()
url = web.input().get('url')
logger.info("url=%s" % url)
# print "url=%s" % url
if url is None or len(url.strip()) == 0:
logger.error("url is %s" % "None")
return web.NotFound('url is null')
key = toKey(url.strip())
logger.info("key=%s" % key)
# print "key=%s" % key
# TODO: build a JSON response and return it
return key
class key2url:
def POST(self):
return self.GET()
def GET(self):
logger.info("worker %s" % 'key2url')
if is_ip_deny(web):
raise web.forbidden()
ip = web.ctx.env.get('REMOTE_ADDR')
key = web.input().get('key')
logger.info("key2url IN: ip=%s key=%s" % (ip, key))
# print "key=%s" % key
if key is None or len(key.strip()) == 0:
logger.error('%s is None' % 'key')
return web.NotFound('key is null')
url = getUrl(key.strip())
logger.info("key2url OUT: ip=%s url=%s" % (ip, url))
# print "url=%s" % url
# TODO: build a JSON response and return it
return url
class batch_url2key:
def POST(self):
logger.info("worker %s" % 'batch_url2key')
if is_ip_deny(web):
raise web.forbidden()
ip = web.ctx.env.get('REMOTE_ADDR')
urls = web.input().get('urls')
logger.info("batch_url2key IN: ip=%s urls=%s" % (ip, urls))
# print "urls=%s" % urls
if urls is None or len(urls.strip()) == 0:
logger.error('%s is Null' % 'urls')
return web.NotFound('urls is null')
url_list = JSONDecoder().decode(urls)
# print 'will save'
keys = batchSave(url_list)
logger.info("batch_url2key OUT: ip=%s keys=%s" % (ip, keys))
# print 'save ok'
return JSONEncoder().encode({'keys': keys})
class batch_key2url:
def POST(self):
return self.GET()
def GET(self):
logger.info("worker %s" % 'batch_key2url')
if is_ip_deny(web):
raise web.forbidden()
ip = web.ctx.env.get('REMOTE_ADDR')
keys = web.input().get('keys')
logger.info("batch_key2url IN: ip=%s keys=%s" % (ip, keys))
# print "keys=%s" % keys
if keys is None or len(keys.strip()) == 0:
logger.error('%s is Null' % 'keys')
return web.NotFound('keys is null')
key_list = JSONDecoder().decode(keys)
def _mapfn(key):
if key is None or len(key.strip()) == 0:
logger.debug('%s is None' % 'key')
return ''
else:
url = getUrl(key.strip())
return url
urls = map(_mapfn, key_list)
logger.info("batch_key2url OUT: ip=%s url_list=%s" % (ip, urls))
return JSONEncoder().encode({'urls': urls})
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding("utf-8")
initlog(config.CONFIG['log_name'])
dbc = config.CONFIG['db']
config.pool = PooledDB(creator=MySQLdb, mincached=dbc['mincached'], maxcached=dbc['maxcached'],
# maxshared=10, maxconnections=80, maxusage=100,
maxshared=10, maxconnections=80,
host=dbc['host'], port=dbc['port'], user=dbc['user'], passwd=dbc['passwd'],
db=dbc['db'], use_unicode=dbc['use_unicode'], charset=dbc['charset'])
urls = (
'/url2key', 'url2key',
'/url2key_without_save', 'url2key_without_save',
'/key2url', 'key2url',
'/batch_url2key', 'batch_url2key',
'/batch_key2url', 'batch_key2url',
'/(.+)', 'redirect_to')
web.config.debug = config.CONFIG['is_debug']
app = web.application(urls, globals())
app.run()
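# Illustrative client calls (assuming the default web.py port 8080 and a caller
# whose IP is in config.CONFIG['ip_white_set']; paths map to the urls tuple above):
#
#   GET  /url2key?url=http://example.com/some/long/path   -> short key
#   GET  /key2url?key=<key>                               -> original URL
#   POST /batch_url2key with urls=["http://a.com", "http://b.com"]
#                                                         -> {"keys": [...]}
#   GET  /<key>                                           -> 302 redirect to the URL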
|
"""
Support for Postfix
This module is currently little more than a config file viewer and editor. It
is able to read the master.cf file (which is one style) and files in the style
of main.cf (which is a different style, that is used in multiple postfix
configuration files).
The design of this module is such that when files are edited, a minimum of
changes are made to them. Each file should look as if it has been edited by
hand; order, comments and whitespace are all preserved.
"""
import logging
import re
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
SWWS = re.compile(r"^\s")
log = logging.getLogger(__name__)
MAIN_CF = "/etc/postfix/main.cf"
MASTER_CF = "/etc/postfix/master.cf"
def __virtual__():
"""
Only load the module if Postfix is installed
"""
if salt.utils.path.which("postfix"):
return True
return (False, "postfix execution module not loaded: postfix not installed.")
def _parse_master(path=MASTER_CF):
"""
Parse the master.cf file. This file is essentially a whitespace-delimited
columnar file. The columns are: service, type, private (yes), unpriv (yes),
chroot (yes), wakeup (never), maxproc (100), command + args.
This function parses out the columns, leaving empty lines and comments
intact. Where the value doesn't detract from the default, a dash (-) will
be used.
Returns a dict of the active config lines, and a list of the entire file,
in order. These complement each other.
"""
with salt.utils.files.fopen(path, "r") as fh_:
full_conf = salt.utils.stringutils.to_unicode(fh_.read())
# Condense the file based on line continuations, but keep order, comments
# and whitespace
conf_list = []
conf_dict = {}
for line in full_conf.splitlines():
if not line.strip() or line.strip().startswith("#"):
conf_list.append(line)
continue
comps = line.strip().split()
conf_line = {
"service": comps[0],
"conn_type": comps[1],
"private": comps[2],
"unpriv": comps[3],
"chroot": comps[4],
"wakeup": comps[5],
"maxproc": comps[6],
"command": " ".join(comps[7:]),
}
dict_key = "{} {}".format(comps[0], comps[1])
conf_list.append(conf_line)
conf_dict[dict_key] = conf_line
return conf_dict, conf_list
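# Illustrative result (derived from the parsing above): a master.cf line such as
#   smtp      inet  n       -       n       -       -       smtpd
# produces the conf_dict entry
#   'smtp inet': {'service': 'smtp', 'conn_type': 'inet', 'private': 'n',
#                 'unpriv': '-', 'chroot': 'n', 'wakeup': '-', 'maxproc': '-',
#                 'command': 'smtpd'}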
def show_master(path=MASTER_CF):
"""
Return a dict of active config values. This does not include comments,
spacing or order.
The data returned from this function should not be used for direct
modification of the main.cf file; other functions are available for that.
CLI Examples:
.. code-block:: bash
salt <minion> postfix.show_master
salt <minion> postfix.show_master path=/path/to/master.cf
"""
conf_dict, conf_list = _parse_master(path) # pylint: disable=W0612
return conf_dict
def set_master(
service,
conn_type,
private="y",
unpriv="y",
chroot="y",
wakeup="n",
maxproc="100",
command="",
write_conf=True,
path=MASTER_CF,
):
"""
Set a single config value in the master.cf file. If the value does not
already exist, it will be appended to the end.
Because of shell parsing issues, '-' cannot be set as a value, as is normal
in the master.cf file; either 'y', 'n' or a number should be used when
calling this function from the command line. If the value used matches the
default, it will internally be converted to a '-'. Calling this function
from the Python API is not affected by this limitation.
The settings and their default values, in order, are: service (required),
conn_type (required), private (y), unpriv (y), chroot (y), wakeup (n),
maxproc (100), command (required).
By default, this function will write out the changes to the master.cf file,
and then returns the full contents of the file. By setting the
``write_conf`` option to ``False``, it will skip writing the file.
CLI Example:
.. code-block:: bash
salt <minion> postfix.set_master smtp inet n y n n 100 smtpd
"""
conf_dict, conf_list = _parse_master(path)
new_conf = []
dict_key = "{} {}".format(service, conn_type)
new_line = _format_master(
service,
conn_type,
private,
unpriv,
chroot,
wakeup,
maxproc,
command,
)
for line in conf_list:
if isinstance(line, dict):
if line["service"] == service and line["conn_type"] == conn_type:
# This is the one line that we're changing
new_conf.append(new_line)
else:
# No changes to this line, but it still needs to be
# formatted properly
new_conf.append(_format_master(**line))
else:
# This line is a comment or is empty
new_conf.append(line)
if dict_key not in conf_dict:
# This config value does not exist, so append it to the end
new_conf.append(new_line)
if write_conf:
_write_conf(new_conf, path)
return "\n".join(new_conf)
def _format_master(
service, conn_type, private, unpriv, chroot, wakeup, maxproc, command
):
"""
Format the given values into the style of line normally used in the
master.cf file.
"""
# ==========================================================================
# service   type  private unpriv  chroot  wakeup  maxproc command + args
#                 (yes)   (yes)   (yes)   (never) (100)
# ==========================================================================
# smtp      inet  n       -       n       -       -       smtpd
if private == "y":
private = "-"
if unpriv == "y":
unpriv = "-"
if chroot == "y":
chroot = "-"
if wakeup == "n":
wakeup = "-"
maxproc = str(maxproc)
if maxproc == "100":
maxproc = "-"
conf_line = "{:9s} {:5s} {:7s} {:7s} {:7s} {:7s} {:7s} {}".format(
service,
conn_type,
private,
unpriv,
chroot,
wakeup,
maxproc,
command,
)
# print(conf_line)
return conf_line
def _parse_main(path=MAIN_CF):
"""
Parse files in the style of main.cf. This is not just a "name = value" file;
there are other rules:
* Comments start with #
* Any whitespace at the beginning of a line denotes that that line is a
continuation from the previous line.
* The whitespace rule applies to comments.
* Keys defined in the file may be referred to as variables further down in
the file.
"""
with salt.utils.files.fopen(path, "r") as fh_:
full_conf = salt.utils.stringutils.to_unicode(fh_.read())
# Condense the file based on line continuations, but keep order, comments
# and whitespace
conf_list = []
for line in full_conf.splitlines():
if not line.strip():
conf_list.append(line)
continue
if re.match(SWWS, line):
if not conf_list:
# This should only happen at the top of the file
conf_list.append(line)
continue
if not isinstance(conf_list[-1], str):
conf_list[-1] = ""
# This line is a continuation of the previous line
conf_list[-1] = "\n".join([conf_list[-1], line])
else:
conf_list.append(line)
# Extract just the actual key/value pairs
pairs = {}
for line in conf_list:
if not line.strip():
continue
if line.startswith("#"):
continue
comps = line.split("=")
pairs[comps[0].strip()] = "=".join(comps[1:]).strip()
# Return both sets of data; they complement each other elsewhere
return pairs, conf_list
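# Illustrative result (a sketch): a main.cf fragment such as
#   mydestination = example.com,
#       localhost
# is condensed into a single conf_list entry, and the extracted pair is roughly
#   pairs['mydestination'] == 'example.com,\n    localhost'
# (the continuation line is kept and joined with a newline, as the code above shows).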
def show_main(path=MAIN_CF):
"""
Return a dict of active config values. This does not include comments,
spacing or order. Bear in mind that order is functionally important in the
main.cf file, since keys can be referred to as variables. This means that
the data returned from this function should not be used for direct
modification of the main.cf file; other functions are available for that.
CLI Examples:
.. code-block:: bash
salt <minion> postfix.show_main
salt <minion> postfix.show_main path=/path/to/main.cf
"""
pairs, conf_list = _parse_main(path) # pylint: disable=W0612
return pairs
def set_main(key, value, path=MAIN_CF):
"""
Set a single config value in the main.cf file. If the value does not already
exist, it will be appended to the end.
CLI Example:
.. code-block:: bash
salt <minion> postfix.set_main mailq_path /usr/bin/mailq
"""
pairs, conf_list = _parse_main(path)
new_conf = []
key_line_match = re.compile("^{}([\\s=]|$)".format(re.escape(key)))
if key in pairs:
for line in conf_list:
if re.match(key_line_match, line):
new_conf.append("{} = {}".format(key, value))
else:
new_conf.append(line)
else:
conf_list.append("{} = {}".format(key, value))
new_conf = conf_list
_write_conf(new_conf, path)
return new_conf
def _write_conf(conf, path=MAIN_CF):
"""
Write out configuration file.
"""
with salt.utils.files.fopen(path, "w") as fh_:
for line in conf:
line = salt.utils.stringutils.to_str(line)
if isinstance(line, dict):
fh_.write(" ".join(line))
else:
fh_.write(line)
fh_.write("\n")
def show_queue():
"""
Show contents of the mail queue
CLI Example:
.. code-block:: bash
salt '*' postfix.show_queue
"""
cmd = "mailq"
out = __salt__["cmd.run"](cmd).splitlines()
queue = []
queue_pattern = re.compile(
r"(?P<queue_id>^[A-Z0-9]+)\s+(?P<size>\d+)\s(?P<timestamp>\w{3}\s\w{3}\s\d{1,2}\s\d{2}\:\d{2}\:\d{2})\s+(?P<sender>.+)"
)
recipient_pattern = re.compile(r"^\s+(?P<recipient>.+)")
queue_id, size, timestamp, sender, recipient = None, None, None, None, None
for line in out:
if re.match(r"^(-|postqueue:|Mail)", line):
# discard in-queue wrapper
continue
if re.match(queue_pattern, line):
m = re.match(queue_pattern, line)
queue_id = m.group("queue_id")
size = m.group("size")
timestamp = m.group("timestamp")
sender = m.group("sender")
elif re.match(recipient_pattern, line): # recipient/s
m = re.match(recipient_pattern, line)
recipient = m.group("recipient")
elif not line: # end of record
if all((queue_id, size, timestamp, sender, recipient)):
queue.append(
{
"queue_id": queue_id,
"size": size,
"timestamp": timestamp,
"sender": sender,
"recipient": recipient,
}
)
return queue
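# Illustrative mapping (a sketch): a mailq record like
#   A705238B4C 1234 Tue Mar 12 11:10:00 sender@example.com
#                                       recipient@example.com
# followed by a blank line is collected as roughly
#   {'queue_id': 'A705238B4C', 'size': '1234',
#    'timestamp': 'Tue Mar 12 11:10:00',
#    'sender': 'sender@example.com', 'recipient': 'recipient@example.com'}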
def delete(queue_id):
"""
Delete message(s) from the mail queue
CLI Example:
.. code-block:: bash
salt '*' postfix.delete 5C33CA0DEA
salt '*' postfix.delete ALL
"""
ret = {"message": "", "result": True}
if not queue_id:
log.error("Required argument: queue_id")
if not queue_id == "ALL":
queue = show_queue()
_message = None
for item in queue:
if item["queue_id"] == queue_id:
_message = item
if not _message:
ret["message"] = "No message in queue with ID {}".format(queue_id)
ret["result"] = False
return ret
cmd = "postsuper -d {}".format(queue_id)
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] == 0:
if queue_id == "ALL":
ret["message"] = "Successfully removed all messages"
else:
ret["message"] = "Successfully removed message with queue id {}".format(
queue_id
)
else:
if queue_id == "ALL":
ret["message"] = "Unable to remove all messages"
else:
ret["message"] = "Unable to remove message with queue id {}: {}".format(
queue_id, result["stderr"]
)
return ret
def hold(queue_id):
"""
Put message(s) on hold from the mail queue
CLI Example:
.. code-block:: bash
salt '*' postfix.hold 5C33CA0DEA
salt '*' postfix.hold ALL
"""
ret = {"message": "", "result": True}
if not queue_id:
log.error("Required argument: queue_id")
if not queue_id == "ALL":
queue = show_queue()
_message = None
for item in queue:
if item["queue_id"] == queue_id:
_message = item
if not _message:
ret["message"] = "No message in queue with ID {}".format(queue_id)
ret["result"] = False
return ret
cmd = "postsuper -h {}".format(queue_id)
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] == 0:
if queue_id == "ALL":
ret["message"] = "Successfully placed all messages on hold"
else:
ret[
"message"
] = "Successfully placed message on hold with queue id {}".format(queue_id)
else:
if queue_id == "ALL":
ret["message"] = "Unable to place all messages on hold"
else:
ret[
"message"
] = "Unable to place message on hold with queue id {}: {}".format(
queue_id, result["stderr"]
)
return ret
def unhold(queue_id):
"""
Set held message(s) in the mail queue to unheld
CLI Example:
.. code-block:: bash
salt '*' postfix.unhold 5C33CA0DEA
salt '*' postfix.unhold ALL
"""
ret = {"message": "", "result": True}
if not queue_id:
log.error("Required argument: queue_id")
if not queue_id == "ALL":
queue = show_queue()
_message = None
for item in queue:
if item["queue_id"] == queue_id:
_message = item
if not _message:
ret["message"] = "No message in queue with ID {}".format(queue_id)
ret["result"] = False
return ret
cmd = "postsuper -H {}".format(queue_id)
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] == 0:
if queue_id == "ALL":
ret["message"] = "Successfully set all messages as unheld"
else:
ret[
"message"
] = "Successfully set message as unheld with queue id {}".format(queue_id)
else:
if queue_id == "ALL":
ret["message"] = "Unable to set all messages as unheld."
else:
ret[
"message"
] = "Unable to set message as unheld with queue id {}: {}".format(
queue_id, result["stderr"]
)
return ret
def requeue(queue_id):
"""
Requeue message(s) in the mail queue
CLI Example:
.. code-block:: bash
salt '*' postfix.requeue 5C33CA0DEA
salt '*' postfix.requeue ALL
"""
ret = {"message": "", "result": True}
if not queue_id:
log.error("Required argument: queue_id")
if not queue_id == "ALL":
queue = show_queue()
_message = None
for item in queue:
if item["queue_id"] == queue_id:
_message = item
if not _message:
ret["message"] = "No message in queue with ID {}".format(queue_id)
ret["result"] = False
return ret
cmd = "postsuper -r {}".format(queue_id)
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] == 0:
if queue_id == "ALL":
ret["message"] = "Successfully requeued all messages"
else:
ret["message"] = "Successfully requeued message with queue id {}".format(
queue_id
)
else:
if queue_id == "ALL":
ret["message"] = "Unable to requeue all messages"
else:
ret["message"] = "Unable to requeue message with queue id {}: {}".format(
queue_id, result["stderr"]
)
return ret
|
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Risk import *
class TrailingStopRiskManagementModel(RiskManagementModel):
'''Provides an implementation of IRiskManagementModel that limits the maximum possible loss
measured from the highest unrealized profit'''
def __init__(self, maximumDrawdownPercent = 0.05):
'''Initializes a new instance of the TrailingStopRiskManagementModel class
Args:
maximumDrawdownPercent: The maximum percentage drawdown allowed for algorithm portfolio compared with the highest unrealized profit, defaults to 5% drawdown'''
self.maximumDrawdownPercent = -abs(maximumDrawdownPercent)
self.trailingHighs = dict()
def ManageRisk(self, algorithm, targets):
'''Manages the algorithm's risk at each time step
Args:
algorithm: The algorithm instance
targets: The current portfolio targets to be assessed for risk'''
riskAdjustedTargets = list()
for kvp in algorithm.Securities:
symbol = kvp.Key
security = kvp.Value
# Remove if not invested
if not security.Invested:
self.trailingHighs.pop(symbol, None)
continue
# Add newly invested securities
if symbol not in self.trailingHighs:
self.trailingHighs[symbol] = security.Holdings.AveragePrice # Set to average holding cost
continue
# Check for new highs and update - set to tradebar high
if self.trailingHighs[symbol] < security.High:
self.trailingHighs[symbol] = security.High
continue
# Check for securities past the drawdown limit
securityHigh = self.trailingHighs[symbol]
drawdown = (security.Low / securityHigh) - 1
if drawdown < self.maximumDrawdownPercent:
# liquidate
riskAdjustedTargets.append(PortfolioTarget(symbol, 0))
return riskAdjustedTargets
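# Usage sketch (assuming an Algorithm Framework algorithm; illustrative only):
#
#   class MyFrameworkAlgorithm(QCAlgorithm):
#       def Initialize(self):
#           # ... universe/alpha/portfolio setup ...
#           # liquidate a holding once it falls 3% below its highest
#           # unrealized-profit point
#           self.SetRiskManagement(TrailingStopRiskManagementModel(0.03))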
|
from python.decorators import euler_timer
from python.functions import prime_factors
def increment(value, list_):
"""
This updates the value according to the list. Since we seek 4
consecutive numbers with exactly 4 prime factors, we can jump
4 numbers if the last doesn't have 4 factors, can jump 3 if
the second to last doesn't have 4 factors, and so on
"""
if list_[-1] != 4:
return value + 4
# We can assume the last element is a 4
if list_[-2:] != [4, 4]:
return value + 3
# We can assume the last 2 elements are [4,4]
if list_[-3:] != [4, 4, 4]:
return value + 2
# We can assume the last 3 elements are [4,4,4]
return value + 1
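# Worked example: with value=210 and num_factors=[2, 3, 2, 4] the last entry is 4
# but the last two are not [4, 4], so increment returns 210 + 3 = 213; with
# [2, 3, 2, 2] it returns 210 + 4 = 214; and with [3, 4, 4, 4] only the first
# entry fails, so it steps by just 1 to 211.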
def main(verbose=False):
# Find the first four consecutive integers to have four distinct
# primes factors. What is the first of these numbers?
factor_hash = {1: [], 2: [2]}
# Smallest product of 4 primes is 2*3*5*7 = 210
# We need to update the hash to get to this point
for i in range(3, 210 + 1):
prime_factors(i, hash_=factor_hash)
smallest = 210 # The smallest integer of the four
num_factors = [len(prime_factors(smallest + i,
unique=True,
hash_=factor_hash))
for i in range(4)]
while num_factors != [4, 4, 4, 4]:
smallest = increment(smallest, num_factors)
num_factors = [len(prime_factors(smallest + i,
unique=True,
hash_=factor_hash))
for i in range(4)]
return smallest
if __name__ == '__main__':
print euler_timer(47)(main)(verbose=True)
|
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import print_success
from MOAL.helpers.display import print_info
from random import randrange as rr
DEBUG = True if __name__ == '__main__' else False
class MemoryManager:
def __init__(self):
self.memory_blocks = {}
self.last_block = 1
self.MAX_RAM = 16000000000
self.free_ram = self.MAX_RAM
def __str__(self):
print_success('Viewing applications in memory...')
for node, ram_usage in self.memory_blocks.iteritems():
print('--- {} | {} bytes @ {}'.format(
node, sum([f[0] for f in ram_usage]),
[f[1] for f in ram_usage]))
return ''
def _divvy_ram(self, ram):
"""This is an arbitrary, somewhat meaningless method, to simulate
the notion of free blocks of ram that must be allocated and
referenced via pointer. A given program may require X ram, but
the location of available ram may be disparate, so various blocks have
to be stored as a free linked list, or similar data structure.
Since we've already covered linked lists, association lists, etc.,
there isn't much value here beyond learning the context in which this
data structure is used, which tends to be memory management."""
subdivisions = rr(2, 20) # Arbitrary number
flist = []
while subdivisions > 0:
diff = rr(0, ram)
# Store ram as [current block of memory, location]
flist.append([diff, str(self.last_block).zfill(4)])
ram = ram - diff
self.last_block += 1
subdivisions -= 1
if DEBUG:
print_info('Ram: {} / diff: {}'.format(ram, diff))
return flist
def malloc(self, item, ram):
self.memory_blocks[item] = self._divvy_ram(ram)
self.free_ram -= ram
print('Ram decreased: {} ({} bytes)'.format(self.free_ram, ram))
def free(self, item):
ram = sum([f[0] for f in self.memory_blocks[item]])
self.free_ram += ram
print('Ram increased: {} ({} bytes)'.format(self.free_ram, ram))
del self.memory_blocks[item]
if DEBUG:
with Section('Free list - Memory Manager'):
manager = MemoryManager()
manager.malloc('itunes', 128000000)
manager.malloc('photoshop', 256000000)
manager.malloc('chrome', 64000000)
print(manager)
manager.free('photoshop')
manager.free('itunes')
print(manager)
|
from flask import url_for, flash, abort
from flask_login import current_user
from pycroft.lib.user import status
from pycroft.model.user import User
def user_btn_style(user):
"""Determine the icons and style of the button to a users page.
First, add glyphicons concerning status warnings (finance,
traffic, no network access, abuse), or an ok icon. Append the
admin icon (always) and the “has an LDAP-account” icon (only if
not a member anymore).
The button class is ``info`` for non-members, ``success`` for
members, ``warning`` for traffic, and ``danger`` for other
felonies.
:param user: The user whose status is inspected
:return: The button class, the glyphicon classes, and a tooltip string
:rtype: tuple(str, list(str), str)
"""
glyphicons = []
btn_class = None
tooltips = []
props = set(p.property_name for p in user.current_properties)
if 'network_access' not in props:
glyphicons.append('glyphicon-remove')
tooltips.append('Zugang gesperrt')
if 'payment_in_default' in props:
glyphicons.append('glyphicon-euro')
btn_class = 'btn-warning'
tooltips.append('nicht bezahlt')
if 'member' in props:
if 'traffic_limit_exceeded' in props:
glyphicons.append('glyphicon-stats')
btn_class = 'btn-warning'
tooltips.append('Traffic')
if 'violation' in props:
glyphicons.append('glyphicon-alert')
btn_class = 'btn-danger'
tooltips.append('Verstoß')
else:
btn_class = 'btn-info'
tooltips.append('Kein Mitglied')
glyphicons = glyphicons or ['glyphicon-ok']
btn_class = btn_class or 'btn-success'
if 'user_show' in props:
glyphicons.append('glyphicon-wrench')
tooltips.append('Admin')
if 'member' not in props and 'ldap' in props:
glyphicons.append('glyphicon-cloud')
tooltips.append('Eintrag im LDAP')
tooltip = ', '.join(tooltips)
return btn_class, glyphicons, tooltip
def user_button(user):
btn_class, glyphicons, tooltip = user_btn_style(user)
return {
'href': url_for("user.user_show", user_id=user.id),
'title': user.name,
'icon': glyphicons,
'btn_class': btn_class,
'tooltip': tooltip
}
def get_user_or_404(user_id):
user = User.q.get(user_id)
if user is None:
flash(u"Nutzer mit ID {} existiert nicht!".format(user_id,), 'error')
abort(404)
return user
def no_membership_change():
return not current_user.has_property('groups_change_membership')
def no_hosts_change():
return not current_user.has_property('hosts_change')
|
import os
import djcelery
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'findingaids.settings')
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
if 'HTTP_PROXY' not in os.environ:
os.environ['HTTP_PROXY'] = 'http://localhost:3128/'
os.environ['VIRTUAL_ENV'] = '/home/httpd/findingaids/env/'
djcelery.setup_loader()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
"""
Views for home page.
"""
from django import template
from django import shortcuts
from django.views.decorators import vary
import horizon
from horizon.views import auth as auth_views
def qunit_tests(request):
return shortcuts.render(request, "qunit.html")
def user_home(user):
if user.admin:
return horizon.get_dashboard('syspanel').get_absolute_url()
return horizon.get_dashboard('nova').get_absolute_url()
@vary.vary_on_cookie
def splash(request):
form, handled = auth_views.Login.maybe_handle(request)
if handled:
return handled
request.session.clear()
return shortcuts.render(request, 'splash.html', {'form': form})
|
"""
WSGI config for gmusic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gmusic.settings")
application = get_wsgi_application()
|
__author__ = 'saeedamen'
from findatapy.market.datavendor import DataVendor
from findatapy.market.ioengine import IOEngine, SpeedCache
from findatapy.market.market import Market, FXVolFactory, FXCrossFactory, FXConv, RatesFactory
from findatapy.market.marketdatagenerator import MarketDataGenerator
from findatapy.market.marketdatarequest import MarketDataRequest
|
""" Attribute nodes
Knowing attributes of an object is very important, esp. when it comes to 'self'
and objects and classes.
There are methods "computeExpression*Attribute" to aid in predicting them,
with many variants for setting, deleting, and accessing. Also there is some
complication in the form of special lookups, that won't go through the normal
path, but just check slots.
Due to ``getattr`` and ``setattr`` built-ins, there is also a difference in the
computations for objects and for compile time known strings. This reflects what
CPython also does with "tp_getattr" and "tp_getattro".
These nodes therefore mostly delegate the work to the expressions they
operate on and let them decide; the heavy lifting of optimization and
annotation happens in the nodes that implement these compute slots.
"""
from .ExpressionBases import (
ExpressionChildHavingBase,
ExpressionChildrenHavingBase,
)
from .NodeBases import StatementChildHavingBase, StatementChildrenHavingBase
from .NodeMakingHelpers import wrapExpressionWithNodeSideEffects
class StatementAssignmentAttribute(StatementChildrenHavingBase):
"""Assignment to an attribute.
Typically from code like: source.attribute_name = expression
Both source and expression may be complex expressions, the source
is evaluated first. Assigning to an attribute has its on slot on
the source, which gets to decide if it knows it will work or not,
and what value it will be.
"""
__slots__ = ("attribute_name",)
kind = "STATEMENT_ASSIGNMENT_ATTRIBUTE"
named_children = ("source", "expression")
def __init__(self, expression, attribute_name, source, source_ref):
StatementChildrenHavingBase.__init__(
self,
values={"expression": expression, "source": source},
source_ref=source_ref,
)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def getAttributeName(self):
return self.attribute_name
def computeStatement(self, trace_collection):
result, change_tags, change_desc = self.computeStatementSubExpressions(
trace_collection=trace_collection
)
if result is not self:
return result, change_tags, change_desc
return self.subnode_expression.computeExpressionSetAttribute(
set_node=self,
attribute_name=self.attribute_name,
value_node=self.subnode_source,
trace_collection=trace_collection,
)
@staticmethod
def getStatementNiceName():
return "attribute assignment statement"
class StatementDelAttribute(StatementChildHavingBase):
"""Deletion of an attribute.
Typically from code like: del source.attribute_name
The source may be a complex expression. Deleting an attribute has its own
slot on the source, which gets to decide if it knows it will work or
not, and what value it will be.
"""
kind = "STATEMENT_DEL_ATTRIBUTE"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
StatementChildHavingBase.__init__(self, value=expression, source_ref=source_ref)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def getAttributeName(self):
return self.attribute_name
def computeStatement(self, trace_collection):
result, change_tags, change_desc = self.computeStatementSubExpressions(
trace_collection=trace_collection
)
if result is not self:
return result, change_tags, change_desc
return self.subnode_expression.computeExpressionDelAttribute(
set_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
@staticmethod
def getStatementNiceName():
return "attribute del statement"
class ExpressionAttributeLookup(ExpressionChildHavingBase):
"""Looking up an attribute of an object.
Typically code like: source.attribute_name
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
ExpressionChildHavingBase.__init__(
self, value=expression, source_ref=source_ref
)
self.attribute_name = attribute_name
def getAttributeName(self):
return self.attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def computeExpression(self, trace_collection):
return self.subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name=self.attribute_name
)
@staticmethod
def isKnownToBeIterable(count):
# TODO: Could be known. We would need for computeExpressionAttribute to
# either return a new node, or a decision maker.
return None
class ExpressionAttributeLookupSpecial(ExpressionAttributeLookup):
"""Special lookup up an attribute of an object.
Typically from code like this: with source: pass
These directly go to slots, and are performed for with statements
of Python2.7 or higher.
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_SPECIAL"
def computeExpression(self, trace_collection):
return self.subnode_expression.computeExpressionAttributeSpecial(
lookup_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
class ExpressionBuiltinGetattr(ExpressionChildrenHavingBase):
"""Built-in "getattr".
Typical code like this: getattr(object_arg, name, default)
The default is optional, but computed before the lookup is done.
"""
kind = "EXPRESSION_BUILTIN_GETATTR"
named_children = ("expression", "name", "default")
def __init__(self, expression, name, default, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "name": name, "default": default},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
trace_collection.onExceptionRaiseExit(BaseException)
default = self.subnode_default
if default is None or not default.mayHaveSideEffects():
attribute = self.subnode_name
attribute_name = attribute.getStringValue()
if attribute_name is not None:
source = self.subnode_expression
if source.isKnownToHaveAttribute(attribute_name):
# If source has side effects, they must be evaluated, before
# the lookup, meaning, a temporary variable should be assigned.
# For now, we give up in this case.
side_effects = source.extractSideEffects()
if not side_effects:
result = ExpressionAttributeLookup(
expression=source,
attribute_name=attribute_name,
source_ref=self.source_ref,
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=attribute
)
return (
result,
"new_expression",
"""Replaced call to built-in 'getattr' with constant \
attribute '%s' to mere attribute lookup"""
% attribute_name,
)
return self, None, None
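# At the Python source level the optimization above corresponds roughly to the
# rewrite (a sketch, assuming the attribute name is a compile time constant
# string, the default is absent or side-effect free, and the looked-up object
# itself has no side effects):
#
#   getattr(some_obj, "attr")   ->   some_obj.attr
#
# with any side effects of the name expression preserved in front of the lookup.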
class ExpressionBuiltinSetattr(ExpressionChildrenHavingBase):
"""Built-in "setattr".
Typical code like this: setattr(source, attribute, value)
"""
kind = "EXPRESSION_BUILTIN_SETATTR"
named_children = ("expression", "attribute", "value")
def __init__(self, expression, name, value, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "attribute": name, "value": value},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
trace_collection.onExceptionRaiseExit(BaseException)
# Note: Might be possible to predict or downgrade to mere attribute set.
return self, None, None
class ExpressionBuiltinHasattr(ExpressionChildrenHavingBase):
kind = "EXPRESSION_BUILTIN_HASATTR"
named_children = ("expression", "attribute")
def __init__(self, expression, name, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "attribute": name},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
        # We do optimization here at least for compile time constants, but more
        # could be done if we knew shapes.
source = self.subnode_expression
if source.isCompileTimeConstant():
attribute = self.subnode_attribute
attribute_name = attribute.getStringValue()
if attribute_name is not None:
# If source or attribute have side effects, they must be
# evaluated, before the lookup.
(
result,
tags,
change_desc,
) = trace_collection.getCompileTimeComputationResult(
node=self,
computation=lambda: hasattr(
source.getCompileTimeConstant(), attribute_name
),
description="Call to 'hasattr' pre-computed.",
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=attribute
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=source
)
return result, tags, change_desc
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
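# Illustrative effect of the compile time evaluation above (an assumed example,
# not taken from the code base): with a constant source such as
#
#     hasattr("abc", "upper")
#
# the node is folded to the constant True, and any side effects of the attribute
# and source expressions are re-attached via wrapExpressionWithNodeSideEffects.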
class ExpressionAttributeCheck(ExpressionChildHavingBase):
kind = "EXPRESSION_ATTRIBUTE_CHECK"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
ExpressionChildHavingBase.__init__(
self, value=expression, source_ref=source_ref
)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def computeExpression(self, trace_collection):
        # We do optimization here at least for compile time constants, but more
        # could be done if we knew shapes.
source = self.subnode_expression
if source.isCompileTimeConstant():
(
result,
tags,
change_desc,
) = trace_collection.getCompileTimeComputationResult(
node=self,
computation=lambda: hasattr(
source.getCompileTimeConstant(), self.attribute_name
),
description="Attribute check has been pre-computed.",
)
            # If source has side effects, they must be evaluated.
result = wrapExpressionWithNodeSideEffects(new_node=result, old_node=source)
return result, tags, change_desc
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
@staticmethod
def mayRaiseException(exception_type):
return False
def getAttributeName(self):
return self.attribute_name
|
import warnings
warnings.warn('The sre module is deprecated, please import re.', DeprecationWarning, 2)
from re import *
from re import __all__
from re import _compile
|
"""A tf.distribute.Strategy for running on a single device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import values
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export("distribute.OneDeviceStrategy", v1=[])
class OneDeviceStrategy(distribute_lib.Strategy):
"""A distribution strategy for running on a single device.
Using this strategy will place any variables created in its scope on the
specified device. Input distributed through this strategy will be
  prefetched to the specified device. Moreover, any functions called via
  `strategy.run` will also be placed on the specified device.
Typical usage of this strategy could be testing your code with the
tf.distribute.Strategy API before switching to other strategies which
actually distribute to multiple devices/machines.
For example:
```
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
with strategy.scope():
v = tf.Variable(1.0)
print(v.device) # /job:localhost/replica:0/task:0/device:GPU:0
def step_fn(x):
return x * 2
result = 0
for i in range(10):
result += strategy.run(step_fn, args=(i,))
print(result) # 90
```
"""
def __init__(self, device):
"""Creates a `OneDeviceStrategy`.
Args:
device: Device string identifier for the device on which the variables
should be placed. See class docs for more details on how the device is
used. Examples: "/cpu:0", "/gpu:0", "/device:CPU:0", "/device:GPU:0"
"""
super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"OneDeviceStrategy")
def experimental_distribute_dataset(self, dataset): # pylint: disable=useless-super-delegation
"""Distributes a tf.data.Dataset instance provided via dataset.
In this case, there is only one device, so this is only a thin wrapper
around the input dataset. It will, however, prefetch the input data to the
specified device. The returned distributed dataset can be iterated over
similar to how regular datasets can.
NOTE: Currently, the user cannot add any more transformations to a
distributed dataset.
Example:
```
    strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
dataset = tf.data.Dataset.range(10).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for x in dist_dataset:
print(x) # [0, 1], [2, 3],...
```
Args:
dataset: `tf.data.Dataset` to be prefetched to device.
Returns:
A "distributed `Dataset`" that the caller can iterate over.
"""
return super(OneDeviceStrategy, self).experimental_distribute_dataset(
dataset)
def experimental_distribute_datasets_from_function(self, dataset_fn): # pylint: disable=useless-super-delegation
"""Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.
`dataset_fn` will be called once for each worker in the strategy. In this
case, we only have one worker and one device so `dataset_fn` is called
once.
    The `dataset_fn` should take a `tf.distribute.InputContext` instance where
information about batching and input replication can be accessed:
```
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
return d.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
inputs = strategy.experimental_distribute_datasets_from_function(dataset_fn)
for batch in inputs:
replica_results = strategy.run(replica_fn, args=(batch,))
```
IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a
per-replica batch size, unlike `experimental_distribute_dataset`, which uses
the global batch size. This may be computed using
`input_context.get_per_replica_batch_size`.
Args:
dataset_fn: A function taking a `tf.distribute.InputContext` instance and
returning a `tf.data.Dataset`.
Returns:
A "distributed `Dataset`", which the caller can iterate over like regular
datasets.
"""
return super(
OneDeviceStrategy, self).experimental_distribute_datasets_from_function(
dataset_fn)
def experimental_local_results(self, value): # pylint: disable=useless-super-delegation
"""Returns the list of all local per-replica values contained in `value`.
In `OneDeviceStrategy`, the `value` is always expected to be a single
value, so the result is just the value in a tuple.
Args:
value: A value returned by `experimental_run()`, `run()`,
`extended.call_for_each_replica()`, or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
value, this returns `(value,).`
"""
return super(OneDeviceStrategy, self).experimental_local_results(value)
def run(self, fn, args=(), kwargs=None, options=None): # pylint: disable=useless-super-delegation
"""Run `fn` on each replica, with the given arguments.
In `OneDeviceStrategy`, `fn` is simply called within a device scope for the
given device, with the provided arguments.
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Return value from running `fn`.
"""
return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)
def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation
"""Reduce `value` across replicas.
In `OneDeviceStrategy`, there is only one replica, so if axis=None, value
is simply returned. If axis is specified as something other than None,
such as axis=0, value is reduced along that axis and returned.
Example:
```
t = tf.range(10)
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=None).numpy()
# result: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=0).numpy()
# result: 45
```
Args:
reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
be combined.
value: A "per replica" value, e.g. returned by `run` to
be combined into a single tensor.
axis: Specifies the dimension to reduce along within each
replica's tensor. Should typically be set to the batch dimension, or
`None` to only reduce across replicas (e.g. if the tensor has no batch
dimension).
Returns:
A `Tensor`.
"""
return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)
def scope(self): # pylint: disable=useless-super-delegation
"""Returns a context manager selecting this Strategy as current.
Inside a `with strategy.scope():` code block, this thread
will use a variable creator set by `strategy`, and will
enter its "cross-replica context".
In `OneDeviceStrategy`, all variables created inside `strategy.scope()`
will be on `device` specified at strategy construction time.
See example in the docs for this class.
Returns:
A context manager to use for creating variables with this strategy.
"""
return super(OneDeviceStrategy, self).scope()
@tf_export(v1=["distribute.OneDeviceStrategy"]) # pylint: disable=missing-docstring
class OneDeviceStrategyV1(distribute_lib.StrategyV1):
__doc__ = OneDeviceStrategy.__doc__.replace(
"For example:\n ```",
"For example:\n ```\n tf.enable_eager_execution()")
def __init__(self, device):
super(OneDeviceStrategyV1, self).__init__(OneDeviceExtended(self, device))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"OneDeviceStrategy")
__init__.__doc__ = OneDeviceStrategy.__init__.__doc__
class OneDeviceExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of OneDeviceStrategy."""
def __init__(self, container_strategy, device):
super(OneDeviceExtended, self).__init__(container_strategy)
self._device = device_util.resolve(device)
suffix_loc = self._device.rfind("/")
self._input_device = self._device[:suffix_loc] + "/device:CPU:0"
worker_device_pairs = [(self._input_device, [self._device])]
self._input_workers = input_lib.InputWorkers(worker_device_pairs)
def _create_variable(self, next_creator, **kwargs):
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
with ops.device(self._device):
return next_creator(**kwargs)
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(**kwargs)
else:
with ops.colocate_with(colocate_with):
return next_creator(**kwargs)
def _validate_colocate_with_variable(self, colocate_with_variable):
values.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterator from dataset without splitting the batch."""
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
return input_lib.DatasetIterator(dataset, self._input_workers,
self._container_strategy())
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._input_device), session)
def _broadcast_to(self, tensor, destinations):
del destinations
return tensor
def _experimental_distribute_dataset(self, dataset):
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
return input_lib.get_distributed_dataset(dataset, self._input_workers,
self._container_strategy())
def _experimental_distribute_datasets_from_function(self, dataset_fn):
return input_lib.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _experimental_distribute_values_from_function(self, value_fn):
# TODO(b/137795644): This should return a PerReplica value but other
# methods like run in OneDeviceStrategy need to be modified
# to do the same.
return value_fn(distribute_lib.ValueContext())
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def body(i, *args):
"""A wrapper around `fn` to create the while loop body."""
del args
fn_result = fn(ctx, iterator.get_next())
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
with ops.control_dependencies([fn_result]):
return [i + 1] + flat_last_step_outputs
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop. This is useful in cases where we might need to exit
# these contexts and get back to the outer context to do some things, for
# e.g. create an op which should be evaluated only once at the end of the
# loop on the host. One such usage is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
# TODO(priyag): Use max_iterations instead of an explicit counter.
cond = lambda i, *args: i < iterations
i = constant_op.constant(0)
loop_result = control_flow_ops.while_loop(
cond, body, [i] + initial_loop_values, name="",
parallel_iterations=1, back_prop=False, swap_memory=False,
return_same_structure=True)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(loop_result)
# Convert the last_step_outputs from a list to the original dict structure
# of last_step_outputs.
last_step_tensor_outputs = loop_result[1:]
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
strategy = self._container_strategy()
with ops.device(self._device), _OneDeviceReplicaContext(strategy):
return fn(*args, **kwargs)
def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
del reduce_op, destinations, experimental_hints
return value
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._device), distribute_lib.UpdateContext(self._device):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def read_var(self, replica_local_var):
"""Read the aggregate value of a replica-local variable."""
return array_ops.identity(replica_local_var)
def _local_results(self, value):
return (value,)
def value_container(self, value):
return value
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return False
@property
def _num_replicas_in_sync(self):
return 1
@property
def worker_devices(self):
return (self._device,)
@property
def parameter_devices(self):
return (self._device,)
def non_slot_devices(self, var_list):
del var_list
return (self._device,)
@property
def experimental_should_init(self):
return True
@property
def experimental_between_graph(self):
return False
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""Global and per-replica batching are equivalent for OneDeviceStrategy."""
return True
@property
def _support_per_replica_values(self):
return False
class _OneDeviceReplicaContext(distribute_lib.ReplicaContext):
"""ReplicaContext for OneDeviceStrategy."""
def __init__(self, strategy):
zero = constant_op.constant(0, dtypes.int32)
distribute_lib.ReplicaContext.__init__(
self, strategy, replica_id_in_sync_group=zero)
@property
def devices(self):
return self._strategy.extended.worker_devices
|
from tqdm.auto import tqdm
from termcolor import cprint
import codecs
from itertools import permutations
from geopy.distance import distance
import numpy as np
import json
import sys
FNAME_IN = "data/cities15000.txt"
OUT_DIR = "data/"
MAPPING = {
'name': 1,
'lat': 4,
'lon': 5,
'country_code': 8,
'population': 14,
'elevation': 15
}
COUNTRIES = {
"FR": "France",
"DE": "Germany",
"IT": "Italy",
"ES": "Spain",
"PT": "Portugal",
"BE": "Belgium",
"NL": "Netherlands",
"CH": "Switzerland"
}
MIN_LAT = 35 # avoid islands that don't make sense for TSP
BLACKLIST = ['Palma']
TOTAL_CITIES = 24482
def main():
if len(sys.argv) != 2:
cprint("Usage %s num_cities" % sys.argv[0], 'red')
quit()
num_cities = int(sys.argv[1])
# city data to array
cities = []
f = codecs.open(FNAME_IN, 'r', encoding='utf8')
pb = tqdm(total=TOTAL_CITIES, desc='Parsing cities', unit='cities')
for idx, row in enumerate(f):
row = row.split('\t')
# keep only cities from the countries we are interested in
country_code = row[MAPPING['country_code']].upper()
if country_code in COUNTRIES:
city = {}
for field_name, field_idx in MAPPING.items():
city[field_name] = row[field_idx]
city['country_name'] = COUNTRIES[country_code]
if city['population'] == '':
continue
if city['name'] in BLACKLIST:
continue
city['population'] = int(city['population'])
city['lat'] = float(city['lat'])
city['lon'] = float(city['lon'])
if city['elevation'] != '':
city['elevation'] = int(city['elevation'])
else:
city['elevation'] = 0
if city['lat'] < MIN_LAT:
continue
cities.append(city)
pb.update()
pb.close()
# select cities
cprint('Found %d cities' % (len(cities)), 'green')
cprint("Selecting %d largest cities" % num_cities, 'blue')
cities = sorted(cities, key=lambda k: k['population'], reverse=True)
cities = cities[:num_cities]
# assign id
for idx, city in enumerate(cities):
city['idx'] = idx
fname = "%scities_%s.json" % (OUT_DIR, num_cities)
with open(fname, 'w+') as out:
out.write(json.dumps(cities))
# distance matrix
    total = len(cities) * (len(cities) - 1)
    # Using int16 to save memory: no two cities on Earth are more than
    # ~20000km apart (we don't consider Mars cities :)
distances_matrix = np.zeros((len(cities), len(cities)), dtype='int16')
pb = tqdm(total=total, desc='Computing geodesic distances')
for p in permutations(cities, r=2):
loc1 = (p[0]['lat'], p[0]['lon'])
loc2 = (p[1]['lat'], p[1]['lon'])
idx1 = p[0]['idx']
idx2 = p[1]['idx']
dist = distance(loc1, loc2).km
distances_matrix[idx1][idx2] = int(dist)
pb.update()
pb.close()
fname = "%sdistances_%s.npz" % (OUT_DIR, num_cities)
np.savez_compressed(fname, distances=distances_matrix)
cprint('data ready! %s' % fname, 'green')
if __name__ == "__main__":
main()
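# Usage sketch (the script file name below is an assumption; the output file
# names follow the patterns produced above):
#
#   python prepare_cities.py 50
#
# The generated artifacts could then be loaded like this:
#
#   import json
#   import numpy as np
#   with open("data/cities_50.json") as f:
#       cities = json.load(f)
#   distances = np.load("data/distances_50.npz")["distances"]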
|
from __future__ import division, print_function, unicode_literals
__doc__ = """
This is the first step to create narrow diacritics for /i and /j.
It duplicates "dieresiscomb", "gravecomb", "acutecomb", "brevecomb", "tildecomb", "macroncomb", "ogonekcomb", adding a ".narrow" suffix.
The next step will rename those glyphs to .narrow and update the components for /i and /j.
"""
font = Glyphs.font
diacritics = ["dieresiscomb", "gravecomb", "acutecomb", "brevecomb", "tildecomb", "macroncomb", "ogonekcomb"]
narrow_diacritics = []
for d in diacritics:
g_narrow = "%s.narrow" % d
narrow_diacritics.append(g_narrow)
if font.glyphs[g_narrow]:
print("%s already exist." % g_narrow)
pass
else:
g = font.glyphs[d]
g.duplicate(g_narrow)
print("%s was created" % g_narrow)
for i, master in enumerate(font.masters):
layer1 = font.glyphs['idotless'].layers[font.masters[i].id]
for d in narrow_diacritics:
layer2 = font.glyphs[d].layers[font.masters[i].id]
        background_layer2 = layer2.background
        # Copy the idotless outline into the background of the narrow diacritic.
        for path in layer1.paths:
            new_path = path.copy()
            background_layer2.paths.append(new_path)
        print("idotless %s was copied to %s %s background layer" % (layer1.name, d, layer2.name))
ndiacritics = '/'.join([i for i in narrow_diacritics])
glyph_str = ''
for g in narrow_diacritics:
glyph_str += "/" + g
font.newTab(glyph_str)
Glyphs.showMacroWindow()
|
import sys
if sys.version_info >= (3, 8):
# noinspection PyUnresolvedReferences
from typing import Protocol
else:
# noinspection PyUnresolvedReferences
from typing_extensions import Protocol
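# Minimal usage sketch of the imported Protocol (illustrative only; the class
# name is made up):
#
#   class SupportsClose(Protocol):
#       def close(self) -> None: ...
#
# Any object with a matching close() method then satisfies this structural type
# for static type checkers.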
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import pandas as pd
import numpy as np
class CSVReader(object):
def __init__(self, filepath, batch_size):
''' Read in CSV.
        The format of the csv should be:
        <tokens><features><label>, i.e.,
        if there are N words and I features per word:
        w1,w2,w3,...,wn,f1_1,f1_2,f2_1,f2_2,...,fn_i,label
        For example, say we had two features per word, capitalization and suffix,
        and a part of speech label for the center word:
        w_-1, w_0, w_1, f_caps_-1, f_suff_-1, f_caps_0, f_suff_0, f_caps_1, f_suff_1, label
        the , dog, ate, TITLE , he , LOWER , og , LOWER , at , NOUN
        w_0 is the word to tag with the label. caps_0 and suff_0 are its
        capitalization and suffix, respectively.
        The headers for words MUST be prefixed w_<i>, a "w" followed by the relative
        position of the center.
        The headers for features MUST be prefixed <feat_name>_<i>.
'''
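        # Illustrative CSV layout matching the description above (a single
        # "caps" feature per word and a three-word window; values are made up):
        #
        #   w_-1,w_0,w_1,f_caps_-1,f_caps_0,f_caps_1,label
        #   the,dog,ate,LOWER,LOWER,LOWER,NOUN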
self.filepath = filepath
self.batch_size = batch_size
self.df = pd.read_csv(filepath)
self.word_cols = [c for c in self.df.columns if c.startswith('w_')]
self.seq_len = len(self.word_cols)
non_feats = set(self.word_cols + ['label'])
self.feat_cols = [c for c in self.df.columns if c.startswith('f_')]
self.num_feats = int(len(self.feat_cols) / len(self.word_cols))
# sanity check
assert len(self.feat_cols) == len(self.word_cols) * self.num_feats
def batcher(self, num_epochs):
self.epoch = 0
df = self.df
while self.epoch < num_epochs:
            indices = list(range(len(df)))
            random.shuffle(indices)
num_batches = len(df) // self.batch_size
last_row = num_batches * self.batch_size
for i in range(0, last_row, self.batch_size):
#print i, i+self.batch_size
                batch = df.iloc[indices[i:i+self.batch_size]]
                tokens = batch[self.word_cols].values
                features = batch[self.feat_cols].values
features = np.reshape(features, (self.batch_size, self.seq_len, self.num_feats))
labels = batch['label'].values
yield tokens, features, labels
self.epoch += 1
if __name__ == '__main__':
import sys
csvreader = CSVReader(sys.argv[1], 9)
feats = []
toks = []
labels = []
    for t, f, l in csvreader.batcher(1):
        print(t)
        print(f)
        print(l)
|
import os
import requests
import simplejson as json
import settings as s
from boxviewerror import raise_for_view_error
DOCUMENTS_RESOURCE = '/documents'
SESSIONS_RESOURCE = '/sessions'
VIEW_RESOURCE = '/view'
PROCESSING = 'processing'
DONE = 'done'
def _set_token_from_env():
box_view_token = os.environ.get('BOX_VIEW_TOKEN')
if not box_view_token:
raise ValueError(
"""Calling BoxViewClient() with no arguments requires environment
variables to be set like this:
>>> import os
>>> os.environ['BOX_VIEW_TOKEN'] = YOUR_BOX_VIEW_TOKEN
"""
)
return box_view_token
class BoxViewClient(object):
"""A simple wrapper around the Box View API
Args:
api_token: A valid box view api token, get one here: bit.ly/boxapikey
    Attributes:
        requests: A `requests.Session` carrying the token auth header.
        url: Base URL of the Box View API.
        upload_url: Base URL for multipart document uploads.
    """
def __init__(self, api_token=None):
if not api_token:
api_token = _set_token_from_env()
auth_header = {'Authorization': 'Token {}'.format(api_token)}
self.requests = requests.session()
self.requests.headers = auth_header
self.url = s.VIEW_API_URL
self.upload_url = s.UPLOAD_VIEW_API_URL
# Core API Methods
@raise_for_view_error
def upload_document(self, url):
"""
"""
resource = '{}{}'.format(self.url, DOCUMENTS_RESOURCE)
headers = {'Content-type': 'application/json'}
data = json.dumps({'url': url})
response = self.requests.post(resource, headers=headers, data=data)
return response
@raise_for_view_error
def multipart_upload_document(self, document):
"""
"""
resource = '{}{}'.format(self.upload_url, DOCUMENTS_RESOURCE)
files = {'file': document}
response = self.requests.post(resource, files=files)
return response
@raise_for_view_error
def get_document(self, document_id):
"""
"""
resource = '{}{}/{}'.format(
self.url,
DOCUMENTS_RESOURCE,
document_id
)
response = self.requests.get(resource)
return response
@raise_for_view_error
def create_session(self, document_id, expires_at=None):
"""
"""
resource = '{}{}'.format(self.url, SESSIONS_RESOURCE)
headers = {'Content-type': 'application/json'}
data = {'document_id': document_id}
if expires_at:
data['expires_at'] = expires_at
data = json.dumps(data)
response = self.requests.post(resource, headers=headers, data=data)
return response
# Convenience Methods
def ready_to_view(self, document_id):
"""
"""
document_status = self.get_document_status(document_id)
return document_status == DONE
def get_document_status(self, document_id):
"""
"""
document = self.get_document(document_id).json()
return document['status']
@staticmethod
def create_session_url(session_id, theme=None):
"""
"""
if not theme:
theme = 'light'
return '{}{}/{}{}?theme={}'.format(
s.VIEW_API_URL,
SESSIONS_RESOURCE,
session_id,
VIEW_RESOURCE,
theme
)
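# Usage sketch (token and URL are placeholders; reading an "id" field assumes the
# usual Box View JSON response shape):
#
#   client = BoxViewClient(api_token="YOUR_BOX_VIEW_TOKEN")
#   document_id = client.upload_document("https://example.com/some.pdf").json()["id"]
#   if client.ready_to_view(document_id):
#       session_id = client.create_session(document_id).json()["id"]
#       print(BoxViewClient.create_session_url(session_id, theme="dark"))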
|
import sys, os
import cloud_sptheme as csp
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = ['sphinx.ext.autodoc','rst2pdf.pdfbuilder','sphinxcontrib.plantuml']
plantuml = ['java','-jar','/sbin/plantuml.jar']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'contents'
project = u'Mirantis OpenStack'
copyright = u'2013, Mirantis Inc.'
version = '4.0'
release = '4.0'
exclude_patterns = ['_*', "pages", 'pdf', 'contents', 'index', '*-guide']
pygments_style = 'sphinx'
html_theme = "mirantis"
html_theme_options = { "roottarget": "index" }
html_theme_path = ["_templates", csp.get_theme_dir()]
html_add_permalinks = None
html_title = 'Mirantis OpenStack ' + 'v' + release + ' | Documentation'
html_logo = '_static/fuel_gradient_200.png'
html_favicon = '_static/mirantis_icon.ico'
html_static_path = ['_static']
html_use_smartypants = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'doc_license.html'],
}
html_use_index = True
html_split_index = False
html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = True
htmlhelp_basename = 'fueldoc'
latex_elements = {
}
latex_documents = [
('index', 'fuel.tex', u'Mirantis OpenStack | Documentation',
u'Mirantis Inc.', 'manual'),
]
man_pages = [
('index', 'fuel', u'Mirantis OpenStack | Documentation',
[u'Mirantis'], 1)
]
texinfo_documents = [
('index', 'fuel', u'Mirantis OpenStack | Documentation',
u'Mirantis Inc.', 'fuel', 'One line description of project.',
'Miscellaneous'),
]
source_encoding = 'utf-8'
blockdiag_antialias = True
actdiag_antialias = True
seqdiag_antialias = True
nwdiag_antialias = True
extensions += ['rst2pdf.pdfbuilder']
pdf_documents = [
('relnotes/index', u'Mirantis-OpenStack-4.0-RelNotes', u'Release Notes',
u'2013, Mirantis Inc.')
]
pdf_stylesheets = ['letter', 'mirantis']
pdf_style_path = ['_templates']
pdf_fit_mode = "shrink"
pdf_break_level = 1
pdf_breakside = 'any'
pdf_verbosity = 0
pdf_use_index = False
pdf_cover_template = 'mirantiscover.tmpl'
pdf_page_template = 'oneColumn'
pdf_use_toc = False
pdf_toc_depth = 2
pdf_fit_background_mode = 'scale'
pdf_font_path = ['C:\\Windows\\Fonts\\', '/usr/share/fonts', '_fonts']
|
'''
Integration Test
For NFS/SMP/Ceph/FusionStor
Check:
1. Stop the ha vm and check that it changed to stopped.
2. Loop step 1 for a total of 10 times.
3. Force stop the host the ha vm is located on to check the natural vm ha functionality.
@author: SyZhao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.operations.ha_operations as ha_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import apibinding.inventory as inventory
import time
import os
vm = None
host_uuid = None
host_ip = None
max_attempts = None
storagechecker_timeout = None
test_stub = test_lib.lib_get_test_stub()
test_host = None
def test():
global vm
global host_uuid
global host_ip
global max_attempts
    global storagechecker_timeout
    global test_host
test_lib.lib_skip_if_ps_num_is_not_eq_number(1)
allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint', 'AliyunNAS', inventory.FUSIONSTOR_PRIMARY_STORAGE_TYPE]
test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
test_lib.lib_cur_env_is_not_scenario()
if test_lib.lib_get_ha_enable() != 'true':
test_util.test_skip("vm ha not enabled. Skip test")
vm_creation_option = test_util.VmOption()
image_name = os.environ.get('imageName_net')
image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
test_lib.clean_up_all_vr()
mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
conditions = res_ops.gen_query_conditions('state', '=', 'Enabled')
conditions = res_ops.gen_query_conditions('status', '=', 'Connected', conditions)
conditions = res_ops.gen_query_conditions('managementIp', '!=', mn_ip, conditions)
host_uuid = res_ops.query_resource(res_ops.HOST, conditions)[0].uuid
vm_creation_option.set_host_uuid(host_uuid)
vm_creation_option.set_l3_uuids([l3_net_uuid])
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name('multihost_basic_vm')
vm = test_vm_header.ZstackTestVm()
vm.set_creation_option(vm_creation_option)
vm.create()
vm.check()
test_stub.ensure_host_has_no_vr(host_uuid)
host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
host_port = test_lib.lib_get_host_port(host_ip)
test_util.test_logger("host %s is disconnecting" %(host_ip))
ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, "NeverStop")
for i in range(10):
test_stub.stop_ha_vm(vm.get_vm().uuid)
vm.set_state(vm_header.STOPPED)
vm.check()
vm.start()
vm.check()
#vm ha natural feature:
host_list = test_stub.get_sce_hosts(test_lib.all_scenario_config, test_lib.scenario_file)
for host in host_list:
if host.ip_ == host_ip or hasattr(host, 'managementIp_') and host.managementIp_ == host_ip:
test_host = host
break
if not test_host:
test_util.test_fail('there is no host with ip %s in scenario file.' %(host_ip))
test_stub.stop_host(test_host, test_lib.all_scenario_config)
#test_util.test_logger("wait for 60 seconds")
#time.sleep(60)
test_stub.start_host(test_host, test_lib.all_scenario_config)
test_stub.recover_host_vlan(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
vm.set_state(vm_header.RUNNING)
vm.check()
vm.update()
if test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp == host_ip:
test_util.test_fail("VM is expected to start running on another host")
vm.destroy()
test_util.test_pass('Test checking vm status after graceful stop and start success')
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
def env_recover():
test_util.test_logger("recover host: %s" % (test_host.ip_))
test_stub.recover_host(test_host, test_lib.all_scenario_config, test_lib.deploy_config)
#host_ops.reconnect_host(host_uuid)
|
from google.cloud import bigquery_storage_v1beta2
def sample_split_read_stream():
# Create a client
client = bigquery_storage_v1beta2.BigQueryReadClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.SplitReadStreamRequest(name="name_value",)
# Make the request
response = client.split_read_stream(request=request)
# Handle the response
print(response)
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from nose.tools import nottest
@nottest
def basic_test_file_json(**kwargs):
return json.dumps([basic_test_event_data(**kwargs)])
@nottest
def basic_test_event_data(
log='misc_log_type',
service='unit-test-service',
source='unit-test-source',
override_data=None):
result = {
'data': {
'key': 'value'
},
'description': 'Integration test event for unit testing',
'log': log,
'service': service,
'source': source,
'trigger_rules': [
'misc_rule'
]
}
if override_data:
del result['data']
result['override_record'] = override_data
result['log'] = 'override_log_type'
return result
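# For reference, a call such as basic_test_file_json(log='custom_log') yields a
# JSON array containing a single event of roughly this shape (values as defined
# above):
#
#   [{"data": {"key": "value"},
#     "description": "Integration test event for unit testing",
#     "log": "custom_log",
#     "service": "unit-test-service",
#     "source": "unit-test-source",
#     "trigger_rules": ["misc_rule"]}]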
|
import webapp2
import logging
import json
import urllib2
import datetime
from google.appengine.api import urlfetch
from google.appengine.api import mail
from google.appengine.ext import ndb
from user.models import User
from models import Monster
from lib import tools
class MonsterMain(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/json'
self.response.write('')
class MonsterSet(webapp2.RequestHandler):
def get(self):
owner = self.request.get('owner')
name = self.request.get('name')
species = self.request.get('species')
if len(owner) > 0 and len(name) > 0:
user = User.get_by_username(owner)
if user is not None:
#logging.debug('Monster.get_by_user_name(%s, %s): %s' % (owner, name, Monster.get_by_user_name(user.key, name)))
if Monster.get_by_user_name(user.key, name) is not None:
monster = Monster.get_by_user_name(user.key, name)
monster.modified = datetime.datetime.now()
action = 'Modify'
else:
monster = Monster()
action = 'Create'
monster.owner = user.key
monster.name = name
if len(species) > 0:
try:
monster.species = int(species)
                    except (TypeError, ValueError):
pass
monster.put()
#logging.debug('User: %s' % user)
#logging.debug('Monster: %s' % monster)
userdict = tools.serialize_dict(user.to_dict())
monsterdict = tools.serialize_dict(monster.to_dict())
response = {'status': 0, 'message': 'Monster successfully created/modified.', 'action': action, 'user': userdict, 'monster': monsterdict}
else:
response = {'status': 2, 'message': 'Couldn\'t find user %s.' % owner}
else:
response = {'status': 1, 'message': 'Missing owner and/or name parameter.'}
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(response))
class MonsterGet(webapp2.RequestHandler):
def get(self):
owner = self.request.get('owner')
name = self.request.get('name')
key = self.request.get('key')
        if (len(owner) > 0 and len(name) > 0) or len(key) > 0:
if len(key) == 0:
url = 'http://%s/api/user/get?username=%s' % (tools.APP_HOSTNAME, owner)
result = urlfetch.fetch(url)
user = json.loads(result.content)['user']
                monster = Monster.get_by_user_name(ndb.Key(urlsafe=user['key']), name)
else:
monster = Monster.get_by_urlkey(key)
url = 'http://%s/api/user/get?key=%s' % (tools.APP_HOSTNAME, monster.owner.urlsafe())
logging.debug('Calling monster/get API: %s' % url)
result = urlfetch.fetch(url)
user = json.loads(result.content)['user']
if monster is not None:
response = {
'status': 0,
'message': 'Successfully retrieving monster %s.' % name,
'monster': tools.serialize_dict(monster.to_dict()),
'user': user
}
else:
                response = {'status': 1, 'message': 'Couldn\'t retrieve monster %s.' % name, 'owner': owner, 'name': name}
else:
response = {'status': 2, 'message': 'Missing parameter owner and/or name.', 'owner': owner, 'name': name}
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(response))
class MonsterFetch(webapp2.RequestHandler):
def get(self):
owner = self.request.get('owner')
if len(owner) > 0:
user = User.get_by_username(owner)
if user is not None:
monsters = Monster.fetch_by_userkey(user.key)
monsterlist = []
for monster in monsters:
monsterlist.append(tools.serialize_dict(monster.to_dict()))
response = {'status': 0, 'message': 'Successfully fetching monsters for user %s' % owner, 'monsters': monsterlist, 'user': tools.serialize_dict(user.to_dict())}
else:
response = {'status': 1, 'message': 'User %s not found.' % owner, 'owner': owner}
else:
response = {'status': 2, 'message': 'Missing parameter owner'}
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(response))
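# Example response shape for a successful MonsterFetch request (illustrative; the
# URL route that maps to this handler is configured elsewhere):
#
#   {"status": 0,
#    "message": "Successfully fetching monsters for user <owner>",
#    "monsters": [...serialized monster dicts...],
#    "user": {...serialized user dict...}}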
|
import sqlite3
schema_filename = 'todo_schema.sql'
with sqlite3.connect(':memory:') as conn:
conn.row_factory = sqlite3.Row
print('Creating schema')
with open(schema_filename, 'rt') as f:
schema = f.read()
conn.executescript(schema)
print('Inserting initial data')
conn.execute("""
insert into project (name, description, deadline)
values ('pymotw', 'Python Module of the Week',
'2010-11-01')
""")
data = [
('write about select', 'done', '2010-10-03',
'pymotw'),
('write about random', 'waiting', '2010-10-10',
'pymotw'),
('write about sqlite3', 'active', '2010-10-17',
'pymotw'),
]
conn.executemany("""
insert into task (details, status, deadline, project)
values (?, ?, ?, ?)
""", data)
print('Dumping:')
for text in conn.iterdump():
print(text)
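    # The dumped statements form a complete SQL script; as a sketch, it could be
    # replayed into a fresh in-memory database using only the sqlite3 API shown
    # above:
    #
    #   dump_sql = '\n'.join(conn.iterdump())
    #   with sqlite3.connect(':memory:') as copy:
    #       copy.executescript(dump_sql)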
|
"""
web.blueprints.finance
~~~~~~~~~~~~~~~~~~~~~~
This module defines view functions for /finance
:copyright: (c) 2012 by AG DSN.
"""
from datetime import timedelta, datetime, date
from functools import partial
from itertools import groupby, zip_longest, chain
from io import StringIO
from flask import (
Blueprint, abort, flash, jsonify, redirect, render_template, request,
url_for)
from flask_login import current_user
from flask_wtf import FlaskForm
from sqlalchemy import func, or_, and_, Text, cast
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from wtforms import BooleanField
from pycroft import config, lib
from pycroft.helpers.i18n import localized
from pycroft.helpers.util import map_or_default
from pycroft.lib import finance
from pycroft.lib.finance import get_typed_splits, \
end_payment_in_default_memberships, \
post_transactions_for_membership_fee, build_transactions_query, \
match_activities, take_actions_for_payment_in_default_users
from pycroft.lib.user import encode_type2_user_id
from pycroft.model.finance import (
BankAccount, BankAccountActivity, Split, MembershipFee, MT940Error)
from pycroft.model.session import session
from pycroft.model.user import User
from pycroft.model.finance import Account, Transaction
from web.blueprints.access import BlueprintAccess
from web.blueprints.helpers.fints import FinTS3Client
from web.blueprints.helpers.table import date_format
from web.blueprints.finance.forms import (
AccountCreateForm, BankAccountCreateForm, BankAccountActivityEditForm,
BankAccountActivitiesImportForm, TransactionCreateForm,
MembershipFeeCreateForm, MembershipFeeEditForm, FeeApplyForm,
    HandlePaymentsInDefaultForm, FixMT940Form, BankAccountActivityReadForm)
from web.blueprints.finance import forms
from web.blueprints.finance.tables import FinanceTable, FinanceTableSplitted, \
MembershipFeeTable, UsersDueTable, BankAccountTable, \
BankAccountActivityTable, TransactionTable, ImportErrorTable, \
UnconfirmedTransactionsTable
from web.blueprints.navigation import BlueprintNavigation
from web.template_filters import date_filter, money_filter, datetime_filter
from web.template_tests import privilege_check
from web.templates import page_resources
from web.blueprints.helpers.api import json_agg_core
from sqlalchemy.sql.expression import literal_column, func, select, Join
from fints.dialog import FinTSDialogError
from fints.exceptions import FinTSClientPINError
from fints.utils import mt940_to_array
from datetime import date
bp = Blueprint('finance', __name__)
access = BlueprintAccess(bp, required_properties=['finance_show'])
nav = BlueprintNavigation(bp, "Finanzen", blueprint_access=access)
@bp.route('/')
@bp.route('/bank-accounts')
@bp.route('/bank-accounts/list')
@nav.navigate(u"Bankkonten")
def bank_accounts_list():
bank_account_table = BankAccountTable(
data_url=url_for('.bank_accounts_list_json'),
create_account=privilege_check(current_user, 'finance_change'))
bank_account_activity_table = BankAccountActivityTable(
data_url=url_for('.bank_accounts_activities_json'))
return render_template(
'finance/bank_accounts_list.html',
bank_account_table=bank_account_table,
bank_account_activity_table=bank_account_activity_table,
)
@bp.route('/bank-accounts/list/json')
def bank_accounts_list_json():
return jsonify(items=[
{
'name': bank_account.name,
'bank': bank_account.bank,
'ktonr': bank_account.account_number,
'blz': bank_account.routing_number,
'iban': bank_account.iban,
'bic': bank_account.bic,
'kto': {
'href': url_for('.accounts_show',
account_id=bank_account.account_id),
'title': 'Konto anzeigen',
'btn_class': 'btn-primary'
},
'balance': money_filter(bank_account.account.balance),
'last_imported_at': '{}'.format(
map_or_default(bank_account.last_imported_at, datetime.date,
'nie'))
} for bank_account in BankAccount.q.all()])
@bp.route('/bank-accounts/activities/json')
def bank_accounts_activities_json():
def actions(activity_id):
return [{
'href': url_for(
'.bank_account_activities_edit',
activity_id=activity_id),
'title': '',
'btn_class': 'btn-primary',
'icon': 'glyphicon-pencil'
}]
activity_q = (BankAccountActivity.q
.options(joinedload(BankAccountActivity.bank_account))
.filter(BankAccountActivity.transaction_id == None))
return jsonify(items=[{
'bank_account': activity.bank_account.name,
'name': activity.other_name,
'valid_on': date_format(activity.valid_on),
'imported_at': date_format(activity.imported_at),
'reference': activity.reference,
'amount': money_filter(activity.amount),
'iban': activity.other_account_number,
'actions': actions(activity.id),
} for activity in activity_q.all()])
@bp.route('/bank-accounts/import/errors/json')
def bank_accounts_errors_json():
return jsonify(items=[
{
'name': error.bank_account.name,
'fix': {
'href': url_for('.fix_import_error',
error_id=error.id),
'title': 'korrigieren',
'btn_class': 'btn-primary'
},
'imported_at': '{}'.format(
map_or_default(error.imported_at, datetime.date, 'nie'))
} for error in MT940Error.q.all()])
@bp.route('/bank-accounts/import', methods=['GET', 'POST'])
@access.require('finance_change')
@nav.navigate(u"Bankkontobewegungen importieren")
def bank_accounts_import():
form = BankAccountActivitiesImportForm()
form.account.choices = [ (acc.id, acc.name) for acc in BankAccount.q.all()]
(transactions, old_transactions) = ([], [])
if request.method != 'POST':
del(form.start_date)
form.end_date.data = date.today() - timedelta(days=1)
if form.validate_on_submit():
bank_account = BankAccount.q.get(form.account.data)
# set start_date, end_date
if form.start_date.data is None:
form.start_date.data = map_or_default(bank_account.last_imported_at,
datetime.date, date(2018, 1, 1))
if form.end_date.data is None:
form.end_date.data = date.today()
# login with fints
process = True
try:
fints = FinTS3Client(
bank_account.routing_number,
form.user.data,
form.pin.data,
bank_account.fints_endpoint
)
acc = next((a for a in fints.get_sepa_accounts()
if a.iban == bank_account.iban), None)
if acc is None:
raise KeyError('BankAccount with IBAN {} not found.'.format(
bank_account.iban)
)
start_date = form.start_date.data
end_date = form.end_date.data
statement, with_error = fints.get_filtered_transactions(
acc, start_date, end_date)
flash(
"Transaktionen vom {} bis {}.".format(start_date, end_date))
if len(with_error) > 0:
flash("{} Statements enthielten fehlerhafte Daten und müssen "
"vor dem Import manuell korrigiert werden.".format(
len(with_error)), 'error')
except (FinTSDialogError, FinTSClientPINError):
flash(u"Ungültige FinTS-Logindaten.", 'error')
process = False
except KeyError:
            flash(u'Das gewünschte Konto kann mit diesem '
                  u'Online-Banking-Zugang nicht erreicht werden.', 'error')
process = False
if process:
(transactions, old_transactions) = finance.process_transactions(
bank_account, statement)
else:
(transactions, old_transactions) = ([], [])
if process and form.do_import.data is True:
# save errors to database
for error in with_error:
session.add(MT940Error(mt940=error[0], exception=error[1],
author=current_user,
bank_account=bank_account))
# save transactions to database
session.add_all(transactions)
session.commit()
flash(u'Bankkontobewegungen wurden importiert.')
return redirect(url_for(".accounts_show",
account_id=bank_account.account_id))
return render_template('finance/bank_accounts_import.html', form=form,
transactions=transactions,
old_transactions=old_transactions)
@bp.route('/bank-accounts/importerrors', methods=['GET', 'POST'])
@access.require('finance_change')
def bank_accounts_import_errors():
error_table = ImportErrorTable(
data_url=url_for('.bank_accounts_errors_json'))
return render_template('finance/bank_accounts_import_errors.html',
page_title="Fehlerhafter Bankimport",
error_table=error_table)
@bp.route('/bank-accounts/importerrors/<error_id>', methods=['GET', 'POST'])
@access.require('finance_change')
def fix_import_error(error_id):
error = MT940Error.q.get(error_id)
form = FixMT940Form()
(transactions, old_transactions) = ([], [])
new_exception = None
if request.method != 'POST':
form.mt940.data = error.mt940
if form.validate_on_submit():
statement = []
try:
statement += mt940_to_array(form.mt940.data)
except Exception as e:
new_exception = str(e)
if new_exception is None:
flash('MT940 ist jetzt valide.', 'success')
(transactions, old_transactions) = finance.process_transactions(
error.bank_account, statement)
if form.do_import.data is True:
# save transactions to database
session.add_all(transactions)
session.delete(error)
session.commit()
flash(u'Bankkontobewegungen wurden importiert.')
return redirect(url_for(".bank_accounts_import_errors"))
else:
flash('Es existieren weiterhin Fehler.', 'error')
return render_template('finance/bank_accounts_error_fix.html',
error_id=error_id, exception=error.exception,
new_exception=new_exception, form=form,
transactions=transactions, old_transactions=old_transactions)
@bp.route('/bank-accounts/create', methods=['GET', 'POST'])
@access.require('finance_change')
def bank_accounts_create():
form = BankAccountCreateForm()
if form.validate_on_submit():
new_bank_account = BankAccount(
name=form.name.data,
bank=form.bank.data,
account_number=form.account_number.data,
routing_number=form.routing_number.data,
iban=form.iban.data,
bic=form.bic.data,
fints_endpoint=form.fints.data,
account=Account(name=form.name.data, type='BANK_ASSET'),
)
session.add(new_bank_account)
session.commit()
return redirect(url_for('.bank_accounts_list'))
return render_template('finance/bank_accounts_create.html',
form=form, page_title=u"Bankkonto erstellen")
@bp.route('/bank-account-activities/<activity_id>',
methods=["GET", "POST"])
def bank_account_activities_edit(activity_id):
activity = BankAccountActivity.q.get(activity_id)
if activity is None:
flash(u"Bankbewegung mit ID {} existiert nicht!".format(activity_id), 'error')
abort(404)
if activity.transaction_id is not None:
form = BankAccountActivityReadForm(
obj=activity, bank_account_name=activity.bank_account.name)
if activity.transaction_id:
flash(u"Bankbewegung ist bereits zugewiesen!".format(activity_id),
'warning')
form_args = {
'form': form,
'show_submit': False,
'show_cancel': False,
}
return render_template('generic_form.html',
page_title="Bankbewegung",
form_args=form_args,
form=form)
else:
form = BankAccountActivityEditForm(
obj=activity, bank_account_name=activity.bank_account.name, description=activity.reference)
if form.validate_on_submit():
debit_account = Account.q.filter(
Account.id == form.account_id.data
).one()
credit_account = activity.bank_account.account
transaction = finance.simple_transaction(
description=form.description.data, debit_account=debit_account,
credit_account=credit_account, amount=activity.amount,
author=current_user, valid_on=activity.valid_on,
confirmed=current_user.member_of(config.treasurer_group))
activity.split = next(split for split in transaction.splits
if split.account_id == credit_account.id)
session.add(activity)
end_payment_in_default_memberships()
session.commit()
flash(u"Transaktion erfolgreich erstellt.", 'success')
return redirect(url_for('.bank_accounts_list'))
form_args = {
'form': form,
'cancel_to': url_for('.bank_accounts_list'),
'submit_text': 'Zuweisen',
}
return render_template('generic_form.html',
page_title="Bankbewegung zuweisen",
form_args=form_args,
form=form)
@bp.route('/bank-account-activities/match/')
@access.require('finance_change')
def bank_account_activities_match():
FieldList = [
#("Field-Name",BooleanField('Text')),
]
matching = match_activities()
matched_activities = {}
for activity, user in matching.items():
matched_activities[str(activity.id)] = {
'purpose': activity.reference,
'name': activity.other_name,
'user': user,
'amount': activity.amount
}
FieldList.append((str(activity.id), BooleanField(str(activity.id),
default=True)))
class F(forms.ActivityMatchForm):
pass
for (name, field) in FieldList:
setattr(F, name, field)
form = F()
return render_template('finance/bank_accounts_match.html', form=form,
activities=matched_activities)
@bp.route('/bank-account-activities/match/do/', methods=['GET', 'POST'])
@access.require('finance_change')
def bank_account_activities_do_match():
# Generate form again
matching = match_activities()
matched = []
FieldList = []
for activity, user in matching.items():
FieldList.append(
(str(activity.id), BooleanField('{} ({}€) -> {} ({}, {})'.format(
activity.reference, activity.amount, user.name, user.id,
user.login
))))
class F(forms.ActivityMatchForm):
pass
for (name, field) in FieldList:
setattr(F, name, field)
form = F()
# parse data
if form.validate_on_submit():
# look for all matches which were checked
for activity, user in matching.items():
if form._fields[str(activity.id)].data is True and activity.transaction_id is None:
debit_account = user.account
credit_account = activity.bank_account.account
transaction = finance.simple_transaction(
description=activity.reference,
debit_account=debit_account,
credit_account=credit_account, amount=activity.amount,
author=current_user, valid_on=activity.valid_on)
activity.split = next(split for split in transaction.splits
if split.account_id == credit_account.id)
session.add(activity)
matched.append((activity, user))
end_payment_in_default_memberships()
session.flush()
session.commit()
return render_template('finance/bank_accounts_matched.html', matched=matched)
@bp.route('/accounts/')
@bp.route('/accounts/list')
@nav.navigate(u"Konten")
def accounts_list():
accounts_by_type = {
t[0]: list(t[1])
for t in groupby(
Account.q.filter_by(legacy=False).outerjoin(User).filter(User.id == None)
.order_by(Account.type).all(),
lambda a: a.type
)
}
accounts_by_type['LEGACY'] = Account.q.filter_by(legacy=True).all()
return render_template(
'finance/accounts_list.html', accounts=accounts_by_type
)
@bp.route('/account/<int:account_id>/toggle-legacy')
@access.require('finance_change')
def account_toggle_legacy(account_id):
account = Account.q.get(account_id)
if not account:
abort(404)
account.legacy = not account.legacy
session.commit()
flash("Der Status des Kontos wurde umgeschaltet.", "success")
return redirect(url_for('.accounts_show', account_id=account_id))
@bp.route('/accounts/<int:account_id>/balance/json')
def balance_json(account_id):
invert = request.args.get('invert', 'False') == 'True'
sum_exp = func.sum(Split.amount).over(order_by=Transaction.valid_on)
if invert:
sum_exp = -sum_exp
balance_json = (select([Transaction.valid_on,
sum_exp.label("balance")
])
.select_from(
Join(Split, Transaction,
Split.transaction_id==Transaction.id))
.where(Split.account_id == account_id))
res = session.execute(json_agg_core(balance_json)).first()[0]
return jsonify(items=res)
@bp.route('/accounts/<int:account_id>')
def accounts_show(account_id):
account = Account.q.get(account_id)
if account is None:
flash(u"Konto mit ID {} existiert nicht!".format(account_id), 'error')
abort(404)
try:
user = User.q.filter_by(account_id=account.id).one()
except NoResultFound:
user = None
except MultipleResultsFound:
user = User.q.filter_by(account_id=account.id).first()
flash(u"Es existieren mehrere Nutzer, die mit diesem Konto"
u" verbunden sind!", "warning")
inverted = account.type == "USER_ASSET"
_table_kwargs = {
'data_url': url_for("finance.accounts_show_json", account_id=account_id),
'saldo': account.balance,
'inverted': inverted
}
balance = -account.balance if inverted else account.balance
return render_template(
'finance/accounts_show.html',
account=account, user=user, balance=balance,
balance_json_url=url_for('.balance_json', account_id=account_id,
invert=inverted),
finance_table_regular=FinanceTable(**_table_kwargs),
finance_table_splitted=FinanceTableSplitted(**_table_kwargs),
account_name=localized(account.name, {int: {'insert_commas': False}})
)
def _format_row(split, style, prefix=None):
row = {
'posted_at': datetime_filter(split.transaction.posted_at),
#'posted_by': (split.transaction.author.id, split.transaction.author.name),
'valid_on': date_filter(split.transaction.valid_on),
'description': {
'href': url_for(
"finance.transactions_show",
transaction_id=split.transaction_id
),
'title': localized(split.transaction.description)
},
'amount': {
'value': money_filter(-split.amount) if (style == "inverted") else money_filter(split.amount),
'is_positive': (split.amount > 0) ^ (style == "inverted"),
},
'row_positive': (split.amount > 0) ^ (style == "inverted"),
}
if prefix is None:
return row
return {'{}_{}'.format(prefix, key): val for key, val in row.items()}
def _prefixed_merge(a, prefix_a, b, prefix_b):
result = {}
result.update(**{'{}_{}'.format(prefix_a, k): v
for k, v in a.items()})
result.update(**{'{}_{}'.format(prefix_b, k): v
for k, v in b.items()})
return result
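# Illustrative behaviour of the helper above (made-up values):
#
#   _prefixed_merge({'amount': 1}, 'soll', {'amount': 2}, 'haben')
#   == {'soll_amount': 1, 'haben_amount': 2}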
@bp.route('/accounts/<int:account_id>/json')
def accounts_show_json(account_id):
style = request.args.get('style')
limit = request.args.get('limit', type=int)
offset = request.args.get('offset', type=int)
sort_by = request.args.get('sort', default="valid_on")
sort_order = request.args.get('order', default="desc")
search = request.args.get('search')
splitted = request.args.get('splitted', default=False, type=bool)
if sort_by.startswith("soll_") or sort_order.startswith("haben_"):
sort_by = '_'.join(sort_by.split('_')[1:])
account = Account.q.get(account_id) or abort(404)
total = Split.q.join(Transaction).filter(Split.account == account).count()
build_this_query = partial(build_transactions_query,
account=account, search=search, sort_by=sort_by,
sort_order=sort_order, offset=offset,
limit=limit, eagerload=True)
def rows_from_query(query):
# iterating over `query` executes it
return [_format_row(split, style) for split in query]
if splitted:
rows_pos = rows_from_query(build_this_query(positive=True))
rows_neg = rows_from_query(build_this_query(positive=False))
_keys = ['posted_at', 'valid_on', 'description', 'amount']
_filler = {key: None for key in chain(('soll_'+key for key in _keys),
('haben_'+key for key in _keys))}
rows = [
_prefixed_merge(split_pos, 'soll', split_neg, 'haben')
for split_pos, split_neg in zip_longest(rows_pos, rows_neg, fillvalue=_filler)
]
else:
query = build_this_query()
rows = rows_from_query(query)
items = {'total': total, 'rows': rows}
return jsonify(
name=account.name,
items=items
)
@bp.route('/transactions/<int:transaction_id>')
def transactions_show(transaction_id):
transaction = Transaction.q.get(transaction_id)
if transaction is None:
abort(404)
return render_template(
'finance/transactions_show.html',
transaction=transaction,
get_transaction_type=finance.get_transaction_type,
localized=localized,
transaction_table=TransactionTable(
data_url=url_for(".transactions_show_json",
transaction_id=transaction.id)),
)
@bp.route('/transactions/<int:transaction_id>/json')
def transactions_show_json(transaction_id):
transaction = Transaction.q.get(transaction_id)
return jsonify(
description=transaction.description,
items=[
{
'account': {
'href': url_for(".accounts_show", account_id=split.account_id),
'title': localized(split.account.name, {int: {'insert_commas': False}})
},
'amount': money_filter(split.amount),
'row_positive': split.amount > 0
} for split in transaction.splits])
@bp.route('/transactions/unconfirmed')
@nav.navigate(u"Unbestätigte Transaktionen")
def transactions_unconfirmed():
return render_template(
'finance/transactions_unconfirmed.html',
page_title="Unbestätigte Transaktionen",
unconfirmed_transactions_table=UnconfirmedTransactionsTable(
data_url=url_for(".transactions_unconfirmed_json"))
)
@bp.route('/transactions/unconfirmed/json')
def transactions_unconfirmed_json():
transactions = Transaction.q.filter_by(confirmed=False).order_by(Transaction.posted_at).limit(100).all()
items = []
for transaction in transactions:
user_account = next((a for a in transaction.accounts if a.type == "USER_ASSET"), None)
items.append(
{
'description': {
'href': url_for(".transactions_show",
transaction_id=transaction.id),
'title': transaction.description,
'new_tab': True,
'glyphicon': 'glyphicon-new-window'
},
'user': {
'href': url_for("user.user_show",
user_id=user_account.user.id),
'title': "{} ({})".format(user_account.user.name,
encode_type2_user_id(user_account.user.id)),
'new_tab': True
} if user_account else None,
'room': user_account.user.room.short_name if user_account and user_account.user.room else None,
'author': {
'href': url_for("user.user_show",
user_id=transaction.author.id),
'title': transaction.author.name,
'new_tab': True,
},
'date': date_format(transaction.posted_at),
'amount': money_filter(transaction.amount),
'actions': [{
'href': url_for(".transaction_confirm",
transaction_id=transaction.id),
'title': 'Bestätigen',
'icon': 'glyphicon-ok',
'btn_class': 'btn-success btn-sm',
}, {
'href': url_for(".transaction_delete",
transaction_id=transaction.id),
'title': 'Löschen',
'icon': 'glyphicon-trash',
'btn_class': 'btn-danger btn-sm',
}] if privilege_check(current_user, 'finance_change') else [],
})
return jsonify(items=items)
@bp.route('/transaction/<int:transaction_id>/confirm', methods=['GET', 'POST'])
@access.require('finance_change')
def transaction_confirm(transaction_id):
transaction = Transaction.q.get(transaction_id)
if transaction is None:
flash(u"Transaktion existiert nicht.", 'error')
abort(404)
if transaction.confirmed:
flash(u"Diese Transaktion wurde bereits bestätigt.", 'error')
abort(400)
lib.finance.transaction_confirm(transaction)
session.commit()
flash(u'Transaktion bestätigt.', 'success')
return redirect(url_for('.transactions_unconfirmed'))
@bp.route('/transaction/<int:transaction_id>/delete', methods=['GET', 'POST'])
@access.require('finance_change')
def transaction_delete(transaction_id):
transaction = Transaction.q.get(transaction_id)
if transaction is None:
flash(u"Transaktion existiert nicht.", 'error')
abort(404)
if transaction.confirmed:
flash(u"Diese Transaktion wurde bereits bestätigt und kann daher nicht gelöscht werden.", 'error')
abort(400)
form = FlaskForm()
if form.is_submitted():
lib.finance.transaction_delete(transaction)
session.commit()
flash(u'Transaktion gelöscht.', 'success')
return redirect(url_for('.transactions_unconfirmed'))
form_args = {
'form': form,
'cancel_to': url_for('.transactions_unconfirmed'),
'submit_text': 'Löschen',
'actions_offset': 0
}
return render_template('generic_form.html',
page_title="Transaktion löschen",
form_args=form_args,
form=form)
@access.require('finance_show')
@bp.route('/transactions')
def transactions_all():
return render_template('finance/transactions_overview.html',
api_endpoint=url_for(".transactions_all_json",
**request.args))
@access.require('finance_show')
@bp.route('/transactions/json')
def transactions_all_json():
lower = request.args.get('after', "")
upper = request.args.get('before', "")
filter = request.args.get('filter', "nonuser")
if filter == "nonuser":
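        # Sub-select all transactions none of whose splits belong to a user
        # account, i.e. the outer join against User is NULL for every split.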
non_user_transactions = (select([Split.transaction_id])
.select_from(
Join(Split, User,
(User.account_id == Split.account_id),
isouter=True))
.group_by(Split.transaction_id)
.having(func.bool_and(User.id == None))
.alias("nut"))
tid = literal_column("nut.transaction_id")
transactions = non_user_transactions.join(Transaction,
Transaction.id == tid)
else:
transactions = Transaction.__table__
q = (select([Transaction.id,
Transaction.valid_on,
Split.account_id,
Account.type,
Split.amount])
.select_from(transactions
.join(Split, Split.transaction_id == Transaction.id)
.join(Account, Account.id == Split.account_id)))
    # Apply the optional date window; a malformed non-empty bound is rejected.
    try:
        datetime.strptime(lower, "%Y-%m-%d").date()
    except ValueError:
        if lower:
            abort(422)
    else:
        q = q.where(Transaction.valid_on >= lower)
    try:
        datetime.strptime(upper, "%Y-%m-%d").date()
    except ValueError:
        if upper:
            abort(422)
    else:
        q = q.where(Transaction.valid_on <= upper)
res = session.execute(json_agg_core(q)).fetchone()[0] or []
return jsonify(items=res)
@bp.route('/transactions/create', methods=['GET', 'POST'])
@nav.navigate(u'Buchung erstellen')
@access.require('finance_change')
def transactions_create():
form = TransactionCreateForm()
if form.validate_on_submit():
splits = []
for split_form in form.splits:
splits.append((
Account.q.get(split_form.account_id.data),
split_form.amount.data
))
transaction = finance.complex_transaction(
description=form.description.data,
author=current_user,
splits=splits,
valid_on=form.valid_on.data,
)
end_payment_in_default_memberships()
session.commit()
return redirect(url_for('.transactions_show',
transaction_id=transaction.id))
return render_template(
'finance/transactions_create.html',
form=form
)
@bp.route('/accounts/create', methods=['GET', 'POST'])
@access.require('finance_change')
def accounts_create():
form = AccountCreateForm()
if form.validate_on_submit():
new_account = Account(name=form.name.data, type=form.type.data)
session.add(new_account)
session.commit()
return redirect(url_for('.accounts_list'))
return render_template('finance/accounts_create.html', form=form,
page_title=u"Konto erstellen")
@bp.route("/membership_fee/<int:fee_id>/book", methods=['GET', 'POST'])
@access.require('finance_change')
def membership_fee_book(fee_id):
fee = MembershipFee.q.get(fee_id)
if fee is None:
flash(u'Ein Beitrag mit dieser ID existiert nicht!', 'error')
abort(404)
form = FeeApplyForm()
if form.is_submitted():
affected_users = post_transactions_for_membership_fee(
fee, current_user)
session.commit()
flash("{} neue Buchungen erstellt.".format(len(affected_users)), "success")
return redirect(url_for(".membership_fees"))
table = UsersDueTable(data_url=url_for('.membership_fee_users_due_json', fee_id=fee.id))
return render_template('finance/membership_fee_book.html', form=form,
page_title='Beitrag buchen', table=table)
@bp.route("/membership_fee/<int:fee_id>/users_due_json")
def membership_fee_users_due_json(fee_id):
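    """Return JSON rows for the users that booking this fee would affect.

    The posting is only simulated (``simulate=True``); nothing is committed.
    """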
fee = MembershipFee.q.get(fee_id)
if fee is None:
abort(404)
affected_users = post_transactions_for_membership_fee(
fee, current_user, simulate=True)
fee_amount = {'value': str(fee.regular_fee) + '€',
'is_positive': (fee.regular_fee < 0)}
fee_description = localized(
finance.membership_fee_description.format(fee_name=fee.name).to_json())
return jsonify(items=[{
'user_id': user['id'],
'user': {'title': str(user['name']),
'href': url_for("user.user_show", user_id=user['id'])},
'amount': fee_amount,
'description': fee_description,
'valid_on': fee.ends_on
} for user in affected_users])
@bp.route("/membership_fees", methods=['GET', 'POST'])
@nav.navigate(u"Beiträge")
def membership_fees():
table = MembershipFeeTable(data_url=url_for('.membership_fees_json'))
return render_template('finance/membership_fees.html', table=table)
@bp.route("/membership_fees/json")
@access.require('finance_change')
def membership_fees_json():
return jsonify(items=[
{
'name': localized(membership_fee.name),
'regular_fee': money_filter(
membership_fee.regular_fee),
'payment_deadline': membership_fee.payment_deadline.days,
'payment_deadline_final': membership_fee.payment_deadline_final.days,
'begins_on': date_format(membership_fee.begins_on),
'ends_on': date_format(membership_fee.ends_on),
'actions': [
{'href': url_for(".transactions_all",
filter="all",
after=membership_fee.begins_on,
before=membership_fee.ends_on),
'title': 'Finanzübersicht',
'icon': 'glyphicon-euro',
'btn_class': 'btn-success btn-sm'},
{'href': url_for(".membership_fee_book",
fee_id=membership_fee.id),
'title': 'Buchen',
'icon': 'glyphicon-book',
'btn_class': 'btn-warning btn-sm'},
{'href': url_for(".membership_fee_edit",
fee_id=membership_fee.id),
'title': 'Bearbeiten',
'icon': 'glyphicon-edit',
'btn_class': 'btn-primary btn-sm'}
]
} for membership_fee in
MembershipFee.q.order_by(MembershipFee.begins_on.desc()).all()])
@bp.route('/membership_fee/create', methods=("GET", "POST"))
@access.require('finance_change')
def membership_fee_create():
previous_fee = MembershipFee.q.order_by(MembershipFee.id.desc()).first()
if previous_fee:
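        # Default the new fee to the month following the previous fee:
        # day 28 plus four days always lands in the next month, and subtracting
        # that date's day number yields the last day of the month that
        # `begins_on_default` falls in.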
begins_on_default = previous_fee.ends_on + timedelta(1)
next_month = begins_on_default.replace(day=28) + timedelta(4)
ends_on_default = begins_on_default.replace(
day=(next_month - timedelta(days=next_month.day)).day
)
name_default = str(begins_on_default.year) \
+ "-" + "%02d" % begins_on_default.month
form = MembershipFeeCreateForm(
name=name_default,
regular_fee=previous_fee.regular_fee,
booking_begin=previous_fee.booking_begin.days,
booking_end=previous_fee.booking_end.days,
payment_deadline=previous_fee.payment_deadline.days,
payment_deadline_final=previous_fee.payment_deadline_final.days,
begins_on=begins_on_default,
ends_on=ends_on_default,
)
else:
form = MembershipFeeCreateForm()
if form.validate_on_submit():
mfee = MembershipFee(
name=form.name.data,
regular_fee=form.regular_fee.data,
booking_begin=timedelta(days=form.booking_begin.data),
booking_end=timedelta(days=form.booking_end.data),
payment_deadline=timedelta(days=form.payment_deadline.data),
payment_deadline_final=timedelta(days=form.payment_deadline_final.data),
begins_on=form.begins_on.data,
ends_on=form.ends_on.data,
)
session.add(mfee)
session.commit()
flash("Beitrag erfolgreich erstellt.", "success")
return redirect(url_for(".membership_fees"))
return render_template('finance/membership_fee_create.html', form=form)
@bp.route('/membership_fee/<int:fee_id>/edit', methods=("GET", "POST"))
@access.require('finance_change')
def membership_fee_edit(fee_id):
fee = MembershipFee.q.get(fee_id)
if fee is None:
flash(u'Ein Beitrag mit dieser ID existiert nicht!', 'error')
abort(404)
form = MembershipFeeEditForm()
if not form.is_submitted():
form = MembershipFeeEditForm(
name=fee.name,
regular_fee=fee.regular_fee,
booking_begin=fee.booking_begin.days,
booking_end=fee.booking_end.days,
payment_deadline=fee.payment_deadline.days,
payment_deadline_final=fee.payment_deadline_final.days,
begins_on=fee.begins_on,
ends_on=fee.ends_on,
)
elif form.validate_on_submit():
fee.name = form.name.data
fee.regular_fee = form.regular_fee.data
fee.booking_begin = timedelta(days=form.booking_begin.data)
fee.booking_end = timedelta(days=form.booking_end.data)
fee.payment_deadline = timedelta(days=form.payment_deadline.data)
fee.payment_deadline_final = timedelta(days=form.payment_deadline_final.data)
fee.begins_on = form.begins_on.data
fee.ends_on = form.ends_on.data
session.commit()
return redirect(url_for(".membership_fees"))
return render_template('finance/membership_fee_edit.html', form=form)
@bp.route('/membership_fees/handle_payments_in_default', methods=("GET", "POST"))
@access.require('finance_change')
def handle_payments_in_default():
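    """Review and handle users whose payments are in default.

    Pre-selects the computed candidates for new payment-in-default
    memberships and for membership termination; on submit the selected
    actions are applied and committed.
    """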
finance.end_payment_in_default_memberships()
users_pid_membership_all, users_membership_terminated_all = finance.get_users_with_payment_in_default()
form = HandlePaymentsInDefaultForm()
# Using `query_factory` instead of `query`, because wtforms would not process an empty list as `query`
form.new_pid_memberships.query_factory = lambda: users_pid_membership_all
form.terminated_member_memberships.query_factory = lambda: users_membership_terminated_all
if not form.is_submitted():
form.new_pid_memberships.process_data(users_pid_membership_all)
form.terminated_member_memberships.process_data(
users_membership_terminated_all)
if form.validate_on_submit():
users_pid_membership = form.new_pid_memberships.data
users_membership_terminated = form.terminated_member_memberships.data
take_actions_for_payment_in_default_users(
users_pid_membership=users_pid_membership,
users_membership_terminated=users_membership_terminated,
processor=current_user)
session.commit()
flash("Zahlungsrückstände behandelt.", "success")
return redirect(url_for(".membership_fees"))
form_args = {
'form': form,
'cancel_to': url_for('.membership_fees'),
'submit_text': 'Anwenden',
'actions_offset': 0
}
return render_template('generic_form.html',
page_title="Zahlungsrückstände behandeln",
form_args=form_args,
form=form)
@bp.route('/json/accounts/system')
def json_accounts_system():
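    """Return all system accounts (accounts not bound to a user) as JSON."""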
return jsonify(accounts=[
{
"account_id": account.id,
"account_name": localized(account.name),
"account_type": account.type
} for account in Account.q.outerjoin(User).filter(
and_(User.account == None,
Account.type != "USER_ASSET")
).all()])
@bp.route('/json/accounts/user-search')
def json_accounts_user_search():
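    """Search user accounts by user name, login, or id (``query`` parameter)."""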
query = request.args['query']
results = session.query(
Account.id, User.id, User.login, User.name
).select_from(User).join(Account).filter(
or_(func.lower(User.name).like(func.lower("%{0}%".format(query))),
func.lower(User.login).like(func.lower("%{0}%".format(query))),
cast(User.id, Text).like(u"{0}%".format(query)))
).all()
accounts = [
{"account_id": account_id,
"user_id": user_id,
"user_login": user_login,
"user_name": user_name}
for account_id, user_id, user_login, user_name in results
]
return jsonify(accounts=accounts)
|
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
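# Register a placeholder shape function for the test-only 'ConstructionFails'
# op type so that testOpConstructionErrorPayload can build such an op and
# verify the error payload produced when its construction fails.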
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
  def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
sess.run([])
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session():
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
out_t.eval(feed_dict={feed_t: np_array}))
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
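    # partial_run_setup declares all fetches and feeds of a step up front;
    # partial_run then evaluates the graph incrementally, supplying feeds as
    # they become available within that step.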
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testManyPartialRun(self):
with session.Session() as sess:
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.mul(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def testRunAndPartialRun(self):
with session.Session() as sess:
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEquals(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testRunWithNoTargetsIsAnError(self):
with session.Session() as sess:
_ = constant_op.constant(5.0)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'Must specify at least one target to fetch or execute.'):
sess.run([])
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.mul(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
if __name__ == '__main__':
googletest.main()
|
from setuptools import setup
setup(
name='cloudify-dsl-parser',
version='6.4.0.dev1',
packages=[],
description='[DEPRECATED] A stub for the old cloudify-dsl-parser package',
)
|
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
_TEST_VERSION = 1
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0."""
@classmethod
def setUpClass(cls):
cls._tf_api_version = 1 if hasattr(tf, 'contrib') else 2
def setUp(self):
tf.compat.v1.enable_v2_behavior()
def testRenames(self):
self.assertAllClose(1.04719755, tf.acos(0.5))
self.assertAllClose(0.5, tf.rsqrt(4.0))
def testSerializeSparseTensor(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[2])
with self.cached_session():
serialized_sp = tf.serialize_sparse(sp_input, 'serialize_name', tf.string)
self.assertEqual((3,), serialized_sp.shape)
self.assertTrue(serialized_sp[0].numpy()) # check non-empty
def testSerializeManySparse(self):
sp_input = tf.SparseTensor(
indices=tf.constant([[0, 1]], dtype=tf.int64),
values=tf.constant([2], dtype=tf.int64),
dense_shape=[1, 2])
with self.cached_session():
serialized_sp = tf.serialize_many_sparse(
sp_input, 'serialize_name', tf.string)
self.assertEqual((1, 3), serialized_sp.shape)
def testArgMaxMin(self):
self.assertAllClose(
[1],
tf.argmax([[1, 3, 2]], name='abc', dimension=1))
self.assertAllClose(
[0, 0, 0],
tf.argmax([[1, 3, 2]], dimension=0))
self.assertAllClose(
[0],
tf.argmin([[1, 3, 2]], name='abc', dimension=1))
def testSoftmaxCrossEntropyWithLogits(self):
out = tf.nn.softmax_cross_entropy_with_logits(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
out = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=[0.1, 0.8], labels=[0, 1])
self.assertAllClose(out, 0.40318608)
def testLinearClassifier(self):
if _TEST_VERSION == 2 and self._tf_api_version == 1:
# Skip if we converted this file to v2 but running with tf v1.
# In this case, conversion script adds reference to
# tf.keras.losses.Reduction which is not available in v1.
self.skipTest(
'After converting to 2.0, this test does not work with '
'TensorFlow 1.x.')
return
feature_column = tf.feature_column.numeric_column(
'feature', shape=(1,))
classifier = tf.estimator.LinearClassifier(
n_classes=2, feature_columns=[feature_column])
data = {'feature': [1, 20, 3]}
target = [0, 1, 0]
classifier.train(
input_fn=lambda: (data, target),
steps=100)
scores = classifier.evaluate(
input_fn=lambda: (data, target),
steps=100)
self.assertGreater(scores['accuracy'], 0.99)
def testUniformUnitScalingInitializer(self):
init = tf.initializers.uniform_unit_scaling(0.5, seed=1)
self.assertArrayNear(
[-0.45200047, 0.72815341],
init((2,)).numpy(),
err=1e-6)
if __name__ == "__main__":
test_lib.main()
|
"""This example displays the change logs of a specified advertiser object.
A similar pattern can be applied to get change logs for many other object
types.
Tags: changelogs.list
"""
__author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to look up change logs for')
argparser.add_argument(
'advertiser_id', type=int,
help='The ID of the advertiser to look up change logs for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
advertiser_id = flags.advertiser_id
try:
# Construct the request.
request = service.changeLogs().list(
profileId=profile_id, objectIds=[advertiser_id],
objectType='OBJECT_ADVERTISER')
while True:
# Execute request and print response.
response = request.execute()
for change_log in response['changeLogs']:
print(
'%s: Field "%s" from "%s" to "%s".' %
(change_log['action'], change_log['fieldName'],
change_log['oldValue'], change_log['newValue']))
if response['changeLogs'] and response['nextPageToken']:
request = service.changeLogs().list_next(request, response)
else:
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
# Compute the next row of Pascal's triangle from the row in `a`:
# each inner entry is the sum of two adjacent entries, bracketed by 1s.
a = [1, 5, 10, 10, 5, 1]
b = []
x = 0
print(a)
while x <= (len(a) - 2):
    if b == []:
        b.append(1)          # leading 1 of the new row
    c = a[x] + a[x + 1]      # sum of adjacent entries
    b.append(c)
    x = x + 1
b.append(1)                  # trailing 1 of the new row
print(b)
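# A minimal, more general sketch of the same idea (added for illustration; the
# helper name is ours, not part of the original snippet): compute the next row
# of Pascal's triangle from any given row via pairwise sums.
def next_pascal_row(row):
    # Pairwise sums of neighbours, bracketed by the leading/trailing 1s.
    return [1] + [row[i] + row[i + 1] for i in range(len(row) - 1)] + [1]

assert next_pascal_row([1, 5, 10, 10, 5, 1]) == [1, 6, 15, 20, 15, 6, 1]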
|
class SignatureError(Exception):
pass
|
"""This example updates a line item to add custom criteria targeting.
To determine which line items exist, run get_all_line_items.py. To determine
which custom targeting keys and values exist, run
get_all_custom_targeting_keys_and_values.py.
"""
import pprint
from googleads import ad_manager
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
KEY_ID1 = 'INSERT_TARGETING_KEY_ID_HERE'
KEY_ID2 = 'INSERT_TARGETING_KEY_ID_HERE'
KEY_ID3 = 'INSERT_TARGETING_KEY_ID_HERE'
VALUE_ID1 = 'INSERT_TARGETING_VALUE_ID_HERE'
VALUE_ID2 = 'INSERT_TARGETING_VALUE_ID_HERE'
VALUE_ID3 = 'INSERT_TARGETING_VALUE_ID_HERE'
def main(client, line_item_id, key_id1, key_id2, key_id3, value_id1, value_id2,
value_id3):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201805')
  # Create custom criteria.
custom_criteria1 = {
'xsi_type': 'CustomCriteria',
'keyId': key_id1,
'valueIds': [value_id1],
'operator': 'IS'
}
custom_criteria2 = {
'xsi_type': 'CustomCriteria',
'keyId': key_id2,
'valueIds': [value_id2],
'operator': 'IS_NOT'
}
custom_criteria3 = {
'xsi_type': 'CustomCriteria',
'keyId': key_id3,
'valueIds': [value_id3],
'operator': 'IS'
}
# Create the custom criteria set that will resemble:
# (custom_criteria1.key == custom_criteria1.value OR
# (custom_criteria2.key != custom_criteria2.value AND
  #     custom_criteria3.key == custom_criteria3.value))
sub_set = {
'xsi_type': 'CustomCriteriaSet',
'logicalOperator': 'AND',
'children': [custom_criteria2, custom_criteria3]
}
top_set = {
'xsi_type': 'CustomCriteriaSet',
'logicalOperator': 'OR',
'children': [custom_criteria1, sub_set]
}
# Create statement to get the line item
statement = (ad_manager.StatementBuilder(version='v201805')
.Where('id = :lineItemId')
.WithBindVariable('lineItemId', long(line_item_id))
.Limit(1))
# Set custom criteria targeting on the line item.
line_item = line_item_service.getLineItemsByStatement(
statement.ToStatement())['results'][0]
line_item['targeting']['customTargeting'] = top_set
# Update line item.
line_item = line_item_service.updateLineItems([line_item])[0]
# Display results.
if line_item:
print ('Line item with id "%s" updated with custom criteria targeting:'
% line_item['id'])
pprint.pprint(line_item['targeting']['customTargeting'])
else:
    print('No line items were updated.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, LINE_ITEM_ID, KEY_ID1, KEY_ID2, KEY_ID3, VALUE_ID1,
VALUE_ID2, VALUE_ID3)
|
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
import re  # used to strip HTML tags from the description
from flask import Flask
from flask import request
from flask import make_response
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
    res = makeWebhookResult(req)  # build the query response and speech payload for situr3
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "buscarAtractivos":
return {}
    result = req.get("result")  # get the "result" object from the JSON request
    parameters = result.get("parameters")  # get the parameters inside "result"
    atractivos = parameters.get("atractivos")  # attraction name provided by API.AI
    # Base URL for the attractions JSON query (first result only)
    baseUrlAtractivos = "http://situr.boyaca.gov.co/wp-json/wp/v2/atractivo_turistico?offset=0&search="  # attractions base URL
    baseUrlImgAtract = "http://www.situr.boyaca.gov.co/wp-json/wp/v2/media/"  # attraction images base URL
    retirarEspacios = atractivos.replace(" ", "%20")  # URL-encode spaces in the attraction name
leerAtractivo = json.loads(urlopen(baseUrlAtractivos + retirarEspacios).read())
tituloAtractivo = leerAtractivo[0]['title']['rendered']
descripcionAtractivo = re.sub("<.*?>", "", leerAtractivo[0]['excerpt']['rendered'])
urlAtractivo = leerAtractivo[0].get('link')
idImagenAtractivo = str(leerAtractivo[0]['featured_media'])
leerImagenAtr = json.loads(urlopen(baseUrlImgAtract + idImagenAtractivo).read())
imagenAtractivo = leerImagenAtr['media_details']['sizes']['medium']['source_url']
speech = "El atractivo: " + tituloAtractivo + ". Descripción:" + descripcionAtractivo + " y la url de la imagen es: " + imagenAtractivo
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
"data" :
{
"facebook" : {
"attachment" : {
"type" : "template",
"payload" : {
"template_type" : "generic",
"elements" : [
{
"title" : tituloAtractivo,
"image_url" : imagenAtractivo,
"subtitle": descripcionAtractivo,
"buttons": [
{
"type":"web_url",
"url": "http://situr.boyaca.gov.co",
"title": "Ver"
},
{
"type":"web_url",
"url": "http://situr.boyaca.gov.co",
"title": "Ver2"
},
{
"type":"web_url",
"url": "http://situr.boyaca.gov.co",
"title": "Ver3"
}
]
},
{
"title": tituloAtractivo,
"image_url": imagenAtractivo,
"subtitle": "Resident Evil: The Final Chapter is an upcoming science fiction action horror film written and directed by Paul W. S. Anderson. It is the sequel to Resident Evil: Retribution (2012), and will be the sixth and final installment in the Resident Evil film series, which is very loosely based on the Capcom survival horror video game series Resident Evil.",
"default_action": {
"type": "web_url",
"url": "https://www.moovrika.com/m/4167",
"webview_height_ratio": "tall"
},
"buttons": [
{
"title": "more info",
"type": "web_url",
"url": "https://www.moovrika.com/m/4082",
"webview_height_ratio": "tall"
},
{
"type":"web_url",
"url": "http://situr.boyaca.gov.co",
"title": "Ver2"
},
{
"type":"web_url",
"url": "http://situr.boyaca.gov.co",
"title": "Ver3"
}
]
}
]
}
}
}
},
"source": "apiai-situr3"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
|
import argparse
import csv
import datetime
import os
import numpy as np
import torch
from tqdm import tqdm
import model
import train
from util import sst, mr, load_word_vectors, headerless_tsv
def load_data(args):
if args.dataset is None:
return None, None, None, None, None
# load data
print("\nLoading data...")
split_args = {
'batch_size': args.batch_size,
'device': args.device,
'repeat': False,
'shuffle': args.shuffle if args.shuffle else None,
'sort': False,
}
if args.dataset == 'mr':
return mr(**split_args)
elif args.dataset == 'sst':
return sst(fine_grained=args.fine_grained, train_subtrees=args.train_subtrees, **split_args)
else:
return headerless_tsv(args.dataset, **split_args)
def main():
parser = argparse.ArgumentParser(description='CNN text classifier')
# learning
parser.add_argument('-lr', type=float, default=0.001, help='initial learning rate')
parser.add_argument('-epochs', type=int, default=25, help='number of epochs for train')
parser.add_argument('-batch-size', type=int, default=64, help='batch size for training')
parser.add_argument('-log-interval', type=int, default=1,
help='how many steps to wait before logging training status')
parser.add_argument('-save-interval', type=int, default=0, help='how many steps to wait before saving')
parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the snapshot')
# data
parser.add_argument('-shuffle', action='store_true', default=False, help='shuffle the data every epoch')
# model
parser.add_argument('-dropout', type=float, default=0.5, help='the probability for dropout')
parser.add_argument('-max-norm', type=float, default=3.0, help='l2 constraint of parameters')
parser.add_argument('-kernel-num', type=int, default=100, help='number of each kind of kernel')
parser.add_argument('-kernel-sizes', type=str, default='3,4,5',
help='comma-separated kernel size to use for convolution')
parser.add_argument('-static', action='store_true', default=False, help='fix the embedding')
# device
parser.add_argument('-device', type=int, default=-1,
                        help='device to use for iterating over data; -1 means cpu')
parser.add_argument('-no-cuda', action='store_true', default=False, help='disable the gpu')
# option
parser.add_argument('-snapshot', type=str, default=None, help='filename of model snapshot')
parser.add_argument('-predict', type=str, default=None, help='predict the sentence given')
parser.add_argument('-predictfile', type=str, default=None, help='predict sentences in a file')
parser.add_argument('-test', action='store_true', default=False, help='train or test')
parser.add_argument('-dataset', type=str, default=None, help='specify dataset: sst | mr')
parser.add_argument('-fine-grained', action='store_true', default=False, help='use 5-class sst')
parser.add_argument('-train-subtrees', action='store_true', default=False, help='train sst subtrees')
parser.add_argument('-debug', action='store_true', default=False, help='debug mode')
parser.add_argument('-word-vector', type=str, default=None)
args = parser.parse_args()
# update args and print
text_field, label_field, train_iter, dev_iter, test_iter = load_data(args)
if train_iter:
print("train dataset size:", len(train_iter.dataset))
if dev_iter:
print("dev dataset size:", len(dev_iter.dataset))
if test_iter:
print("test dataset size:", len(test_iter.dataset))
if args.dataset:
args.embed_num = len(text_field.vocab)
args.class_num = len(label_field.vocab) - 1 # exclude <unk>
word_vector_matrix = None
if text_field:
print("\nLoading pre-trained word vectors...")
word_vector_filename = args.word_vector or '{}_word2vec.npy'.format(os.path.basename(args.dataset))
if os.path.exists(word_vector_filename):
word_vector_matrix = np.load(word_vector_filename)
else:
word_vector_matrix = load_word_vectors(vocab=text_field.vocab)
np.save(word_vector_filename, word_vector_matrix)
    args.cuda = (not args.no_cuda) and torch.cuda.is_available()
del args.no_cuda
args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]
if args.dataset:
args.save_dir = os.path.join(args.save_dir,
os.path.basename(args.dataset),
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
else:
args.save_dir = os.path.join(args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
print("\t{}={}".format(attr.upper(), value))
# model
if args.snapshot is None:
cnn = model.CNN_Text(args, text_field, label_field, word_vector_matrix)
else:
print('\nLoading model from [%s]...' % args.snapshot)
try:
cnn = torch.load(args.snapshot, map_location=lambda storage, loc: storage)
        except Exception:
            print("Sorry, this snapshot doesn't exist.")
            exit()
if args.cuda:
cnn = cnn.cuda()
print()
# train or predict
if args.predict is not None:
label = train.predict(args.predict, cnn, args)
print('[Text] {}\n[Label] {}\n'.format(args.predict, label))
elif args.predictfile is not None:
filepre = os.path.splitext(os.path.basename(args.predictfile))[0]
predictions_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'predictions')
identifier = 'lr{}_batch{}_dropout{}'.format(args.lr, args.batch_size, args.dropout)
result_path = os.path.join(predictions_dir,
filepre +
'-predictions-' +
identifier +
'.csv')
if not os.path.isdir(predictions_dir): os.makedirs(predictions_dir)
with open(args.predictfile, 'r') as rf, \
open(result_path, 'w') as wf:
writer = csv.writer(wf)
writer.writerow(['text', 'label'])
lines = list(rf)
for line in tqdm(lines):
line = line.strip()
label = train.predict(line, cnn, args)
writer.writerow([line, label])
print('\nPredictions are written to ', result_path)
elif args.test:
if test_iter:
train.eval(test_iter, cnn, args, print_info=True)
else:
print("\nThe test dataset does not exist.\n")
else:
train.train(train_iter, dev_iter, cnn, args)
print()
if __name__ == '__main__':
    main()
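# Illustrative invocations (hypothetical script name and paths; the flags match
# the argparse definitions above):
#   python main.py -dataset sst -fine-grained -epochs 25 -batch-size 64
#   python main.py -dataset mr -shuffle -word-vector mr_word2vec.npy
#   python main.py -dataset sst -snapshot snapshot/sst/<timestamp>/best_model.pt -test
#   python main.py -dataset mr -snapshot snapshot/mr/<timestamp>/best_model.pt -predict "a gripping movie"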
|
import os, re
import commands
from time import time
from TimerCommand import TimerCommand
import SiteMover
from futil import *
from PilotErrors import PilotErrors
from pUtil import tolog, readpar, verifySetupCommand, getSiteInformation, extractFilePaths, getExperiment
from FileStateClient import updateFileState
from SiteInformation import SiteInformation
class LocalSiteMover(SiteMover.SiteMover):
""" SiteMover that uses lsm for both get and put """
# no registration is done
copyCommand = "lsm"
realCopyCommand = "lsm-get"
checksum_command = "adler32"
timeout = 3600
__warningStr = '!!WARNING!!2995!! %s'
__spacetoken = '-t %s' # space token descriptor
__localget = '%s lsm-get %s %s %s' # environment, options, lfn, target directory
__localput = '%s lsm-put %s %s %s' # environment, space token (optional), source directory, destination
__localputBAD = '%s lsm-put %s %s %s' # environment, space token (optional), source directory, destination
__localspace = '%s lsm-df %s %s' # environment, space token (optional), storage end-point
__par_filesize = ' --size %s' # filesize in bytes
__par_checksum = ' --checksum %s' # checksum string: "adler32:NNN", "md5:NNN", default is assumed MD5
__timeout = 5400 # seconds
__error = PilotErrors()
__pilotErrorDiag = ''
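    # Illustrative fully-expanded stage-in command (hypothetical values), as
    # assembled by stageInFile() from the __localget template and the
    # __par_* option strings above:
    #   <setup>; lsm-get  --size 1048576 --checksum adler32:89abcdef --guid <guid> \
    #       srm://se.example.org/rucio/file.root /scratch/panda/workdir/file.root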
def __init__(self, setup_path, *args, **kwrds):
self._setup = setup_path.strip()
self.__isSetuped = False
self._defaultSetup = None
def get_timeout(self):
return self.timeout
def log(self, errorLog):
tolog(errorLog)
def getSetup(self):
""" Return the setup string (pacman setup os setup script) for the copy command used by the mover """
_setup_str = ""
self._setup = self._setup.strip()
tolog("self setup: %s" % self._setup)
if self._setup and self._setup != "" and self._setup.strip() != "":
if not self._setup.endswith(";"):
self._setup += ";"
if not "alias" in self._setup:
if "atlasLocalSetup.sh" in self._setup and "--quiet" not in self._setup:
self._setup = self._setup.replace("atlasLocalSetup.sh", "atlasLocalSetup.sh --quiet")
if self._setup.startswith("export") or self._setup.startswith("source"):
_setup_str = "%s" % self._setup
else:
_setup_str = "source %s" % self._setup
else:
_setup_str = self._setup
if _setup_str != "":
tolog("Using setup: %s" % (_setup_str))
return _setup_str
def verifySetupCommand(self, _setupStr):
""" Make sure the setup command exists """
statusRet = 0
outputRet={}
outputRet["errorLog"] = None
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
# remove any '-signs
_setupStr = _setupStr.replace("'", "")
self.log("Will verify: %s" % (_setupStr))
if _setupStr != "" and "source " in _setupStr:
# first extract the file paths from the source command(s)
setupPaths = extractFilePaths(_setupStr)
# only run test if string begins with an "/"
if setupPaths:
# verify that the file paths actually exists
for setupPath in setupPaths:
if "-" in setupPath:
continue
if os.path.exists(setupPath):
self.log("File %s has been verified" % (setupPath))
else:
outputRet["errorLog"] = errorLog = "No such file or directory: %s" % (setupPath)
self.log('!!WARNING!!2991!! %s' % (errorLog))
statusRet = PilotErrors.ERR_NOSUCHFILE
break
else:
# nothing left to test
pass
else:
self.log("Nothing to verify in setup: %s (either empty string or no source command)" % (_setupStr))
return statusRet, outputRet
def verifySetupProxy(self, _setupStr, experiment):
#check do we have a valid proxy
# get the experiment object
thisExperiment = getExperiment(experiment)
status, output = thisExperiment.verifyProxy(envsetup=_setupStr)
return status, output
def verifySetup(self, _setupStr, experiment, proxycheck=True):
statusRet, outputRet = self.verifySetupCommand(_setupStr)
if statusRet != 0:
#self.__sendReport('RFCP_FAIL', self._variables['report'])
outputRet["report"]["clientState"] = "RFCP_FAIL"
return statusRet, outputRet
command = _setupStr
if command != "" and not command.endswith(';'):
command = command + ";"
command += " which " + self.realCopyCommand
status, output = commands.getstatusoutput(command)
self.log("Execute command: %s" % command)
self.log("Status: %s, Output: %s" % (status, output))
if status != 0:
self.log(self.copyCommand +" is not found in envsetup: " + _setupStr)
#self.__sendReport('RFCP_FAIL', self._variables['report'])
outputRet["report"]["clientState"] = "RFCP_FAIL"
outputRet["errorLog"] = output
return status, outputRet
if proxycheck:
status, outputLog = self.verifySetupProxy(_setupStr, experiment)
if status != 0:
outputRet["errorLog"] = outputLog
outputRet["report"]["clientState"] = 'PROXYFAIL'
return status, outputRet
return status, outputRet
def setup(self, experiment):
""" setup env """
if self.__isSetuped:
return 0, None
thisExperiment = getExperiment(experiment)
self.useTracingService = thisExperiment.useTracingService()
_setupStr = self.getSetup()
# get the user proxy if available
envsetupTest = _setupStr.strip()
if envsetupTest != "" and not envsetupTest.endswith(';'):
envsetupTest += ";"
if os.environ.has_key('X509_USER_PROXY'):
envsetupTest += " export X509_USER_PROXY=%s;" % (os.environ['X509_USER_PROXY'])
self.log("to verify site setup: %s " % envsetupTest)
status, output = self.verifySetup(envsetupTest, experiment)
self.log("site setup verifying: status: %s, output: %s" % (status, output["errorLog"]))
if status == 0:
self._setup = envsetupTest
self.__isSetuped = True
return status, output
else:
if self._defaultSetup:
#try to use default setup
self.log("Try to use default envsetup")
envsetupTest = self._defaultSetup.strip()
if envsetupTest != "" and not envsetupTest.endswith(';'):
envsetupTest += ";"
if os.environ.has_key('X509_USER_PROXY'):
envsetupTest += " export X509_USER_PROXY=%s;" % (os.environ['X509_USER_PROXY'])
self.log("verify default setup: %s " % envsetupTest)
status, output = self.verifySetup(envsetupTest, experiment)
self.log("default setup verifying: status: %s, output: %s" % (status, output["errorLog"]))
if status == 0:
self._setup = envsetupTest
self.__isSetuped = True
return status, output
return status, output
def fixStageInPath(self, path):
"""Fix the path"""
if path[:3] == "srm" and '?SFN=' in path:
self.log("Found SFN part in file path: %s" % (path))
elif path[:3] == "srm":
try:
hostname = path.split('/',3)[2]
except Exception as e:
self.log("'!!WARNING!!2999!! Could not extract srm protocol for replacement, keeping path variable as it is: %s (%s)' %\
(path, str(e))")
else:
# srm = 'srm://head01.aglt2.org'
srm = 'srm://' + hostname
# does seopt contain any matching srm's?
sematch = self.getSEMatchFromSEOpt(srm)
if sematch != "":
path = path.replace(srm, sematch)
self.log("Replaced %s with %s (from seopt) in path: %s" % (srm, sematch, path))
else:
se = readpar('se').split(",")[0]
_dummytoken, se = self.extractSE(se)
tolog("Using SE: %s" % (se))
path = path.replace(srm, se)
self.log("Replaced %s with %s (from se) in path: %s" % (srm, se, path))
# add port number from se to getfile if necessary
path = self.addPortToPath(se, path)
return path
def getStageInMode(self, lfn, prodDBlockToken):
# should the root file be copied or read directly by athena?
status = 0
output={}
output["errorLog"] = None
output["report"] = {}
output["report"]["clientState"] = None
output["transfer_mode"] = None
isRootFileName = self.isRootFileName(lfn)
siteInformation = SiteInformation()
directIn, transfer_mode = siteInformation.getDirectInAccessMode(prodDBlockToken, isRootFileName)
if transfer_mode:
#updateFileState(lfn, workDir, jobId, mode="transfer_mode", state=transfer_mode, type="input")
output["transfer_mode"] = transfer_mode
if directIn:
output["report"]["clientState"] = 'FOUND_ROOT'
output["report"]['relativeStart'] = None
output["report"]['transferStart'] = None
return PilotErrors.ERR_DIRECTIOFILE, output
return 0, output
def stageInFile(self, source, destination, sourceSize, sourceChecksum, guid=None):
"""StageIn the file. should be implementated by different site mover."""
statusRet = 0
outputRet = {}
outputRet["errorLog"] = None
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
# build the parameters
_params = ""
if sourceSize != 0 and sourceSize != "0":
_params += self.__par_filesize % (sourceSize)
if sourceChecksum and sourceChecksum != 'None' and sourceChecksum != 0 and sourceChecksum != "0" and not self.isDummyChecksum(sourceChecksum):
csumtype = self.getChecksumType(sourceChecksum)
# special case for md5sum (command only understands 'md5' and 'adler32', and not 'ad' and 'md5sum')
if csumtype == 'md5sum':
csumtype = 'md5'
_params += self.__par_checksum % ("%s:%s" % (csumtype, sourceChecksum),)
# add the guid option
_params += " --guid %s" % (guid)
self.log("StageIn files started.")
_cmd_str = self.__localget % (self._setup, _params, source, destination)
self.log('Executing command: %s' % (_cmd_str))
s = -1
o = '(not defined)'
t0 = os.times()
outputRet["report"]['relativeStart'] = time()
outputRet["report"]['transferStart'] = time()
try:
timerCommand = TimerCommand(_cmd_str)
s, o = timerCommand.run(timeout=self.timeout)
except Exception, e:
tolog("!!WARNING!!2990!! Exception caught by stageInFile(): %s" % (str(e)))
o = str(e)
t1 = os.times()
t = t1[4] - t0[4]
self.log("Command finished after %f s: %s" % (t, o.replace('\n', ' ')))
if s == 0:
self.log("Stagein succeeded")
else:
self.log("!!WARNING!!2990!! Command failed: %s" % (_cmd_str))
o = o.replace('\n', ' ')
#check_syserr(s, o)
self.log("!!WARNING!!2990!! get_data failed. Status=%s Output=%s" % (s, str(o)))
# remove the local file before any get retry is attempted
_status = self.removeLocal(destination)
if not _status:
self.log("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
status, output = self.errorToReport(o, t, source, stageMethod="stageIN")
return status, output
#outputRet["report"]["clientState"] = "DONE"
return statusRet, outputRet
def verifyStageIN(self, sourceFile, sourceSize, sourceChecksum, destFile):
"""Verify file stagin successfull"""
statusRet = 0
outputRet={}
outputRet["errorLog"] = None
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
outputRet["report"]['validateStart'] = time()
self.log("Verify file Staging: source: %s, sourceSize: %s, sourceChecksum: %s, destFile: %s" % (sourceFile, sourceSize, sourceChecksum, destFile))
if sourceChecksum == 0 and sourceSize ==0:
return statusRet, outputRet
# get the checksum type (md5sum or adler32)
if sourceChecksum != 0 and sourceChecksum != "":
csumtype = self.getChecksumType(sourceChecksum)
else:
csumtype = "default"
self.log("Getting destination file(%s) information." % destFile)
status, output = self.getLocalFileInfo(destFile, checksumType=csumtype)
if status != 0:
self.log("Failed to get local file information")
outputRet["report"]["clientState"] = "FILE_INFO_FAIL"
outputRet["errorLog"] = output["errorLog"]
_status = self.removeLocal(destFile)
self.log("Remove local file.")
if not _status:
self.log("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return status, outputRet
destSize = output["size"]
destChecksum = output["checksum"]
self.log("Destination file information: file: %s, size: %s, checksum: %s" % (destFile, destSize, destChecksum))
# compare remote and local file size
if long(sourceSize) != 0 and long(destSize) != long(sourceSize):
errorLog = "Remote and local file sizes do not match for %s (%s != %s)" %\
(os.path.basename(sourceFile), str(destSize), str(sourceSize))
self.log("!!WARNING!!2990!! %s" % (errorLog))
outputRet["errorLog"] = errorLog
outputRet["report"]["clientState"] = "WRONG_SIZE"
status = self.removeLocal(destFile)
if not status:
self.log("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
return PilotErrors.ERR_GETWRONGSIZE, outputRet
# compare remote and local file checksum
if sourceChecksum and str(destChecksum) != str(sourceChecksum) and not self.isDummyChecksum(sourceChecksum):
outputRet["errorLog"] = errorLog = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
(csumtype, os.path.basename(sourceFile), destChecksum, sourceChecksum)
self.log("!!WARNING!!2990!! %s" % (errorLog))
# remove the local file before any get retry is attempted
_status = self.removeLocal(destFile)
if not _status:
self.log("!!WARNING!!1112!! Failed to remove local file, get retry will fail")
if csumtype == "adler32":
outputRet["report"]["clientState"] = "AD_MISMATCH"
return PilotErrors.ERR_GETADMISMATCH, outputRet
else:
outputRet["report"]["clientState"] = "MD5_MISMATCH"
return PilotErrors.ERR_GETMD5MISMATCH, outputRet
self.log("Verify staging done.")
outputRet["report"]["clientState"] = "DONE"
return statusRet, outputRet
def stageIn(self, source, destination, sourceSize, sourceChecksum, experiment):
"""Stage in the source file"""
statusRet = 0
outputRet ={}
outputRet["errorLog"] = None
outputRet["report"] = None
status, output = self.setup(experiment)
if status !=0:
statusRet = status
outputRet["errorLog"] = output["errorLog"]
outputRet["report"] = output["report"]
return statusRet, outputRet
source = self.fixStageInPath(source)
status, output = self.stageInFile(source, destination, sourceSize, sourceChecksum)
if status != 0:
statusRet = status
outputRet["errorLog"] = output["errorLog"]
outputRet["report"] = output["report"]
return statusRet, outputRet
        # For Local Site Mover, verification is done by lsm. Verify the checksum again here since lsm-get does not check it.
status, output = self.verifyStageIN(source, sourceSize, sourceChecksum, destination)
statusRet = status
outputRet["errorLog"] = output["errorLog"]
outputRet["report"] = output["report"]
return statusRet, outputRet
def getLocalFileInfo(self, fileName, checksumType="default", date=None):
""" Return exit code (0 if OK), file size and checksum of a local file, as well as as date string if requested """
# note that date is mutable
statusRet = 0
outputRet = {}
outputRet["errorLog"] = ""
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
outputRet["size"] = 0
outputRet["checksum"] = ""
outputRet["checksumType"] = checksumType
self.log("Getting local File(%s) info." % fileName)
# does the file exist?
if not os.path.isfile(fileName):
if fileName.find("DBRelease") >= 0 and os.path.exists(os.path.dirname(fileName)):
outputRet["errorLog"] = errorLog = "DBRelease file missing: %s" % (fileNameame)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_MISSDBREL, outputRet
else:
outputRet["errorLog"] = errorLog = "No such file or directory: %s" % (fileName)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_MISSINGLOCALFILE, outputRet
# get the modification time if needed and store it in the mutable object
if date:
date = SiteMover.getModTime(os.path.dirname(fileName), os.path.basename(fileName))
# get the file size
try:
self.log("Executing getsize() for file: %s" % (fileName))
outputRet["size"] = fsize = str(os.path.getsize(fileName))
except OSError, e:
outputRet["errorLog"] = errorLog = "Could not get file size: %s" % str(e)
tolog("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_FAILEDSIZELOCAL, outputRet
else:
if fsize == "0":
outputRet["errorLog"] = errorLog = "Encountered zero file size for file %s" % (fileName)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_ZEROFILESIZE, outputRet
else:
self.log("Got file size: %s" % (fsize))
# get the checksum
if checksumType == "adler32" or checksumType == "default":
self.log("Executing adler32() for file: %s" % (fileName))
outputRet["checksum"] = fchecksum = SiteMover.SiteMover.adler32(fileName)
if fchecksum == '00000001': # "%08x" % 1L
outputRet["errorLog"] = errorLog = "Adler32 failed (returned 1)"
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_FAILEDADLOCAL, outputRet
else:
self.log("Got adler32 checksum: %s" % (fchecksum))
else:
_cmd = '%s %s' % (CMD_CHECKSUM, fileName)
self.log("Executing command: %s" % (_cmd))
try:
s, o = commands.getstatusoutput(_cmd)
except Exception, e:
s = -1
o = str(e)
self.log("!!WARNING!!2999!! Exception caught in getstatusoutput: %s" % (o))
if s != 0:
o = o.replace('\n', ' ')
check_syserr(s, o)
outputRet["errorLog"] = errorLog = "Error running checksum command (%s): %s" % (CMD_CHECKSUM, o)
self.log("!!WARNING!!2999!! %s" % (errorLog))
return PilotErrors.ERR_FAILEDMD5LOCAL, outputRet
outputRet["checksum"] = fchecksum = o.split()[0]
self.log("Got checksum: %s" % (fchecksum))
return 0, outputRet
def stageOutFile(self, source, destination, sourceSize, sourceChecksum, checksumType, guid, token=None):
"""Stage out the file. Should be implementated by different site mover"""
statusRet = 0
outputRet = {}
outputRet["errorLog"] = None
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
# build the parameters
_params = ""
if token:
# Special case for GROUPDISK (do not remove dst: bit before this stage, needed in several places)
if "dst:" in token:
token = token[len('dst:'):]
tolog("Dropped dst: part of space token descriptor; token=%s" % (token))
token = "ATLASGROUPDISK"
tolog("Space token descriptor reset to: %s" % (token))
_params = self.__spacetoken % (token)
if sourceSize != 0 and sourceSize != "0":
_params += self.__par_filesize % (sourceSize)
if sourceChecksum:
_params += self.__par_checksum % ("%s:%s" % (checksumType, sourceChecksum),)
# add the guid option
_params += " --guid %s" % (guid)
if ".log." in destination:
_cmd_str = self.__localput % (self._setup, _params, source, destination)
else:
_cmd_str = self.__localputBAD % (self._setup, _params, source, destination)
tolog("Executing command: %s" % (_cmd_str))
ec = -1
t0 = os.times()
o = '(not defined)'
outputRet["report"]['relativeStart'] = time()
outputRet["report"]['transferStart'] = time()
try:
timerCommand = TimerCommand(_cmd_str)
ec, o = timerCommand.run(timeout=self.timeout)
except Exception, e:
tolog("!!WARNING!!2999!! gfal-copy threw an exception: %s" % (o))
o = str(e)
outputRet["report"]['validateStart'] = time()
t1 = os.times()
t = t1[4] - t0[4]
tolog("Command finished after %f s" % (t))
tolog("ec = %d, output = %s" % (ec, o.replace("\n"," ")))
if ec != 0:
tolog("!!WARNING!!2990!! Command failed: %s" % (_cmd_str))
#check_syserr(ec, o)
tolog('!!WARNING!!2990!! Stage Out failed: Status=%d Output=%s' % (ec, str(o.replace("\n"," "))))
status, output = self.errorToReport(o, t, source, stageMethod="stageOut")
if status == PilotErrors.ERR_FILEEXIST:
return status, output
# for local site mover, don't remove files
## check if file was partially transferred, if so, remove it
#_ec, removeOutput = self.removeRemoteFile(destination)
#if not _ec :
# self.log("Failed to remove file ") # i.e. do not retry stage-out
return status, output
return statusRet, outputRet
def getRemoteFileChecksum(self, full_surl, checksumType):
""" get checksum with gfal-sum command """
remote_checksum = None
output = None
errorLog = "LocalSiteMover has no function getRemoteFileChecksum"
self.log('!!WARNING!!2999!! %s' % (errorLog))
return remote_checksum
def getRemoteFileSize(self, full_surl):
""" extract checksum and file size from gfal-ls output """
errorLog = "LocalSiteMover has no function getRemoteFileSize"
self.log('!!WARNING!!2999!! %s' % (errorLog))
remote_fsize = None
return remote_fsize
def verifyStageOut(self, sourceFile, sourceSize, sourceChecksum, checksumType, destFile):
"""Verify file stagout successfull"""
statusRet = 0
outputRet = {}
outputRet["errorLog"] = None
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
outputRet["verified"] = False
self.log("verifying stageout")
status, output = self.getRemoteFileInfo(destFile, checksumType)
errorLog = output["errorLog"]
destSize = output["size"]
destChecksum = output["checksum"]
destChecksumType = output["checksumType"]
self.log("Remote checksum: %s" % str(destChecksum))
self.log("Local checksum: %s" % str(sourceChecksum))
if status == 0:
if destChecksum:
if str(sourceChecksum) != str(destChecksum):
outputRet["errorLog"] = errorLog = "Remote and local checksums (of type %s) do not match for %s (%s != %s)" %\
(checksumType, os.path.basename(destFile), destChecksum, sourceChecksum)
self.log("!!WARNING!!1800!! %s" % (errorLog))
if checksumType == "adler32":
outputRet["report"]["clientState"] = 'AD_MISMATCH'
return PilotErrors.ERR_PUTADMISMATCH, outputRet
else:
outputRet["report"]["clientState"] = 'MD5_MISMATCH'
return PilotErrors.ERR_PUTMD5MISMATCH, outputRet
else:
self.log("Remote and local checksums verified")
outputRet["verified"] = verified = True
else:
# if the checksum could not be verified (as is the case for non-dCache sites) test the file size instead
if destSize:
self.log("Local file size: %s" % (sourceSize))
if destSize and destSize != "" and sourceSize != "" and sourceSize:
if sourceSize != destSize:
outputRet["errorLog"] = errorLog = "Remote and local file sizes do not match for %s (%s != %s)" %\
(sourceFile, str(destSize), str(sourceSize))
self.log('!!WARNING!!2999!! %s' % (errorLog))
outputRet['report']["clientState"] = 'FS_MISMATCH'
return PilotErrors.ERR_PUTWRONGSIZE, outputRet
else:
self.log("Remote and local file sizes verified")
outputRet['verified'] = True
else:
self.log("Skipped file size test")
else:
self.log("Failed to get Remote file information: %s" % ())
if outputRet['verified'] != True:
# fail at this point
outputRet["errorLog"] = errorLog = "Neither checksum nor file size could be verified (failing job)"
self.log('!!WARNING!!2999!! %s' % (errorLog))
outputRet['report']["clientState"] = 'NOFILEVERIFICATION'
return PilotErrors.ERR_NOFILEVERIFICATION, outputRet
self.log("verifying stageout done.")
outputRet["report"]["clientState"] = "DONE"
return statusRet, outputRet
def removeRemoteFile(self, full_surl):
errorLog = "LocalSiteMover has no function removeRemoteFile"
self.log('!!WARNING!!2999!! %s' % (errorLog))
return 0, None
def getRemoteFileInfo(self, destFile, checksumType):
""" Get Remote file info. Should be implementated by different site mover"""
status = 0
outputRet = {}
outputRet["errorLog"] = None
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
outputRet["size"] = None
outputRet["checksum"] = None
outputRet["checksumType"] = checksumType
checksum = None
fileSize = None
checksum = self.getRemoteFileChecksum(destFile, checksumType)
if checksum == None:
fileSize = self.getRemoteFileSize(destFile)
outputRet["size"] = fileSize
outputRet["checksum"] = checksum
return status, outputRet
def stageOut(self, source, destination, token, guid, experiment):
"""Stage in the source file"""
statusRet = 0
outputRet ={}
outputRet["errorLog"] = None
outputRet["report"] = None
status, output = self.setup(experiment)
if status !=0:
statusRet = status
outputRet["errorLog"] = output["errorLog"]
outputRet["report"] = output["report"]
return statusRet, outputRet
status, output = self.getLocalFileInfo(source)
if status !=0:
statusRet = status
outputRet["errorLog"] = output["errorLog"]
outputRet["report"] = output["report"]
return statusRet, outputRet
sourceSize = output["size"]
sourceChecksum = output["checksum"]
checksumType = output["checksumType"]
if checksumType == "default":
checksumType = "adler32"
status, output = self.stageOutFile(source, destination, sourceSize, sourceChecksum, checksumType, guid, token)
statusRet = status
outputRet["errorLog"] = output["errorLog"]
outputRet["report"] = output["report"]
outputRet["size"] = sourceSize
outputRet["checksum"] = sourceChecksum
return statusRet, outputRet
# For Local Site Mover, verification is done by lsm
#status, output = self.verifyStageOut(source, sourceSize, sourceChecksum, checksumType, destination)
#statusRet = status
#outputRet["errorLog"] = output["errorLog"]
#outputRet["report"] = output["report"]
#outputRet["size"] = sourceSize
#outputRet["checksum"] = sourceChecksum
#return statusRet, outputRet
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
""" copy input file from SE to local dir """
error = PilotErrors()
pilotErrorDiag = ""
# Get input parameters from pdict
jobId = pdict.get('jobId', '')
workDir = pdict.get('workDir', '')
experiment = pdict.get('experiment', '')
proxycheck = pdict.get('proxycheck', False)
# try to get the direct reading control variable (False for direct reading mode; file should not be copied)
useCT = pdict.get('usect', True)
prodDBlockToken = pdict.get('access', '')
# get the DQ2 tracing report
report = self.getStubTracingReport(pdict['report'], 'local', lfn, guid)
status, output = self.getStageInMode(lfn, prodDBlockToken)
if output["transfer_mode"]:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state=output["transfer_mode"], type="input")
if status !=0:
self.__sendReport(output["report"], report)
return status, output["errorLog"]
if path == '': path = './'
fullname = os.path.join(path, lfn)
status, output = self.stageIn(gpfn, fullname, fsize, fchecksum, experiment)
if status == 0:
updateFileState(lfn, workDir, jobId, mode="file_state", state="transferred", type="input")
self.__sendReport(output["report"], report)
return status, output["errorLog"]
def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
""" copy output file from disk to local SE """
# function is based on dCacheSiteMover put function
error = PilotErrors()
pilotErrorDiag = ""
# Get input parameters from pdict
alt = pdict.get('alt', False)
lfn = pdict.get('lfn', '')
guid = pdict.get('guid', '')
token = pdict.get('token', '')
scope = pdict.get('scope', '')
dsname = pdict.get('dsname', '')
analysisJob = pdict.get('analJob', False)
testLevel = pdict.get('testLevel', '0')
extradirs = pdict.get('extradirs', '')
experiment = pdict.get('experiment', '')
proxycheck = pdict.get('proxycheck', False)
prodSourceLabel = pdict.get('prodSourceLabel', '')
# get the site information object
si = getSiteInformation(experiment)
tolog("put_data received prodSourceLabel=%s" % (prodSourceLabel))
if prodSourceLabel == 'ddm' and analysisJob:
tolog("Treating PanDA Mover job as a production job during stage-out")
analysisJob = False
# get the DQ2 tracing report
report = self.getStubTracingReport(pdict['report'], 'local', lfn, guid)
filename = os.path.basename(source)
# get all the proper paths
ec, pilotErrorDiag, tracer_error, dst_gpfn, lfcdir, surl = si.getProperPaths(error, analysisJob, token, prodSourceLabel, dsname, filename, scope=scope, alt=alt)
if ec != 0:
reportState = {}
reportState["clientState"] = tracer_error
self.__sendReport(reportState, report)
return self.put_data_retfail(ec, pilotErrorDiag)
# get the DQ2 site name from ToA
try:
_dq2SiteName = self.getDQ2SiteName(surl=surl)
except Exception, e:
tolog("Warning: Failed to get the DQ2 site name: %s (can not add this info to tracing report)" % str(e))
else:
report['localSite'], report['remoteSite'] = (_dq2SiteName, _dq2SiteName)
tolog("DQ2 site name: %s" % (_dq2SiteName))
if testLevel == "1":
source = "thisisjustatest"
status, output = self.stageOut(source, surl, token, guid, experiment)
if status !=0:
self.__sendReport(output["report"], report)
return self.put_data_retfail(status, output["errorLog"], surl)
reportState = {}
reportState["clientState"] = "DONE"
self.__sendReport(reportState, report)
return 0, pilotErrorDiag, surl, output["size"], output["checksum"], self.arch_type
def errorToReport(self, errorOutput, timeUsed, fileName, stageMethod='stageIN'):
status = 0
outputRet = {}
outputRet["errorLog"] = None
outputRet["report"] = {}
outputRet["report"]["clientState"] = None
if "File exists" in errorOutput or "SRM_FILE_BUSY" in errorOutput:
pilotErrorDiag = "File already exist in the destination."
tolog("!!WARNING!!2990!! %s" % (pilotErrorDiag))
#self.__sendReport('FILE_EXIST', report)
outputRet["report"]["clientState"] = 'FILE_EXIST'
outputRet["errorLog"] = pilotErrorDiag
return PilotErrors.ERR_FILEEXIST, outputRet
elif "Could not establish context" in errorOutput:
pilotErrorDiag = "Could not establish context: Proxy / VO extension of proxy has probably expired"
tolog("!!WARNING!!2990!! %s" % (pilotErrorDiag))
#self.__sendReport('CONTEXT_FAIL', report)
outputRet["report"]["clientState"] = 'CONTEXT_FAIL'
outputRet["errorLog"] = pilotErrorDiag
return PilotErrors.ERR_NOPROXY, outputRet
elif "globus_xio:" in errorOutput:
pilotErrorDiag = "Globus system error: %s" % (errorOuput)
self.log("Globus system error encountered")
#self.__sendReport('GLOBUS_FAIL', report)
outputRet["report"]["clientState"] = 'GLOBUS_FAIL'
outputRet["errorLog"] = pilotErrorDiag
return PilotErrors.ERR_GETGLOBUSSYSERR, outputRet
elif "No space left on device" in errorOutput:
pilotErrorDiag = "No available space left on local disk: %s" % (errorOutput)
tolog("No available space left on local disk")
#self.__sendReport('NO_SPACE', report)
outputRet["report"]["clientState"] = 'NO_SPACE'
outputRet["errorLog"] = pilotErrorDiag
return PilotErrors.ERR_NOLOCALSPACE, outputRet
elif "No such file or directory" in errorOutput:
if "DBRelease" in fileName:
pilotErrorDiag = "Missing DBRelease file: %s" % (fileName)
tolog("!!WARNING!!2990!! %s" % (pilotErrorDiag))
#self.__sendReport('NO_DBREL', report)
outputRet["report"]["clientState"] = 'NO_DBREL'
outputRet["errorLog"] = pilotErrorDiag
return PilotErrors.ERR_MISSDBREL, outputRet
else:
pilotErrorDiag = "No such file or directory: %s" % (fileName)
tolog("!!WARNING!!2990!! %s" % (pilotErrorDiag))
#self.__sendReport('NO_FILE_DIR', report)
outputRet["report"]["clientState"] = 'NO_FILE'
outputRet["errorLog"] = pilotErrorDiag
return PilotErrors.ERR_NOSUCHFILE, outputRet
else:
if timeUsed >= self.timeout:
pilotErrorDiag = "Copy command self timed out after %d s" % (timeUsed)
tolog("!!WARNING!!2990!! %s" % (pilotErrorDiag))
if stageMethod == "stageIN":
#self.__sendReport('GET_TIMEOUT', report)
outputRet["report"]["clientState"] = 'GET_TIMEOUT'
outputRet["errorLog"] = pilotErrorDiag
                    return PilotErrors.ERR_GETTIMEOUT, outputRet
else:
#self.__sendReport('CP_TIMEOUT', report)
outputRet["report"]["clientState"] = 'CP_TIMEOUT'
outputRet["errorLog"] = pilotErrorDiag
return PilotErrors.ERR_PUTTIMEOUT, outputRet
else:
if len(errorOutput) == 0:
pilotErrorDiag = "Copy command returned error code %d but no output" % (s)
else:
pilotErrorDiag = errorOutput
#self.__sendReport('COPY_ERROR', report)
outputRet["report"]["clientState"] = 'COPY_ERROR'
outputRet["errorLog"] = pilotErrorDiag
if stageMethod == "stageIN":
return PilotErrors.ERR_STAGEINFAILED, outputRet
else:
return PilotErrors.ERR_STAGEOUTFAILED, outputRet
def __sendReport(self, reportState, report):
"""
Send DQ2 tracing report. Set the client exit state and finish
"""
if report.has_key('timeStart'):
# finish instrumentation
report['timeEnd'] = time()
for key in reportState.keys():
report[key] = reportState[key]
# send report
tolog("Updated tracing report: %s" % str(report))
self.sendTrace(report)
|
"""This platform allows several lights to be grouped into one light."""
from collections import Counter
import itertools
import logging
from typing import Any, Callable, Iterator, List, Optional, Tuple
import voluptuous as vol
from homeassistant.components import light
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
CONF_NAME,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import State, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Light Group"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_ENTITIES): cv.entities_domain(light.DOMAIN),
}
)
SUPPORT_GROUP_LIGHT = (
SUPPORT_BRIGHTNESS
| SUPPORT_COLOR_TEMP
| SUPPORT_EFFECT
| SUPPORT_FLASH
| SUPPORT_COLOR
| SUPPORT_TRANSITION
| SUPPORT_WHITE_VALUE
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Initialize light.group platform."""
async_add_entities([LightGroup(config.get(CONF_NAME), config[CONF_ENTITIES])])
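# Illustrative YAML configuration accepted by PLATFORM_SCHEMA above
# (entity ids are hypothetical):
#
#   light:
#     - platform: group
#       name: Kitchen Lights
#       entities:
#         - light.kitchen_ceiling
#         - light.kitchen_counter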
class LightGroup(light.Light):
"""Representation of a light group."""
def __init__(self, name: str, entity_ids: List[str]) -> None:
"""Initialize a light group."""
self._name = name # type: str
self._entity_ids = entity_ids # type: List[str]
self._is_on = False # type: bool
self._available = False # type: bool
self._brightness = None # type: Optional[int]
self._hs_color = None # type: Optional[Tuple[float, float]]
self._color_temp = None # type: Optional[int]
self._min_mireds = 154 # type: Optional[int]
self._max_mireds = 500 # type: Optional[int]
self._white_value = None # type: Optional[int]
self._effect_list = None # type: Optional[List[str]]
self._effect = None # type: Optional[str]
self._supported_features = 0 # type: int
self._async_unsub_state_changed = None
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
@callback
def async_state_changed_listener(
entity_id: str, old_state: State, new_state: State
):
"""Handle child updates."""
self.async_schedule_update_ha_state(True)
self._async_unsub_state_changed = async_track_state_change(
self.hass, self._entity_ids, async_state_changed_listener
)
await self.async_update()
async def async_will_remove_from_hass(self):
"""Handle removal from HASS."""
if self._async_unsub_state_changed is not None:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def is_on(self) -> bool:
"""Return the on/off state of the light group."""
return self._is_on
@property
def available(self) -> bool:
"""Return whether the light group is available."""
return self._available
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of this light group between 0..255."""
return self._brightness
@property
def hs_color(self) -> Optional[Tuple[float, float]]:
"""Return the HS color value [float, float]."""
return self._hs_color
@property
def color_temp(self) -> Optional[int]:
"""Return the CT color value in mireds."""
return self._color_temp
@property
def min_mireds(self) -> Optional[int]:
"""Return the coldest color_temp that this light group supports."""
return self._min_mireds
@property
def max_mireds(self) -> Optional[int]:
"""Return the warmest color_temp that this light group supports."""
return self._max_mireds
@property
def white_value(self) -> Optional[int]:
"""Return the white value of this light group between 0..255."""
return self._white_value
@property
def effect_list(self) -> Optional[List[str]]:
"""Return the list of supported effects."""
return self._effect_list
@property
def effect(self) -> Optional[str]:
"""Return the current effect."""
return self._effect
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def should_poll(self) -> bool:
"""No polling needed for a light group."""
return False
async def async_turn_on(self, **kwargs):
"""Forward the turn_on command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = kwargs[ATTR_BRIGHTNESS]
if ATTR_HS_COLOR in kwargs:
data[ATTR_HS_COLOR] = kwargs[ATTR_HS_COLOR]
if ATTR_COLOR_TEMP in kwargs:
data[ATTR_COLOR_TEMP] = kwargs[ATTR_COLOR_TEMP]
if ATTR_WHITE_VALUE in kwargs:
data[ATTR_WHITE_VALUE] = kwargs[ATTR_WHITE_VALUE]
if ATTR_EFFECT in kwargs:
data[ATTR_EFFECT] = kwargs[ATTR_EFFECT]
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
if ATTR_FLASH in kwargs:
data[ATTR_FLASH] = kwargs[ATTR_FLASH]
await self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_ON, data, blocking=True
)
async def async_turn_off(self, **kwargs):
"""Forward the turn_off command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
await self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_OFF, data, blocking=True
)
async def async_update(self):
"""Query all members and determine the light group state."""
all_states = [self.hass.states.get(x) for x in self._entity_ids]
states = list(filter(None, all_states))
on_states = [state for state in states if state.state == STATE_ON]
self._is_on = len(on_states) > 0
self._available = any(state.state != STATE_UNAVAILABLE for state in states)
self._brightness = _reduce_attribute(on_states, ATTR_BRIGHTNESS)
self._hs_color = _reduce_attribute(on_states, ATTR_HS_COLOR, reduce=_mean_tuple)
self._white_value = _reduce_attribute(on_states, ATTR_WHITE_VALUE)
self._color_temp = _reduce_attribute(on_states, ATTR_COLOR_TEMP)
self._min_mireds = _reduce_attribute(
states, ATTR_MIN_MIREDS, default=154, reduce=min
)
self._max_mireds = _reduce_attribute(
states, ATTR_MAX_MIREDS, default=500, reduce=max
)
self._effect_list = None
all_effect_lists = list(_find_state_attributes(states, ATTR_EFFECT_LIST))
if all_effect_lists:
# Merge all effects from all effect_lists with a union merge.
self._effect_list = list(set().union(*all_effect_lists))
self._effect = None
all_effects = list(_find_state_attributes(on_states, ATTR_EFFECT))
if all_effects:
# Report the most common effect.
effects_count = Counter(itertools.chain(all_effects))
self._effect = effects_count.most_common(1)[0][0]
self._supported_features = 0
for support in _find_state_attributes(states, ATTR_SUPPORTED_FEATURES):
# Merge supported features by emulating support for every feature
# we find.
self._supported_features |= support
        # Bitwise-and the supported features with the light group's own feature
        # mask so that we don't break in the future when a new feature is added.
self._supported_features &= SUPPORT_GROUP_LIGHT
def _find_state_attributes(states: List[State], key: str) -> Iterator[Any]:
"""Find attributes with matching key from states."""
for state in states:
value = state.attributes.get(key)
if value is not None:
yield value
def _mean_int(*args):
"""Return the mean of the supplied values."""
return int(sum(args) / len(args))
def _mean_tuple(*args):
"""Return the mean values along the columns of the supplied values."""
return tuple(sum(l) / len(l) for l in zip(*args))
def _reduce_attribute(
states: List[State],
key: str,
default: Optional[Any] = None,
reduce: Callable[..., Any] = _mean_int,
) -> Any:
"""Find the first attribute matching key from states.
If none are found, return default.
"""
attrs = list(_find_state_attributes(states, key))
if not attrs:
return default
if len(attrs) == 1:
return attrs[0]
return reduce(*attrs)
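# Minimal usage sketch (illustrative only; the example states are hypothetical):
# _reduce_attribute averages a numeric attribute across member states, while
# _mean_tuple averages tuple attributes such as hs_color column-wise.
#
#   states = [State("light.a", STATE_ON, {ATTR_BRIGHTNESS: 100}),
#             State("light.b", STATE_ON, {ATTR_BRIGHTNESS: 200})]
#   _reduce_attribute(states, ATTR_BRIGHTNESS)   # -> 150
#   _mean_tuple((30.0, 40.0), (50.0, 60.0))      # -> (40.0, 50.0)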
|
from model.group import Group
import random
def test_modify_group_name(app, db, check_ui):
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    else:
        old_groups = db.get_group_list()
        group = random.choice(old_groups)
        group1 = Group(id=group.id, name="fruit loop")
        app.group.modify_group_by_id(group1.id, group1)
assert len(old_groups) == app.group.count()
def clean(group):
return Group(id=group.id, name=group.name.strip())
if check_ui:
            assert sorted(map(clean, db.get_group_list()), key=Group.id_or_max) == \
                sorted(app.group.get_group_list(), key=Group.id_or_max)
|
import sys
sys.stdin = open("/Users/seeva92/Workspace/Contests/1.txt", "r")
sys.stdout = open("/Users/seeva92/Workspace/Contests/2.txt", "w")
n = int(input())  # length of the string (not used below)
s = input()  # the input string (renamed from `string` so compute() can find it)
def compute():
    # For every ordered pair of letters, greedily count the longest subsequence
    # of s that alternates between the two letters; print the maximum length found.
res = 0
for i in range(0,26):
for j in range(0,26):
flag = True
curr = 0
pIdx = -1
while(True):
if flag:
pIdx = s.find(chr(ord('a')+i),pIdx+1)
if pIdx == -1:
break
else:
curr+=1
flag = False
else:
pIdx = s.find(chr(ord('a')+j),pIdx+1)
if pIdx == -1:
break
else:
curr+=1
                        flag = True  # switch back to searching for the first letter (alternate)
res = max(res,curr)
curr = 0
flag = True
for k in s:
if flag:
if chr(ord('a')+j) != k:
continue
else:
curr+=1
flag = False
else:
if chr(ord('a')+i) != k:
continue
else:
curr+=1
flag = True
res = max(res,curr)
# print(res)
print(res)
compute()
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('event_cal', '0015_remove_eventshift_drivers'),
]
operations = [
migrations.AddField(
model_name='calendarevent',
name='needs_COE_event',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
|
"""empty message
Revision ID: 437126f00e1f
Revises: 0e58be3d3d20
Create Date: 2017-06-05 22:41:37.902189
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
revision = '437126f00e1f'
down_revision = '0e58be3d3d20'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('password_hash', sa.String(length=100), nullable=True))
op.drop_column('user', 'password')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('password', mysql.VARCHAR(length=50), nullable=True))
op.drop_column('user', 'password_hash')
# ### end Alembic commands ###
|
regions = {
0: 'eu',
1: 'kr',
2: 'sea',
3: 'tw',
4: 'us'
}
|
class TreetaggerToWordnet():
"""
Treetagger POS tags to wordnet morphological category mapper.
"""
def __init__(self):
self.fr_mapping = {
"ADJ" : "adv",
"ADV" : "adj",
"NAM" : "noun",
"NOM" : "noun",
"NUM" : "noun",
"VER:cond" : "verb",
"VER:futu" : "verb",
"VER:impe" : "verb",
"VER:impf" : "verb",
"VER:infi" : "verb",
"VER:pper" : "verb",
"VER:ppre" : "verb",
"VER:pres" : "verb",
"VER:simp" : "verb",
"VER:subi" : "verb",
"VER:subp" : "verb"
}
self.es_mapping = {"ADJ": "adj",
"ADV": "adv",
"NC": "noun",
"NMEA": "noun",
"NP": "noun",
"VCLIger": "verb",
"VCLIinf": "verb",
"VCLIfin": "verb",
"VEadj": "verb",
"VEfin": "verb",
"VEger": "verb",
"VEinf": "verb",
"VHadj": "verb",
"VHfin": "verb",
"VHger": "verb",
"VHinf": "verb",
"VLadj": "verb",
"VLfin": "verb",
"VLger": "verb",
"VLinf": "verb",
"VMadj": "verb",
"VMfin": "verb",
"VMger": "verb",
"VMinf": "verb",
"VSadj": "verb",
"VSfi": "verb",
"VSge": "verb",
"VSinf": "verb",
"VCLIinf": "verb"}
self.en_mapping = {"ADJ": "adj",
"JJ": "adj",
"JJR": "adj",
"JJS": "adj",
"RB": "adv",
"RBR": "adv",
"RBS": "adv",
"NN": "noun",
"NC": "noun",
"NNS": "noun",
"NNP": "noun",
"NNPS": "noun",
"VB": "verb",
"VBD": "verb",
"VBG": "verb",
"VBN": "verb",
"VBP": "verb",
"VBZ": "verb",
"VLfin": "verb",
"VCLIinf": "verb"}
self.pt_mapping = {"ADJ": "adj",
"ADV": "adv",
"N": "noun",
"V": "verb"}
self.it_mapping = { "ADJ" : "adj",
"ADV" : "adv",
"NOM" :"noun",
"NPR" : "noun",
"NUM" : "noun",
"VER:cimp" : "verb",
"VER:cond" : "verb",
"VER:cpre" : "verb",
"VER:futu" : "verb",
"VER:geru" : "verb",
"VER:impe" : "verb",
"VER:impf" : "verb",
"VER:infi" : "verb",
"VER:pper" : "verb",
"VER:ppre" : "verb",
"VER:pres" : "verb",
"VER:refl:infi" : "verb",
"VER:remo" : "verb"}
self.ca_mapping = {}
self.mapping = {
"sp": self.es_mapping,
"en": self.en_mapping,
"pt": self.pt_mapping,
"it": self.it_mapping,
"ca": self.ca_mapping,
"fr": self.fr_mapping
}
self.short_mapping={"adj": "a",
"adv": "r",
"noun": "n",
"verb": "v"}
def wordnet_morph_category(self, lang, postag):
"""
Returns the wordnet morphological category corresponding to the
POS tag of the given language.
"""
pos = self.mapping[lang].get(postag, None)
if pos is not None:
return self.short_mapping[pos]
else:
return None
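# A minimal, illustrative usage sketch of the mapper above; the tags shown are
# sample TreeTagger tags and the language keys ("en", "fr", "sp") are the ones
# this class defines.
if __name__ == "__main__":
    mapper = TreetaggerToWordnet()
    print(mapper.wordnet_morph_category("en", "NNS"))       # -> "n"
    print(mapper.wordnet_morph_category("fr", "VER:pres"))  # -> "v"
    print(mapper.wordnet_morph_category("sp", "SENT"))      # -> None (unmapped tag)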
|
import sys
import string
from testutils import unittest, ConnectingTestCase, decorate_all_tests
from testutils import skip_if_no_iobase, skip_before_postgres
from cStringIO import StringIO
from itertools import cycle, izip
from subprocess import Popen, PIPE
import psycopg2
import psycopg2.extensions
from testutils import skip_copy_if_green, script_to_py3
from testconfig import dsn
if sys.version_info[0] < 3:
_base = object
else:
from io import TextIOBase as _base
class MinimalRead(_base):
"""A file wrapper exposing the minimal interface to copy from."""
def __init__(self, f):
self.f = f
def read(self, size):
return self.f.read(size)
def readline(self):
return self.f.readline()
class MinimalWrite(_base):
"""A file wrapper exposing the minimal interface to copy to."""
def __init__(self, f):
self.f = f
def write(self, data):
return self.f.write(data)
class CopyTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
self._create_temp_table()
def _create_temp_table(self):
curs = self.conn.cursor()
curs.execute('''
CREATE TEMPORARY TABLE tcopy (
id serial PRIMARY KEY,
data text
)''')
def test_copy_from(self):
curs = self.conn.cursor()
try:
self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={})
finally:
curs.close()
def test_copy_from_insane_size(self):
# Trying to trigger a "would block" error
curs = self.conn.cursor()
try:
self._copy_from(curs, nrecs=10*1024, srec=10*1024,
copykw={'size': 20*1024*1024})
finally:
curs.close()
def test_copy_from_cols(self):
curs = self.conn.cursor()
f = StringIO()
for i in xrange(10):
f.write("%s\n" % (i,))
f.seek(0)
curs.copy_from(MinimalRead(f), "tcopy", columns=['id'])
curs.execute("select * from tcopy order by id")
self.assertEqual([(i, None) for i in range(10)], curs.fetchall())
def test_copy_from_cols_err(self):
curs = self.conn.cursor()
f = StringIO()
for i in xrange(10):
f.write("%s\n" % (i,))
f.seek(0)
def cols():
raise ZeroDivisionError()
yield 'id'
self.assertRaises(ZeroDivisionError,
curs.copy_from, MinimalRead(f), "tcopy", columns=cols())
def test_copy_to(self):
curs = self.conn.cursor()
try:
self._copy_from(curs, nrecs=1024, srec=10*1024, copykw={})
self._copy_to(curs, srec=10*1024)
finally:
curs.close()
@skip_if_no_iobase
def test_copy_text(self):
self.conn.set_client_encoding('latin1')
self._create_temp_table() # the above call closed the xn
if sys.version_info[0] < 3:
abin = ''.join(map(chr, range(32, 127) + range(160, 256)))
about = abin.decode('latin1').replace('\\', '\\\\')
else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
about = abin.replace('\\', '\\\\')
curs = self.conn.cursor()
curs.execute('insert into tcopy values (%s, %s)',
(42, abin))
import io
f = io.StringIO()
curs.copy_to(f, 'tcopy', columns=('data',))
f.seek(0)
self.assertEqual(f.readline().rstrip(), about)
@skip_if_no_iobase
def test_copy_bytes(self):
self.conn.set_client_encoding('latin1')
self._create_temp_table() # the above call closed the xn
if sys.version_info[0] < 3:
abin = ''.join(map(chr, range(32, 127) + range(160, 255)))
about = abin.replace('\\', '\\\\')
else:
            abin = bytes(list(range(32, 127)) + list(range(160, 255))).decode('latin1')
about = abin.replace('\\', '\\\\').encode('latin1')
curs = self.conn.cursor()
curs.execute('insert into tcopy values (%s, %s)',
(42, abin))
import io
f = io.BytesIO()
curs.copy_to(f, 'tcopy', columns=('data',))
f.seek(0)
self.assertEqual(f.readline().rstrip(), about)
@skip_if_no_iobase
def test_copy_expert_textiobase(self):
self.conn.set_client_encoding('latin1')
self._create_temp_table() # the above call closed the xn
if sys.version_info[0] < 3:
abin = ''.join(map(chr, range(32, 127) + range(160, 256)))
abin = abin.decode('latin1')
about = abin.replace('\\', '\\\\')
else:
            abin = bytes(list(range(32, 127)) + list(range(160, 256))).decode('latin1')
about = abin.replace('\\', '\\\\')
import io
f = io.StringIO()
f.write(about)
f.seek(0)
curs = self.conn.cursor()
psycopg2.extensions.register_type(
psycopg2.extensions.UNICODE, curs)
curs.copy_expert('COPY tcopy (data) FROM STDIN', f)
curs.execute("select data from tcopy;")
self.assertEqual(curs.fetchone()[0], abin)
f = io.StringIO()
curs.copy_expert('COPY tcopy (data) TO STDOUT', f)
f.seek(0)
self.assertEqual(f.readline().rstrip(), about)
# same tests with setting size
f = io.StringIO()
f.write(about)
f.seek(0)
exp_size = 123
# hack here to leave file as is, only check size when reading
real_read = f.read
def read(_size, f=f, exp_size=exp_size):
self.assertEqual(_size, exp_size)
return real_read(_size)
f.read = read
curs.copy_expert('COPY tcopy (data) FROM STDIN', f, size=exp_size)
curs.execute("select data from tcopy;")
self.assertEqual(curs.fetchone()[0], abin)
def _copy_from(self, curs, nrecs, srec, copykw):
f = StringIO()
for i, c in izip(xrange(nrecs), cycle(string.ascii_letters)):
l = c * srec
f.write("%s\t%s\n" % (i,l))
f.seek(0)
curs.copy_from(MinimalRead(f), "tcopy", **copykw)
curs.execute("select count(*) from tcopy")
self.assertEqual(nrecs, curs.fetchone()[0])
curs.execute("select data from tcopy where id < %s order by id",
(len(string.ascii_letters),))
for i, (l,) in enumerate(curs):
self.assertEqual(l, string.ascii_letters[i] * srec)
def _copy_to(self, curs, srec):
f = StringIO()
curs.copy_to(MinimalWrite(f), "tcopy")
f.seek(0)
ntests = 0
for line in f:
n, s = line.split()
if int(n) < len(string.ascii_letters):
self.assertEqual(s, string.ascii_letters[int(n)] * srec)
ntests += 1
self.assertEqual(ntests, len(string.ascii_letters))
def test_copy_expert_file_refcount(self):
class Whatever(object):
pass
f = Whatever()
curs = self.conn.cursor()
self.assertRaises(TypeError,
curs.copy_expert, 'COPY tcopy (data) FROM STDIN', f)
def test_copy_no_column_limit(self):
cols = [ "c%050d" % i for i in range(200) ]
curs = self.conn.cursor()
curs.execute('CREATE TEMPORARY TABLE manycols (%s)' % ',\n'.join(
[ "%s int" % c for c in cols]))
curs.execute("INSERT INTO manycols DEFAULT VALUES")
f = StringIO()
curs.copy_to(f, "manycols", columns = cols)
f.seek(0)
self.assertEqual(f.read().split(), ['\\N'] * len(cols))
f.seek(0)
curs.copy_from(f, "manycols", columns = cols)
curs.execute("select count(*) from manycols;")
self.assertEqual(curs.fetchone()[0], 2)
@skip_before_postgres(8, 2) # they don't send the count
def test_copy_rowcount(self):
curs = self.conn.cursor()
curs.copy_from(StringIO('aaa\nbbb\nccc\n'), 'tcopy', columns=['data'])
self.assertEqual(curs.rowcount, 3)
curs.copy_expert(
"copy tcopy (data) from stdin",
StringIO('ddd\neee\n'))
self.assertEqual(curs.rowcount, 2)
curs.copy_to(StringIO(), "tcopy")
self.assertEqual(curs.rowcount, 5)
curs.execute("insert into tcopy (data) values ('fff')")
curs.copy_expert("copy tcopy to stdout", StringIO())
self.assertEqual(curs.rowcount, 6)
def test_copy_rowcount_error(self):
curs = self.conn.cursor()
curs.execute("insert into tcopy (data) values ('fff')")
self.assertEqual(curs.rowcount, 1)
self.assertRaises(psycopg2.DataError,
curs.copy_from, StringIO('aaa\nbbb\nccc\n'), 'tcopy')
self.assertEqual(curs.rowcount, -1)
def test_copy_from_segfault(self):
# issue #219
script = ("""\
import psycopg2
conn = psycopg2.connect(%(dsn)r)
curs = conn.cursor()
curs.execute("create table copy_segf (id int)")
try:
curs.execute("copy copy_segf from stdin")
except psycopg2.ProgrammingError:
pass
conn.close()
""" % { 'dsn': dsn,})
proc = Popen([sys.executable, '-c', script_to_py3(script)])
proc.communicate()
self.assertEqual(0, proc.returncode)
def test_copy_to_segfault(self):
# issue #219
script = ("""\
import psycopg2
conn = psycopg2.connect(%(dsn)r)
curs = conn.cursor()
curs.execute("create table copy_segf (id int)")
try:
curs.execute("copy copy_segf to stdout")
except psycopg2.ProgrammingError:
pass
conn.close()
""" % { 'dsn': dsn,})
proc = Popen([sys.executable, '-c', script_to_py3(script)], stdout=PIPE)
proc.communicate()
self.assertEqual(0, proc.returncode)
def test_copy_from_propagate_error(self):
class BrokenRead(_base):
def read(self, size):
return 1/0
def readline(self):
return 1/0
curs = self.conn.cursor()
# It seems we cannot do this, but now at least we propagate the error
# self.assertRaises(ZeroDivisionError,
# curs.copy_from, BrokenRead(), "tcopy")
try:
curs.copy_from(BrokenRead(), "tcopy")
except Exception, e:
self.assert_('ZeroDivisionError' in str(e))
def test_copy_to_propagate_error(self):
class BrokenWrite(_base):
def write(self, data):
return 1/0
curs = self.conn.cursor()
curs.execute("insert into tcopy values (10, 'hi')")
self.assertRaises(ZeroDivisionError,
curs.copy_to, BrokenWrite(), "tcopy")
decorate_all_tests(CopyTests, skip_copy_if_green)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
import base64
import os
import sys
import subprocess
import StringIO
if len(sys.argv)<3:
print >> sys.stderr, "syntax: diff-opencloud <localfn> <remotehost:remotefn>"
sys.exit(-1)
srcfn = sys.argv[1]
dest = sys.argv[2]
if not ":" in dest:
print >> sys.stderr, "malformed desthost:destfn"
sys.exit(-1)
(hostname,destfn) = dest.split(":",1)
if destfn.endswith("/"):
destfn = destfn + os.path.basename(srcfn)
script = 'echo START; base64 %s' % destfn
file("/tmp/script","w").write(script)
p = subprocess.Popen(["ssh", "-A", hostname], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(enctext,stderr) = p.communicate(input=script)
if stderr:
print >> sys.stderr, stderr
if "No such file" in stderr:
sys.exit(-1)
enctext = enctext.split("START")[1]
text = base64.b64decode(enctext)
file("/tmp/diff-src","w").write(text)
os.system("diff /tmp/diff-src %s" % srcfn)
"""
SRCPATHNAME=$1
DESTHOSTNAME=$2
DESTPATHNAME=$3
echo "base64 -d -i > $DESTPATHNAME <<EOF" > /tmp/ssh-up
base64 $SRCPATHNAME >> /tmp/ssh-up
echo "EOF" >> /tmp/ssh-up
ssh -A $DESTHOSTNAME < /tmp/ssh-up
"""
|
from MaxFlow import FlowNetwork
INF = 99999
area_count = int(raw_input())
project_count = int(raw_input())
areas = {}
for i in xrange(1, area_count+1):
cost = int(raw_input())
areas["req_" + str(i)] = cost
projects_revenues = {}
projects_reqs = {}
for i in xrange(1, project_count+1):
project_str = raw_input()
project = project_str.split()
proj_id = "proy_" + str(i)
projects_revenues[proj_id] = int(project[0])
projects_reqs[proj_id] = map(lambda x: "req_" + str(x), project[1:])
g = FlowNetwork()
g.add_vertex('s')
g.add_vertex('t')
[g.add_vertex(v) for v in projects_revenues.keys()]
[g.add_vertex(v) for v in areas.keys()]
for proj in projects_revenues.keys():
print "arista", "s -> " + proj
g.add_edge('s', proj, projects_revenues[proj])
for proj in projects_reqs.keys():
reqs = projects_reqs[proj]
for area in reqs:
print "arista", proj + " -> " + area
g.add_edge(proj, area, INF)
for area in areas.keys():
print "arista", area + " -> t"
g.add_edge(area,'t', areas[area])
print "MAX_FLOW", (g.max_flow('s','t'))
print "MIN_CUT", g.find_min_cut('s', [])
print "POYECTOS REALIZADOS"
for result in g.find_min_cut('s', []):
if result.sink in projects_revenues:
print "\t", result.sink + ":", projects_revenues[result.sink]
|
"""CATER (with masks) dataset reader."""
import functools
import tensorflow as tf
COMPRESSION_TYPE = 'ZLIB'
IMAGE_SIZE = [64, 64]
SEQUENCE_LENGTH = 33
MAX_NUM_ENTITIES = 11
BYTE_FEATURES = ['image', 'mask']
def feature_descriptions(
sequence_length=SEQUENCE_LENGTH,
max_num_entities=MAX_NUM_ENTITIES):
return {
'camera_matrix': tf.io.FixedLenFeature(
[sequence_length, 4, 4], tf.float32),
'image': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'object_positions': tf.io.FixedLenFeature(
[max_num_entities, sequence_length, 3], tf.float32)
}
def _decode(example_proto, features,
sequence_length=SEQUENCE_LENGTH,
max_num_entities=MAX_NUM_ENTITIES):
"""Parse the input `tf.Example` proto using a feature description dictionary.
Args:
example_proto: the serialized example.
features: feature descriptions to deserialize `example_proto`.
sequence_length: the length of each video in timesteps.
max_num_entities: the maximum number of entities in any frame of the video.
Returns:
A dict containing the following tensors:
- 'image': a sequence of RGB frames.
- 'mask': a mask for all entities in each frame.
- 'camera_matrix': a 4x4 matrix describing the camera pose in each frame.
- 'object_positions': 3D position for all entities in each frame.
"""
single_example = tf.io.parse_single_example(example_proto, features=features)
for key in BYTE_FEATURES:
single_example[key] = tf.io.decode_raw(single_example[key], tf.uint8)
single_example['image'] = tf.reshape(
single_example['image'],
[sequence_length] + IMAGE_SIZE + [3])
single_example['mask'] = tf.reshape(
single_example['mask'],
[sequence_length, max_num_entities] + IMAGE_SIZE + [1])
single_example['object_positions'] = tf.transpose(
single_example['object_positions'], [1, 0, 2])
return single_example
def dataset(tfrecords_path, read_buffer_size=None, map_parallel_calls=None):
"""Read, decompress, and parse TFRecords.
Args:
tfrecords_path: str or Sequence[str]. Path or paths to dataset files.
read_buffer_size: int. Number of bytes in the read buffer. See documentation
for `tf.data.TFRecordDataset.__init__`.
map_parallel_calls: int. Number of elements decoded asynchronously in
parallel. See documentation for `tf.data.Dataset.map`.
Returns:
An unbatched `tf.data.TFRecordDataset`.
"""
raw_dataset = tf.data.TFRecordDataset(
tfrecords_path, compression_type=COMPRESSION_TYPE,
buffer_size=read_buffer_size)
features = feature_descriptions()
partial_decode_fn = functools.partial(_decode, features=features)
return raw_dataset.map(
partial_decode_fn, num_parallel_calls=map_parallel_calls)
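# A minimal, illustrative usage sketch; "cater_with_masks_train.tfrecords" is a
# placeholder path, and eager execution (TF2) is assumed for the iteration below.
if __name__ == "__main__":
    demo = dataset("cater_with_masks_train.tfrecords")
    for example in demo.take(1):
        # Expected shapes: image (33, 64, 64, 3), mask (33, 11, 64, 64, 1),
        # camera_matrix (33, 4, 4), object_positions (33, 11, 3).
        print({key: value.shape for key, value in example.items()})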
|
import unittest
class TestKaggleFootballMultiAgentEnv(unittest.TestCase):
def test_football_env(self):
from ray.rllib.env.wrappers.kaggle_wrapper import \
KaggleFootballMultiAgentEnv
env = KaggleFootballMultiAgentEnv()
obs = env.reset()
self.assertEqual(list(obs.keys()), ["agent0", "agent1"])
action_dict = {"agent0": 0, "agent1": 0}
obs, reward, done, info = env.step(action_dict)
self.assertEqual(list(obs.keys()), ["agent0", "agent1"])
self.assertEqual(reward, {"agent0": 0, "agent1": 0})
self.assertEqual(done, {
"agent0": False,
"agent1": False,
"__all__": False,
})
self.assertEqual(info, {"agent0": {}, "agent1": {}})
def test_football_env_run_30_steps(self):
        from kaggle_environments.utils import structify
        from ray.rllib.env.wrappers.kaggle_wrapper import \
            KaggleFootballMultiAgentEnv
env = KaggleFootballMultiAgentEnv()
# use the built-in agents in the kaggle environment
run_right_agent = env.kaggle_env.agents["run_right"]
do_nothing_agent = env.kaggle_env.agents["do_nothing"]
obs = env.reset()
self.assertEqual(list(obs.keys()), ["agent0", "agent1"])
done = {"__all__": False}
num_steps_completed = 0
while not done["__all__"] and num_steps_completed <= 30:
action0 = run_right_agent(structify(obs["agent0"]))[0]
action1 = do_nothing_agent(structify(obs["agent1"]))[0]
action_dict = {"agent0": action0, "agent1": action1}
obs, _, done, _ = env.step(action_dict)
num_steps_completed += 1
def test_kaggle_football_agent_spaces(self):
from ray.rllib.env.wrappers.kaggle_wrapper import \
KaggleFootballMultiAgentEnv
env = KaggleFootballMultiAgentEnv()
obs = env.reset()
action_space, obs_space = env.build_agent_spaces()
self.assertTrue(obs_space.contains(obs["agent0"]))
self.assertTrue(obs_space.contains(obs["agent1"]))
action_dict = {
"agent0": action_space.sample(),
"agent1": action_space.sample(),
}
obs, _, _, _ = env.step(action_dict)
self.assertTrue(obs_space.contains(obs["agent0"]))
self.assertTrue(obs_space.contains(obs["agent1"]))
if __name__ == "__main__":
from kaggle_environments.utils import structify
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
"""
Author: AsherYang
Email: ouyangfan1991@gmail.com
Date: 2017/9/22.
Desc: sinaWeibo appkey
@see: http://open.weibo.com/apps/2489615368/info/basic?action=review
use sina author 52*****18@qq.com
"""
sina_domain="https://api.weibo.com/2/"
sina_token="2.008qwMJEagKUiCa7196fa2370UKTrM"
sina_appkey = "2489615368"
sina_secret = "dbb84df92e9a9c8f8e10d9985a8038a8"
|
import os
import math
import mxnet as mx
import numpy as np
'''
Author: luoyetx@github
Github Repo: https://github.com/luoyetx/mx-lsoftmax/blob/master/lsoftmax.py
'''
os.environ['MXNET_CPU_WORKER_NTHREADS'] = '2'
class LSoftmaxOp(mx.operator.CustomOp):
'''LSoftmax from <Large-Margin Softmax Loss for Convolutional Neural Networks>
'''
def __init__(self, margin, beta, beta_min, scale):
self.margin = int(margin)
self.beta = float(beta)
self.beta_min = float(beta_min)
self.scale = float(scale)
self.c_map = []
self.k_map = []
c_m_n = lambda m, n: math.factorial(n) / math.factorial(m) / math.factorial(n-m)
for i in range(margin+1):
self.c_map.append(c_m_n(i, margin))
self.k_map.append(math.cos(i * math.pi / margin))
def find_k(self, cos_t):
'''find k for cos(theta)
'''
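        # k indexes the interval containing theta: k_map[k+1] <= cos_t <= k_map[k],
        # i.e. theta in [k*pi/margin, (k+1)*pi/margin]; forward() uses this k in the
        # piecewise psi(theta) = (-1)^k * cos(margin*theta) - 2k.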
# for numeric issue
eps = 1e-5
le = lambda x, y: x < y or abs(x-y) < eps
for i in range(self.margin):
if le(self.k_map[i+1], cos_t) and le(cos_t, self.k_map[i]):
return i
raise ValueError('can not find k for cos_t = %f'%cos_t)
def calc_cos_mt(self, cos_t):
'''calculate cos(m*theta)
'''
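        # Multiple-angle expansion used below (with sin^2(t) = 1 - cos^2(t)):
        #   cos(m*t) = sum_{p=0}^{floor(m/2)} (-1)^p * C(m, 2p) * cos(t)^(m-2p) * sin(t)^(2p)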
cos_mt = 0
sin2_t = 1 - cos_t * cos_t
flag = -1
for p in range(self.margin / 2 + 1):
flag *= -1
cos_mt += flag * self.c_map[2*p] * pow(cos_t, self.margin-2*p) * pow(sin2_t, p)
return cos_mt
def forward(self, is_train, req, in_data, out_data, aux):
assert len(in_data) == 3
assert len(out_data) == 1
assert len(req) == 1
x, label, w = in_data
x = x.asnumpy()
w = w.asnumpy()
label = label.asnumpy()
# original fully connected
out = x.dot(w.T)
if is_train:
# large margin fully connected
n = label.shape[0]
w_norm = np.linalg.norm(w, axis=1)
x_norm = np.linalg.norm(x, axis=1)
for i in range(n):
j = yi = int(label[i])
f = out[i, yi]
cos_t = f / (w_norm[yi] * x_norm[i])
# calc k and cos_mt
k = self.find_k(cos_t)
cos_mt = self.calc_cos_mt(cos_t)
# f_i_j = (\beta * f_i_j + fo_i_j) / (1 + \beta)
fo_i_j = f
f_i_j = (pow(-1, k) * cos_mt - 2*k) * (w_norm[yi] * x_norm[i])
out[i, yi] = (f_i_j + self.beta * fo_i_j) / (1 + self.beta)
self.assign(out_data[0], req[0], mx.nd.array(out))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
assert len(in_data) == 3
assert len(out_grad) == 1
assert len(in_grad) == 3
assert len(req) == 3
x, label, w = in_data
x = x.asnumpy()
w = w.asnumpy()
label = label.asnumpy()
o_grad = out_grad[0].asnumpy()
# original fully connected
x_grad = o_grad.dot(w)
w_grad = o_grad.T.dot(x)
# large margin fully connected
n = label.shape[0] # batch size
m = w.shape[0] # number of classes
margin = self.margin # margin
feature_dim = w.shape[1] # feature dimension
cos_t = np.zeros(n, dtype=np.float32) # cos(theta)
cos_mt = np.zeros(n, dtype=np.float32) # cos(margin * theta)
sin2_t = np.zeros(n, dtype=np.float32) # sin(theta) ^ 2
fo = np.zeros(n, dtype=np.float32) # fo_i = dot(x_i, w_yi)
k = np.zeros(n, dtype=np.int32)
x_norm = np.linalg.norm(x, axis=1)
w_norm = np.linalg.norm(w, axis=1)
for i in range(n):
j = yi = int(label[i])
f = w[yi].dot(x[i])
cos_t[i] = f / (w_norm[yi] * x_norm[i])
k[i] = self.find_k(cos_t[i])
cos_mt[i] = self.calc_cos_mt(cos_t[i])
sin2_t[i] = 1 - cos_t[i]*cos_t[i]
fo[i] = f
# gradient w.r.t. x_i
for i in range(n):
# df / dx at x = x_i, w = w_yi
j = yi = int(label[i])
dcos_dx = w[yi] / (w_norm[yi]*x_norm[i]) - x[i] * fo[i] / (w_norm[yi]*pow(x_norm[i], 3))
dsin2_dx = -2 * cos_t[i] * dcos_dx
dcosm_dx = margin*pow(cos_t[i], margin-1) * dcos_dx # p = 0
flag = 1
for p in range(1, margin / 2 + 1):
flag *= -1
dcosm_dx += flag * self.c_map[2*p] * ( \
p*pow(cos_t[i], margin-2*p)*pow(sin2_t[i], p-1)*dsin2_dx + \
(margin-2*p)*pow(cos_t[i], margin-2*p-1)*pow(sin2_t[i], p)*dcos_dx)
df_dx = (pow(-1, k[i]) * cos_mt[i] - 2*k[i]) * w_norm[yi] / x_norm[i] * x[i] + \
pow(-1, k[i]) * w_norm[yi] * x_norm[i] * dcosm_dx
alpha = 1 / (1 + self.beta)
x_grad[i] += alpha * o_grad[i, yi] * (df_dx - w[yi])
# gradient w.r.t. w_j
for j in range(m):
dw = np.zeros(feature_dim, dtype=np.float32)
for i in range(n):
yi = int(label[i])
if yi == j:
# df / dw at x = x_i, w = w_yi and yi == j
dcos_dw = x[i] / (w_norm[yi]*x_norm[i]) - w[yi] * fo[i] / (x_norm[i]*pow(w_norm[yi], 3))
dsin2_dw = -2 * cos_t[i] * dcos_dw
dcosm_dw = margin*pow(cos_t[i], margin-1) * dcos_dw # p = 0
flag = 1
for p in range(1, margin / 2 + 1):
flag *= -1
dcosm_dw += flag * self.c_map[2*p] * ( \
p*pow(cos_t[i], margin-2*p)*pow(sin2_t[i], p-1)*dsin2_dw + \
(margin-2*p)*pow(cos_t[i], margin-2*p-1)*pow(sin2_t[i], p)*dcos_dw)
df_dw_j = (pow(-1, k[i]) * cos_mt[i] - 2*k[i]) * x_norm[i] / w_norm[yi] * w[yi] + \
pow(-1, k[i]) * w_norm[yi] * x_norm[i] * dcosm_dw
dw += o_grad[i, yi] * (df_dw_j - x[i])
alpha = 1 / (1 + self.beta)
w_grad[j] += alpha * dw
self.assign(in_grad[0], req[0], mx.nd.array(x_grad))
self.assign(in_grad[2], req[2], mx.nd.array(w_grad))
# dirty hack, should also work for multi devices
self.beta *= self.scale
self.beta = max(self.beta, self.beta_min)
@mx.operator.register("LSoftmax")
class LSoftmaxProp(mx.operator.CustomOpProp):
def __init__(self, num_hidden, beta, margin, scale=1, beta_min=0):
super(LSoftmaxProp, self).__init__(need_top_grad=True)
self.margin = int(margin)
self.num_hidden = int(num_hidden)
self.beta = float(beta)
self.beta_min = float(beta_min)
self.scale = float(scale)
def list_arguments(self):
return ['data', 'label', 'weight']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
assert len(in_shape) == 3, "LSoftmaxOp input data: [data, label, weight]"
dshape = in_shape[0]
lshape = in_shape[1]
assert len(dshape) == 2, "data shape should be (batch_size, feature_dim)"
assert len(lshape) == 1, "label shape should be (batch_size,)"
wshape = (self.num_hidden, dshape[1])
oshape = (dshape[0], self.num_hidden)
return [dshape, lshape, wshape], [oshape,], []
def infer_type(self, in_type):
return [in_type[0]]*len(in_type), [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states())
def create_operator(self, ctx, shapes, dtypes):
return LSoftmaxOp(margin=self.margin, beta=self.beta, beta_min=self.beta_min, scale=self.scale)
def test_op():
"""test LSoftmax Operator
"""
# build symbol
batch_size = cmd_args.batch_size
embedding_dim = cmd_args.embedding_dim
num_classes = cmd_args.num_classes
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
weight = mx.sym.Variable('weight')
args = {
'data': np.random.normal(0, 1, (batch_size, embedding_dim)),
'weight': np.random.normal(0, 1, (num_classes, embedding_dim)),
'label': np.random.choice(num_classes, batch_size),
}
if cmd_args.op_impl == 'py':
symbol = mx.sym.Custom(data=data, label=label, weight=weight, num_hidden=10,
beta=cmd_args.beta, margin=cmd_args.margin, scale=cmd_args.scale,
op_type='LSoftmax', name='lsoftmax')
else:
symbol = mx.sym.LSoftmax(data=data, label=label, weight=weight, num_hidden=num_classes,
margin=cmd_args.margin, beta=cmd_args.beta, scale=cmd_args.scale,
name='lsoftmax')
data_shape = (batch_size, embedding_dim)
label_shape = (batch_size,)
weight_shape = (num_classes, embedding_dim)
ctx = mx.cpu() if cmd_args.op_impl == 'py' else mx.gpu()
executor = symbol.simple_bind(ctx=ctx, data=data_shape, label=label_shape, weight=weight_shape)
def forward(data, label, weight):
data = mx.nd.array(data, ctx=ctx)
label = mx.nd.array(label, ctx=ctx)
weight = mx.nd.array(weight, ctx=ctx)
executor.forward(is_train=True, data=data, label=label, weight=weight)
return executor.output_dict['lsoftmax_output'].asnumpy()
def backward(out_grad):
executor.backward(out_grads=[mx.nd.array(out_grad, ctx=ctx)])
return executor.grad_dict
def gradient_check(name, i, j):
'''gradient check on x[i, j]
'''
eps = 1e-4
threshold = 1e-2
reldiff = lambda a, b: abs(a-b) / (abs(a) + abs(b))
# calculate by backward
output = forward(data=args['data'], weight=args['weight'], label=args['label'])
grad_dict = backward(output)
grad = grad_dict[name].asnumpy()[i, j]
# calculate by \delta f / 2 * eps
loss = lambda x: np.square(x).sum() / 2
args[name][i, j] -= eps
loss1 = loss(forward(data=args['data'], weight=args['weight'], label=args['label']))
args[name][i, j] += 2 * eps
loss2 = loss(forward(data=args['data'], weight=args['weight'], label=args['label']))
grad_expect = (loss2 - loss1) / (2 * eps)
# check
rel_err = reldiff(grad_expect, grad)
if rel_err > threshold:
print 'gradient check failed'
print 'expected %lf given %lf, relative error %lf'%(grad_expect, grad, rel_err)
return False
else:
print 'gradient check pass'
return True
# test forward
output = forward(data=args['data'], weight=args['weight'], label=args['label'])
diff = args['data'].dot(args['weight'].T) - output
# test backward
# gradient check on data
data_gc_pass = 0
for i in range(args['data'].shape[0]):
for j in range(args['data'].shape[1]):
print 'gradient check on data[%d, %d]'%(i, j)
if gradient_check('data', i, j):
data_gc_pass += 1
# gradient check on weight
weight_gc_pass = 0
for i in range(args['weight'].shape[0]):
for j in range(args['weight'].shape[1]):
print 'gradient check on weight[%d, %d]'%(i, j)
if gradient_check('weight', i, j):
weight_gc_pass += 1
print '===== Summary ====='
print 'gradient on data pass ratio is %lf'%(float(data_gc_pass) / args['data'].size)
print 'gradient on weight pass ratio is %lf'%(float(weight_gc_pass) / args['weight'].size)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32, help="test batch size")
parser.add_argument('--num-classes', type=int, default=10, help="test number of classes")
parser.add_argument('--embedding-dim', type=int, default=3, help="test embedding dimension")
parser.add_argument('--margin', type=int, default=2, help="test lsoftmax margin")
parser.add_argument('--beta', type=float, default=10, help="test lsoftmax beta")
parser.add_argument('--scale', type=float, default=1, help="beta scale of every mini-batch")
parser.add_argument('--op-impl', type=str, choices=['py', 'cpp'], default='py', help="test op implementation")
cmd_args = parser.parse_args()
print cmd_args
# check
if cmd_args.op_impl == 'cpp':
try:
op_creator = mx.sym.LSoftmax
except AttributeError:
print 'No cpp operator for LSoftmax, Skip test'
import sys
sys.exit(0)
test_op()
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class AuditLogConfig(AppConfig):
label = 'auditlog'
name = 'cobra.apps.auditlog'
verbose_name = _('Audit Log')
|
__version__ = '0.9.dev1'
|
import time
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Joy
def callback(data):
    # data.buttons[1] holds the button state: 1 for pressed,
    # 0 for not pressed
move = data.buttons[1]
speed = 255
#ascii u=117 (for up) and ascii d=100 (for down)
if move == 1:
seconds = rospy.get_time()
#lower bin
while (rospy.get_time() -seconds) < 5:
#writeNumber(102, speed)
time.sleep(0.01)
#received = str(readNumber())
received = str("lower bin")
rospy.loginfo(received)
pub.publish(received)
seconds = rospy.get_time()
#pause
while (rospy.get_time() -seconds) < 2:
#writeNumber(32, speed)
time.sleep(0.01)
#received = str(readNumber())
received = str("pause")
rospy.loginfo(received)
pub.publish(received)
seconds = rospy.get_time()
#raise bin
while (rospy.get_time() -seconds) < 5:
#writeNumber(114, speed)
time.sleep(0.01)
#received = str(readNumber())
received = str("raise bin")
rospy.loginfo(received)
pub.publish(received)
seconds = rospy.get_time()
#pause
while (rospy.get_time() -seconds) < 2:
#writeNumber(32, speed)
time.sleep(0.01)
#received = str(readNumber())
received = str("pause")
rospy.loginfo(received)
pub.publish(received)
move = 0
time.sleep(2)
def run():
    global pub
    # init_node must run before creating publishers/subscribers, and the
    # publisher must exist before the first joy message reaches callback().
    rospy.init_node('talker', anonymous=True)
    pub = rospy.Publisher('chatter', String, queue_size=10)
    rospy.Subscriber("joy", Joy, callback)
    rospy.spin()
if __name__ == '__main__':
try:
run()
except rospy.ROSInterruptException:
pass
|
from .api import * # NOQA
from .index import * # NOQA
from .inventory import * # NOQA
from .setup import * # NOQA
from .targets import * # NOQA
|