hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b710777f920eefd3c308238ecf035afa9798731 | 2,585 | py | Python | roadsandlibraries.py | rresender/python-samples | 2fb2330f59f3cc0c6b975381e22268a758773b69 | [
"MIT"
] | null | null | null | roadsandlibraries.py | rresender/python-samples | 2fb2330f59f3cc0c6b975381e22268a758773b69 | [
"MIT"
] | null | null | null | roadsandlibraries.py | rresender/python-samples | 2fb2330f59f3cc0c6b975381e22268a758773b69 | [
"MIT"
] | null | null | null | from collections import defaultdict
class Graph:
    """Undirected graph over vertices ``1..size`` stored as adjacency lists."""
    def __init__(self, size):
        # Pre-create an (initially empty) adjacency list for every vertex so
        # isolated vertices still appear as their own connected component.
        self.graph = {x: [] for x in range(1, size + 1)}
    def addEdge(self, u, v):
        """Add an undirected edge between vertices ``u`` and ``v``."""
        self.graph[u].append(v)
        self.graph[v].append(u)
    def dfs_util(self, v, visited, adjacents=None):
        """Collect every vertex reachable from ``v`` into ``adjacents``.

        Iterative depth-first search: avoids RecursionError on large or
        path-like graphs while keeping the original signature. ``visited``
        is mutated in place; the list of reached vertices is returned.
        """
        if adjacents is None:
            adjacents = []
        stack = [v]
        while stack:
            node = stack.pop()
            if visited[node]:
                continue
            visited[node] = True
            adjacents.append(node)
            for neighbor in self.graph[node]:
                if not visited[neighbor]:
                    stack.append(neighbor)
        return adjacents
    def dfs(self):
        """Return the connected components, each as a list of vertices."""
        length = len(self.graph) + 1
        visited = [False] * length
        clusters = []
        for v in range(1, length):
            if not visited[v]:
                clusters.append(self.dfs_util(v, visited))
        return clusters
def roadsAndLibraries(n, c_lib, c_road, cities):
    """Return the minimum cost of giving all ``n`` cities library access.

    Each connected component needs exactly one library plus ``size - 1``
    repaired roads; if a library is cheaper than a road, build one per city.

    :param n: number of cities (vertices ``1..n``).
    :param c_lib: cost of building one library.
    :param c_road: cost of repairing one road.
    :param cities: iterable of ``(u, v)`` road endpoints.
    """
    if c_lib < c_road:
        # A library everywhere is strictly cheaper than connecting cities.
        # (Shortcut taken before building the graph to avoid wasted work.)
        return c_lib * n
    g = Graph(n)
    for u, v in cities:
        g.addEdge(min(u, v), max(u, v))
    clusters = g.dfs()
    # One library per component; a spanning tree of each component needs
    # len(component) - 1 roads.  (Debug print removed.)
    total_roads = sum(len(c) - 1 for c in clusters)
    return (len(clusters) * c_lib) + (total_roads * c_road)
if __name__ == '__main__':
    # Sample query: n=5 cities, m=3 roads, library cost 6, road cost 1.
    # Expected output: 15 (two components -> 2 libraries + 3 roads).
    query_count = 1
    for _ in range(query_count):
        n, m, c_lib, c_road = (int(token) for token in "5 3 6 1".split())
        cities = [(1, 2), (1, 3), (1, 4)]
        result = roadsAndLibraries(n, c_lib, c_road, cities)
        print(str(result) + '\n')
| 19.007353 | 67 | 0.49942 |
e92a3f6a71c7225b293a941020749046d2c628d5 | 12,558 | py | Python | fairseq/model_parallel/modules/multihead_attention.py | lukeiscoding/fairseq | 3748359f597a4785b7769af588b4d939138b177a | [
"MIT"
] | 651 | 2015-03-14T23:18:44.000Z | 2022-01-19T14:08:28.000Z | fairseq/model_parallel/modules/multihead_attention.py | lukeiscoding/fairseq | 3748359f597a4785b7769af588b4d939138b177a | [
"MIT"
] | 362 | 2015-01-26T16:20:28.000Z | 2022-01-26T06:19:23.000Z | fairseq/model_parallel/modules/multihead_attention.py | lukeiscoding/fairseq | 3748359f597a4785b7769af588b4d939138b177a | [
"MIT"
] | 169 | 2015-09-28T17:06:28.000Z | 2021-12-18T16:02:49.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor, nn
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
# The Megatron model-parallel utilities live in an optional git submodule.
# Record availability instead of failing at import time; the attention class
# below raises a descriptive ImportError on construction when it is missing.
try:
    from fairseq.model_parallel.megatron.mpu import (
        get_cuda_rng_tracker,
        get_model_parallel_world_size,
        ColumnParallelLinear,
        RowParallelLinear,
    )
    has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
    has_megatron_submodule = False
@with_incremental_state
class ModelParallelMultiheadAttention(nn.Module):
    """Model parallel Multi-headed attention.
    This performs the Multi-headed attention over multiple gpus.
    See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
    """
    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        self_attention=False,
        encoder_decoder_attention=False,
    ):
        """Build the sharded attention layer.

        ``num_heads`` must be divisible by the model-parallel world size and
        ``embed_dim`` by ``num_heads``; each rank owns an equal slice of heads.
        """
        super().__init__()
        if not has_megatron_submodule:
            raise ImportError(
                '\n\nPlease install the megatron submodule:'
                '\n\n    git submodule update --init '
                'fairseq/model_parallel/megatron'
            )
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.model_parallel_size = get_model_parallel_world_size()
        # Heads are split evenly across model-parallel ranks.
        self.num_heads_partition = num_heads // self.model_parallel_size
        assert (
            self.num_heads_partition * self.model_parallel_size == num_heads
        ), "Number of heads must be divisble by model parallel size"
        self.dropout_module = FairseqDropout(
            dropout, module_name=self.__class__.__name__
        )
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # Standard scaled dot-product factor: 1 / sqrt(head_dim).
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and value to be of the same size"
        )
        # Q/K/V use column-parallel linears with gather_output=False, so each
        # rank computes only its own head slice; out_proj is row-parallel with
        # input_is_parallel=True and consumes that sharded output.  The exact
        # collective behavior is defined in megatron.mpu — see the paper above.
        self.k_proj = ColumnParallelLinear(self.kdim, embed_dim, bias=bias, gather_output=False)
        self.v_proj = ColumnParallelLinear(self.vdim, embed_dim, bias=bias, gather_output=False)
        self.q_proj = ColumnParallelLinear(embed_dim, embed_dim, bias=bias, gather_output=False)
        self.out_proj = RowParallelLinear(embed_dim, embed_dim, bias=bias, input_is_parallel=True)
    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        **unused_kwargs,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time x Batch x Channel
        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
        """
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None
        if self.self_attention:
            q = self.q_proj(query)
            k = self.k_proj(query)
            v = self.v_proj(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        q *= self.scaling
        # Fold this rank's heads into the batch dimension:
        # (len, bsz, dim_slice) -> (bsz * heads_partition, len, head_dim).
        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads_partition, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads_partition, self.head_dim)
                .transpose(0, 1)
            )
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim)
            if "prev_key" in saved_state:
                _prev_key = saved_state["prev_key"]
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads_partition, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads_partition, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert k is not None and v is not None
            key_padding_mask = ModelParallelMultiheadAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k.size(1),
                static_kv=static_kv,
            )
            # Write the (possibly extended) K/V and mask back into the cache.
            saved_state["prev_key"] = k.view(bsz, self.num_heads_partition, -1, self.head_dim)
            saved_state["prev_value"] = v.view(bsz, self.num_heads_partition, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            # In this branch incremental_state is never None
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        src_len = k.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads_partition, tgt_len, src_len]
        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            attn_weights += attn_mask
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads_partition, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
            )
            attn_weights = attn_weights.view(bsz * self.num_heads_partition, tgt_len, src_len)
        attn_weights_float = utils.softmax(
            attn_weights, dim=-1
        )
        attn_weights = attn_weights_float.type_as(attn_weights)
        # Apply dropout under megatron's CUDA RNG tracker (presumably to
        # coordinate random state across model-parallel ranks — confirm).
        with get_cuda_rng_tracker().fork():
            attn_probs = self.dropout_module(attn_weights)
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads_partition, tgt_len, self.head_dim]
        # Each rank holds only its slice of the embedding dimension here;
        # out_proj (row-parallel) consumes this partitioned input.
        embed_dim_partition = embed_dim // self.model_parallel_size
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition)
        attn = self.out_proj(attn)
        # return attn_weights None to keep the return type same as single gpu multihead attention
        # This will be deprecated.
        attn_weights: Optional[Tensor] = None
        return attn, attn_weights
    @staticmethod
    def _append_prev_key_padding_mask(
        key_padding_mask: Optional[Tensor],
        prev_key_padding_mask: Optional[Tensor],
        batch_size: int,
        src_len: int,
        static_kv: bool,
    ) -> Optional[Tensor]:
        """Merge the cached key-padding mask with the current one.

        Returns a mask of shape ``(batch_size, src_len)``, zero-filling the
        span for whichever side is missing, or ``None`` if both are absent.
        """
        # saved key padding masks have shape (bsz, seq_len)
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
            )
        # During incremental decoding, as the padding token enters and
        # leaves the frame, there will be a time when prev or current
        # is None
        elif prev_key_padding_mask is not None:
            filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
            if prev_key_padding_mask.is_cuda:
                filler = filler.cuda()
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), filler.float()], dim=1
            )
        elif key_padding_mask is not None:
            filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
            if key_padding_mask.is_cuda:
                filler = filler.cuda()
            new_key_padding_mask = torch.cat(
                [filler.float(), key_padding_mask.float()], dim=1
            )
        else:
            # Both masks absent: result is None (prev_key_padding_mask here).
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask
    def reorder_incremental_state(
        self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
    ):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                if input_buffer[k] is not None:
                    # All cached tensors are batch-first, so reorder on dim 0.
                    input_buffer[k] = input_buffer[k].index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state
    def _get_input_buffer(
        self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
    ) -> Dict[str, Optional[Tensor]]:
        """Return the cached "attn_state" dict, or an empty dict if absent."""
        result = self.get_incremental_state(incremental_state, "attn_state")
        if result is not None:
            return result
        else:
            empty_result: Dict[str, Optional[Tensor]] = {}
            return empty_result
    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        """Store ``buffer`` as the "attn_state" entry of ``incremental_state``."""
        return self.set_incremental_state(incremental_state, "attn_state", buffer)
| 40.121406 | 98 | 0.616659 |
9171e7a453ab5b2b56f0f5146063b8fc5aa6c842 | 5,944 | py | Python | sdk/python/pulumi_sumologic/get_collector.py | pulumi/pulumi-sumologic | 962fa056ee4b96e61a200e7bf2308bfad723c3af | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-10-13T03:50:41.000Z | 2021-10-13T03:50:41.000Z | sdk/python/pulumi_sumologic/get_collector.py | pulumi/pulumi-sumologic | 962fa056ee4b96e61a200e7bf2308bfad723c3af | [
"ECL-2.0",
"Apache-2.0"
] | 28 | 2021-05-21T11:00:45.000Z | 2022-03-31T15:47:13.000Z | sdk/python/pulumi_sumologic/get_collector.py | pulumi/pulumi-sumologic | 962fa056ee4b96e61a200e7bf2308bfad723c3af | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
# Public API of this generated module (controls ``import *``).
__all__ = [
    'GetCollectorResult',
    'AwaitableGetCollectorResult',
    'get_collector',
    'get_collector_output',
]
@pulumi.output_type
class GetCollectorResult:
    """
    A collection of values returned by getCollector.
    """
    # Generated by the Pulumi Terraform Bridge (tfgen): each constructor
    # argument is type-checked, stashed via pulumi.set, and exposed through a
    # read-only @pulumi.getter property below.
    def __init__(__self__, category=None, description=None, fields=None, id=None, name=None, timezone=None):
        if category and not isinstance(category, str):
            raise TypeError("Expected argument 'category' to be a str")
        pulumi.set(__self__, "category", category)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if fields and not isinstance(fields, dict):
            raise TypeError("Expected argument 'fields' to be a dict")
        pulumi.set(__self__, "fields", fields)
        if id and not isinstance(id, int):
            raise TypeError("Expected argument 'id' to be a int")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if timezone and not isinstance(timezone, str):
            raise TypeError("Expected argument 'timezone' to be a str")
        pulumi.set(__self__, "timezone", timezone)
    @property
    @pulumi.getter
    def category(self) -> str:
        """The default source category for any source attached to this collector."""
        return pulumi.get(self, "category")
    @property
    @pulumi.getter
    def description(self) -> str:
        """The description of the collector."""
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def fields(self) -> Mapping[str, str]:
        # Key/value field pairs; their schema is not documented in this
        # module — see the Sumo Logic provider docs.
        return pulumi.get(self, "fields")
    @property
    @pulumi.getter
    def id(self) -> int:
        """The internal ID of the collector (usable to attach sources)."""
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the collector."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def timezone(self) -> str:
        """The collector's time zone, in tzdata naming convention."""
        return pulumi.get(self, "timezone")
class AwaitableGetCollectorResult(GetCollectorResult):
    # Makes the result usable with ``await``: __await__ is a generator that
    # never actually yields (the ``if False: yield`` only marks it as a
    # generator function) and immediately returns a plain GetCollectorResult.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetCollectorResult(
            category=self.category,
            description=self.description,
            fields=self.fields,
            id=self.id,
            name=self.name,
            timezone=self.timezone)
def get_collector(id: Optional[int] = None,
                  name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCollectorResult:
    """
    Provides a way to retrieve Sumo Logic collector details (id, names, etc) for a collector.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_sumologic as sumologic
    this = sumologic.get_collector(name="MyCollector")
    ```
    ```python
    import pulumi
    import pulumi_sumologic as sumologic
    that = sumologic.get_collector(id=1234567890)
    ```
    A collector can be looked up by either `id` or `name`. One of those attributes needs to be specified.
    If both `id` and `name` have been specified, `id` takes precedence.
    ## Attributes reference
    The following attributes are exported:
    - `id` - The internal ID of the collector. This can be used to attach sources to the collector.
    - `name` - The name of the collector.
    - `description` - The description of the collector.
    - `category` - The default source category for any source attached to this collector.
    - `timezone` - The time zone to use for this collector. The value follows the [tzdata][2] naming convention.
    """
    # Both lookup keys are forwarded; precedence between them is resolved by
    # the provider (id wins, per the docstring above).
    __args__ = dict()
    __args__['id'] = id
    __args__['name'] = name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the typ= argument maps the raw result onto
    # GetCollectorResult.
    __ret__ = pulumi.runtime.invoke('sumologic:index/getCollector:getCollector', __args__, opts=opts, typ=GetCollectorResult).value
    return AwaitableGetCollectorResult(
        category=__ret__.category,
        description=__ret__.description,
        fields=__ret__.fields,
        id=__ret__.id,
        name=__ret__.name,
        timezone=__ret__.timezone)
@_utilities.lift_output_func(get_collector)
def get_collector_output(id: Optional[pulumi.Input[Optional[int]]] = None,
                         name: Optional[pulumi.Input[Optional[str]]] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCollectorResult]:
    """
    Provides a way to retrieve Sumo Logic collector details (id, names, etc) for a collector.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_sumologic as sumologic
    this = sumologic.get_collector(name="MyCollector")
    ```
    ```python
    import pulumi
    import pulumi_sumologic as sumologic
    that = sumologic.get_collector(id=1234567890)
    ```
    A collector can be looked up by either `id` or `name`. One of those attributes needs to be specified.
    If both `id` and `name` have been specified, `id` takes precedence.
    ## Attributes reference
    The following attributes are exported:
    - `id` - The internal ID of the collector. This can be used to attach sources to the collector.
    - `name` - The name of the collector.
    - `description` - The description of the collector.
    - `category` - The default source category for any source attached to this collector.
    - `timezone` - The time zone to use for this collector. The value follows the [tzdata][2] naming convention.
    """
    # Body intentionally empty: lift_output_func wraps get_collector so that
    # Input/Output arguments are awaited and the result is a pulumi.Output.
    ...
| 33.393258 | 131 | 0.658816 |
0a49cfde7d4289ca21ab22bfecd48c932d068308 | 233 | py | Python | app/main/errors.py | joseck12/one-minute-pitch | 2b74500c08b60bbf08b94c0b400479b2c6ef594a | [
"MIT",
"Unlicense"
] | null | null | null | app/main/errors.py | joseck12/one-minute-pitch | 2b74500c08b60bbf08b94c0b400479b2c6ef594a | [
"MIT",
"Unlicense"
] | null | null | null | app/main/errors.py | joseck12/one-minute-pitch | 2b74500c08b60bbf08b94c0b400479b2c6ef594a | [
"MIT",
"Unlicense"
] | null | null | null | from flask import render_template
from . import main
from ..import db
@main.app_errorhandler(404)
def four_Ow_four(error):
    """Render the custom "page not found" template with a 404 status."""
    page = render_template('fourOwfour.html')
    return page, 404
| 21.181818 | 50 | 0.72103 |
5103ce5a77c8e8d29d92c5dd654ec1e4c2a90896 | 6,979 | py | Python | examples/contrib/gp/sv-dkl.py | fehiepsi/pyro | c2a88c3c3c2ff58802026377c08be7f7c8e59785 | [
"MIT"
] | null | null | null | examples/contrib/gp/sv-dkl.py | fehiepsi/pyro | c2a88c3c3c2ff58802026377c08be7f7c8e59785 | [
"MIT"
] | 1 | 2017-12-15T14:01:01.000Z | 2017-12-17T03:09:06.000Z | examples/contrib/gp/sv-dkl.py | fehiepsi/pyro | c2a88c3c3c2ff58802026377c08be7f7c8e59785 | [
"MIT"
] | null | null | null | """
An example of using the Gaussian Process (GP) module to classify MNIST. Following
the idea from reference [1], we combine a convolutional neural network with an RBF
kernel to create a "deep" kernel. Then we train a SparseVariationalGP model using
SVI. Note that the model is trained end-to-end in mini-batches.
With default arguments (trained on CPU), the accuracy is 98.59%.
Reference:
[1] Stochastic Variational Deep Kernel Learning
Andrew G. Wilson, Zhiting Hu, Ruslan R. Salakhutdinov, Eric P. Xing
"""
# Code adapted from https://github.com/pytorch/examples/tree/master/mnist
from __future__ import absolute_import, division, print_function
import argparse
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import pyro
import pyro.contrib.gp as gp
import pyro.infer as infer
from pyro.contrib.examples.util import get_data_loader, get_data_directory
class CNN(nn.Module):
    """Small LeNet-style feature extractor: 1x28x28 images -> 10 outputs."""
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # conv -> 2x2 max-pool -> ReLU, twice
        hidden = F.relu(F.max_pool2d(self.conv1(x), 2))
        hidden = F.relu(F.max_pool2d(self.conv2(hidden), 2))
        # flatten to (batch, 20 * 4 * 4) for the fully-connected head
        flat = hidden.view(-1, 320)
        flat = F.relu(self.fc1(flat))
        return self.fc2(flat)
def train(args, train_loader, gpmodule, optimizer, loss_fn, epoch):
    """Run one training epoch of the GP model over ``train_loader``.

    Each mini-batch is attached to the model via ``set_data`` before the
    variational loss is computed and one optimizer step is taken.
    """
    for step, (inputs, labels) in enumerate(train_loader):
        if args.cuda:
            inputs = inputs.cuda()
            labels = labels.cuda()
        # attach the current mini-batch to the GP model
        gpmodule.set_data(inputs, labels)
        optimizer.zero_grad()
        loss = loss_fn(gpmodule.model, gpmodule.guide)
        loss.backward()
        optimizer.step()
        if step % args.log_interval == 0:
            seen = step * len(inputs)
            total = len(train_loader.dataset)
            progress = 100. * step / len(train_loader)
            print("Train Epoch: {:2d} [{:5d}/{} ({:2.0f}%)]\tLoss: {:.6f}"
                  .format(epoch, seen, total, progress, loss))
def test(args, test_loader, gpmodule):
    """Evaluate the GP model on the whole test set and print its accuracy."""
    correct = 0
    for images, labels in test_loader:
        if args.cuda:
            images = images.cuda()
            labels = labels.cuda()
        # predictive mean/variance of the latent function for this batch
        f_loc, f_var = gpmodule(images)
        # the likelihood turns the latent prediction into class labels
        predictions = gpmodule.likelihood(f_loc, f_var)
        # count matches against the targets to measure accuracy
        correct += predictions.eq(labels).long().cpu().sum()
    print("\nTest set: Accuracy: {}/{} ({:.0f}%)\n"
          .format(correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
def main(args):
    """Assemble the deep-kernel GP classifier and run train/eval epochs.

    Builds MNIST loaders, warps an RBF kernel with the CNN feature
    extractor, and optimizes a VariationalSparseGP with SVI for
    ``args.epochs`` epochs, printing test accuracy after each one.
    """
    data_dir = args.data_dir if args.data_dir is not None else get_data_directory(__file__)
    train_loader = get_data_loader(dataset_name='MNIST',
                                   data_dir=data_dir,
                                   batch_size=args.batch_size,
                                   dataset_transforms=[transforms.Normalize((0.1307,), (0.3081,))],
                                   is_training_set=True,
                                   shuffle=True)
    # Bug fix: the test loader previously used args.batch_size, which left the
    # --test-batch-size CLI option silently without effect.
    test_loader = get_data_loader(dataset_name='MNIST',
                                  data_dir=data_dir,
                                  batch_size=args.test_batch_size,
                                  dataset_transforms=[transforms.Normalize((0.1307,), (0.3081,))],
                                  is_training_set=False,
                                  shuffle=True)
    cnn = CNN()
    # Create deep kernel by warping RBF with CNN.
    # CNN will transform a high dimension image into a low dimension 2D tensors for RBF kernel.
    # This kernel accepts inputs are inputs of CNN and gives outputs are covariance matrix of RBF
    # on outputs of CNN.
    rbf = gp.kernels.RBF(input_dim=10, lengthscale=torch.ones(10))
    deep_kernel = gp.kernels.Warping(rbf, iwarping_fn=cnn)
    # init inducing points (taken randomly from dataset)
    Xu = next(iter(train_loader))[0][:args.num_inducing]
    # use MultiClass likelihood for 10-class classification problem
    likelihood = gp.likelihoods.MultiClass(num_classes=10)
    # Because we use Categorical distribution in MultiClass likelihood, we need GP model returns
    # a list of probabilities of each class. Hence it is required to use latent_shape = 10.
    # Turns on "whiten" flag will help optimization for variational models.
    gpmodule = gp.models.VariationalSparseGP(X=Xu, y=None, kernel=deep_kernel, Xu=Xu,
                                             likelihood=likelihood, latent_shape=torch.Size([10]),
                                             num_data=60000, whiten=True)
    if args.cuda:
        gpmodule.cuda()
    optimizer = torch.optim.Adam(gpmodule.parameters(), lr=args.lr)
    # JIT-compiled ELBO when requested; both are mean-field variational losses.
    elbo = infer.JitTraceMeanField_ELBO() if args.jit else infer.TraceMeanField_ELBO()
    loss_fn = elbo.differentiable_loss
    for epoch in range(1, args.epochs + 1):
        start_time = time.time()
        train(args, train_loader, gpmodule, optimizer, loss_fn, epoch)
        with torch.no_grad():
            test(args, test_loader, gpmodule)
        print("Amount of time spent for epoch {}: {}s\n"
              .format(epoch, int(time.time() - start_time)))
if __name__ == '__main__':
    # Pyro's API changes between releases; this example targets the 0.3.0 line.
    assert pyro.__version__.startswith('0.3.0')
    # CLI surface; the defaults reproduce the 98.59% accuracy run described in
    # the module docstring.
    parser = argparse.ArgumentParser(description='Pyro GP MNIST Example')
    parser.add_argument('--data-dir', type=str, default=None, metavar='PATH',
                        help='default directory to cache MNIST data')
    parser.add_argument('--num-inducing', type=int, default=70, metavar='N',
                        help='number of inducing input (default: 70)')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    parser.add_argument('--jit', action='store_true', default=False,
                        help='enables PyTorch jit')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    # Fix the random seed for reproducibility before any model construction.
    pyro.set_rng_seed(args.seed)
    main(args)
| 43.893082 | 99 | 0.622152 |
acf179ff5d99bf7448a00e61737a029c826e656f | 3,333 | py | Python | src/unv/deploy/components/app/__init__.py | c137digital/unv_deploy | 7cb8e6902f4b3eff7e095f83e71d10582f023ab9 | [
"MIT"
] | null | null | null | src/unv/deploy/components/app/__init__.py | c137digital/unv_deploy | 7cb8e6902f4b3eff7e095f83e71d10582f023ab9 | [
"MIT"
] | null | null | null | src/unv/deploy/components/app/__init__.py | c137digital/unv_deploy | 7cb8e6902f4b3eff7e095f83e71d10582f023ab9 | [
"MIT"
] | null | null | null | import asyncio
from pathlib import Path
from typing import Generator
from watchgod import awatch
from ...tasks import DeployTasks, nohost, register
from ...settings import DeployComponentSettings
from ..systemd import SystemdTasksMixin
class AppSettings(DeployComponentSettings):
    """Schema, defaults and derived accessors for the ``app`` deploy component."""

    NAME = 'app'

    # Validation schema for this component's settings block.
    SCHEMA = {
        'instance': {'type': 'integer'},
        'bin': {'type': 'string', 'required': True},
        'user': {'type': 'string', 'required': False},
        'settings': {'type': 'string'},
        'systemd': SystemdTasksMixin.SCHEMA,
        'watch': {
            'type': 'list',
            'schema': {
                'type': 'dict',
                'schema': {
                    'local': {'type': 'string'},
                    'remote': {'type': 'string'},
                    'exclude': {'type': 'list', 'schema': {'type': 'string'}},
                },
            },
        },
    }

    # Values used when the deploy config does not override them.
    DEFAULT = {
        'bin': 'app.sh',
        'instance': 1,
        'settings': '',
        'systemd': {
            'template': 'app.service',
            'name': '{settings.NAME}_{instance}.service',
            'boot': True,
            'type': 'simple',
            'instances': {'count': 0, 'percent': 0},
            'context': {
                'limit_nofile': 2000,
                'description': "Application description",
            },
        },
        'watch': [
            {'local': './somedir', 'remote': './some'},
        ],
    }

    @property
    def bin(self):
        """Path to the launch script: kept as-is when absolute, otherwise
        resolved relative to ``home_abs``."""
        path_str = self._data['bin'].format(settings=self)
        if path_str.startswith('/'):
            return Path(path_str)
        return self.home_abs / path_str

    @property
    def module(self):
        """Raw ``settings`` value (presumably a settings module name — confirm)."""
        return self._data['settings']

    @property
    def instance(self):
        """Instance number of this component."""
        return self._data['instance']

    @property
    def watch_dirs(self):
        """Yield one dict per watched directory: local Path, remote path,
        and the (possibly empty) exclude list."""
        for entry in self._data['watch']:
            yield {
                'local': Path(entry['local']),
                'remote': entry['remote'],
                'exclude': entry.get('exclude', []),
            }
# Module-level default AppSettings instance.
SETTINGS = AppSettings()
class AppTasks(DeployTasks, SystemdTasksMixin):
    """Deploy tasks for the ``app`` component: build, sync, watch, setup."""
    SETTINGS = AppSettings
    @register
    @nohost
    async def watch(self):
        """Watch every configured local dir and re-sync it to every manager
        task's host, restarting the service after each change batch."""
        await asyncio.gather(*[
            self._watch_and_sync_dir(dir_info, task)
            for dir_info in self.settings.watch_dirs
            for task in self.get_all_manager_tasks(self.get_namespace())
        ])
    async def _watch_and_sync_dir(self, dir_info, task):
        # Each change batch reported by awatch triggers an upload followed by
        # a restart (restart/start presumably come from SystemdTasksMixin —
        # confirm).
        async for _ in awatch(dir_info['local']):
            with self._set_host(task.host), self._set_user(task.user):
                await self._upload(
                    dir_info['local'], dir_info['remote'],
                    exclude=dir_info['exclude']
                )
                await self.restart()
    async def build(self):
        """Define build instructions for your app"""
        # Installs rsync (presumably used by _upload — confirm) and creates
        # the component's dedicated user.
        await self._apt_install('rsync')
        await self._create_user()
    @register
    async def sync(self, type_=''):
        """Upload the launch script, mark it executable, sync systemd units."""
        await self._upload(
            self.settings.local_root / self.settings.bin.name,
            self.settings.home_abs
        )
        await self._run(f'chmod +x {self.settings.bin}')
        await self._sync_systemd_units()
    @register
    async def setup(self):
        """Full provisioning: build, then sync files, then start the service."""
        await self.build()
        await self.sync()
        await self.start()
| 28.008403 | 73 | 0.533753 |
807d0a01a46ffbd00440a5c498d0682c7d537cdc | 3,446 | py | Python | lib/python/frugal/tests/gae/server/test_webapp2_handler.py | ariasheets-wk/frugal | 81d41af7fb573c1f97afea99a1b4dfa6ccae29e8 | [
"Apache-2.0"
] | 144 | 2017-08-17T15:51:58.000Z | 2022-01-14T21:36:55.000Z | lib/python/frugal/tests/gae/server/test_webapp2_handler.py | ariasheets-wk/frugal | 81d41af7fb573c1f97afea99a1b4dfa6ccae29e8 | [
"Apache-2.0"
] | 930 | 2017-08-17T17:53:30.000Z | 2022-03-28T14:04:49.000Z | lib/python/frugal/tests/gae/server/test_webapp2_handler.py | ariasheets-wk/frugal | 81d41af7fb573c1f97afea99a1b4dfa6ccae29e8 | [
"Apache-2.0"
] | 77 | 2017-08-17T15:54:31.000Z | 2021-12-25T15:18:34.000Z | # Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import unittest
import mock
from thrift.protocol import TBinaryProtocol
import webapp2
import webtest
from frugal.protocol import FProtocolFactory
from frugal.gae.server.webapp2_handler import new_webapp2_handler
class FWebapp2HttpHandlerTest(unittest.TestCase):
    """Tests for the HTTP handler produced by ``new_webapp2_handler``.

    Each test posts a base64-encoded, 4-byte-length-prefixed frame to the
    handler mounted at ``/frugal`` and inspects the HTTP response.
    """

    def setUp(self):
        # The processor is mocked so these tests only exercise the
        # HTTP/framing layer, not real frugal processing.
        self.mock_processor = mock.Mock()
        prot_factory = FProtocolFactory(
            TBinaryProtocol.TBinaryProtocolFactory())
        app = webapp2.WSGIApplication([('/frugal', new_webapp2_handler(
            self.mock_processor, prot_factory))])
        self.test_app = webtest.TestApp(app)
        # Request: 3 payload bytes behind a 4-byte big-endian length prefix,
        # base64-encoded the way the handler expects to receive it.
        self.request_data = bytearray([2, 3, 4])
        self.request_frame = bytearray([0, 0, 0, 3]) + self.request_data
        self.request_payload = base64.b64encode(self.request_frame)
        # Response bytes the fake processor writes back; the handler is
        # expected to prepend the 4-byte length prefix itself.
        self.response_data = bytearray([6, 7, 8, 9, 10, 11])
        self.response_frame = bytearray([0, 0, 0, 6]) + self.response_data

        def process_data(_, oprot):
            # Emulate a processor by writing canned bytes to the output
            # protocol's transport.
            oprot.get_transport().write(self.response_data)
        self.mock_processor.process.side_effect = process_data

    def test_basic(self):
        """A valid frame yields 200 with a base64-encoded framed response."""
        response = self.test_app.post('/frugal', params=self.request_payload)
        self.assertEqual(200, response.status_int)
        self.assertTrue(self.mock_processor.process.called)
        iprot, _ = self.mock_processor.process.call_args[0]
        # The handler must strip the length prefix before handing the
        # payload to the processor.
        self.assertEqual(self.request_data, iprot.get_transport().getvalue())
        expected_response_payload = base64.b64encode(self.response_frame)
        self.assertEqual(expected_response_payload, response.normal_body)
        self.assertEqual('application/x-frugal',
                         response.headers['content-type'])
        self.assertEqual('base64',
                         response.headers['content-transfer-encoding'])

    def test_response_too_large(self):
        """A response above x-frugal-payload-limit yields 413."""
        headers = {
            'x-frugal-payload-limit': '5',
        }
        response = self.test_app.post('/frugal', params=self.request_payload,
                                      headers=headers, status='*')
        self.assertEqual(413, response.status_int)
        # The request was still processed; only the response was rejected.
        self.assertTrue(self.mock_processor.process.called)
        iprot, _ = self.mock_processor.process.call_args[0]
        self.assertEqual(self.request_data, iprot.get_transport().getvalue())

    def test_request_too_short(self):
        """A body shorter than the 4-byte length prefix yields 400."""
        request_frame = base64.b64encode(bytearray([0]))
        response = self.test_app.post('/frugal', params=request_frame,
                                      status='*')
        self.assertEqual(400, response.status_int)

    def test_frame_size_mismatch(self):
        """A length prefix that disagrees with the body size yields 400."""
        request_frame = base64.b64encode(bytearray([0, 0, 0, 10, 1, 1, 2]))
        response = self.test_app.post('/frugal', params=request_frame,
                                      status='*')
        self.assertEqual(400, response.status_int)
| 41.02381 | 77 | 0.676146 |
374d51f5f0b88f07e77e4d9374be85d7bf14ffdf | 1,425 | py | Python | preprocess/video.py | NTHU-CVLab/ActivityProps | 68392fb38d87afdc92f6f054e83e9166121401a5 | [
"Apache-2.0"
] | 1 | 2017-10-31T15:36:55.000Z | 2017-10-31T15:36:55.000Z | preprocess/video.py | NTHU-CVLab/ActivityProps | 68392fb38d87afdc92f6f054e83e9166121401a5 | [
"Apache-2.0"
] | null | null | null | preprocess/video.py | NTHU-CVLab/ActivityProps | 68392fb38d87afdc92f6f054e83e9166121401a5 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
from scipy.misc import imresize
class Video:
    """Thin wrapper around ``cv2.VideoCapture`` for whole-video loading.

    Intended use::

        with Video(path) as video:
            frames = video.read()
    """

    def __init__(self, path):
        self.path = path
        self.cap = cv2.VideoCapture(self.path)
        # Cached list of decoded frames; ``None`` until ``load`` has run.
        self.frames = None

    def __enter__(self):
        if not self.cap.isOpened():
            raise Exception('Cannot open video: {}'.format(self.path))
        return self

    def __len__(self):
        # Number of decoded frames; triggers decoding on first use.
        if self.frames is None:
            self.load()
        return len(self.frames)

    def load(self):
        """Decode every frame into ``self.frames`` and return ``self``.

        The capture can only be consumed once: re-reading an exhausted
        ``VideoCapture`` silently yields an empty list (the previous code
        therefore wiped the cache when ``load`` ran twice, e.g. via
        ``len(video)`` followed by ``video.read()``).  The first successful
        load is cached instead.
        """
        if self.frames is None:
            frames = []
            while True:
                ret, frame = self.cap.read()
                if not ret:
                    break
                frames.append(frame)
            self.frames = frames
        return self

    def read(self):
        """Return the list of frames, decoding them if necessary."""
        return self.load().frames

    def resize(self, size=None):
        """Resize every frame to ``size`` (width, height) in place.

        ``scipy.misc.imresize`` was removed in SciPy 1.3.0; ``cv2.resize``
        takes its target as ``(width, height)``, which matches the
        ``(size[1], size[0])`` row/column order previously passed to
        ``imresize``.
        """
        if size:
            self.frames = [cv2.resize(f, (size[0], size[1]))
                           for f in self.frames]
        return self.frames

    @classmethod
    def np_array(cls, frames, dim_ordering='th'):
        """Stack ``frames`` into a float32 array.

        With ``dim_ordering='th'`` the result is channel-first:
        (channels, frames, height, width); otherwise the natural
        (frames, height, width, channels) order is kept.
        """
        video = np.array(frames, dtype=np.float32)
        if dim_ordering == 'th':
            video = video.transpose(3, 0, 1, 2)
        return video

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.cap.release()
def save_video(filepath, fps, w, h, data):
    """Write the frames in ``data`` to ``filepath`` as an XVID-encoded video."""
    writer = cv2.VideoWriter(
        filepath, cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))
    for image in data:
        writer.write(image)
    writer.release()
| 24.568966 | 80 | 0.569825 |
cd99b818f93e31d49ef01ce8a00e530ea6da7233 | 3,031 | py | Python | toolchain/riscv/MSYS/python/Lib/test/time_hashlib.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 207 | 2018-10-01T08:53:01.000Z | 2022-03-14T12:15:54.000Z | toolchain/riscv/MSYS/python/Lib/test/time_hashlib.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 8 | 2019-06-29T14:18:51.000Z | 2022-02-19T07:30:27.000Z | toolchain/riscv/MSYS/python/Lib/test/time_hashlib.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 53 | 2019-03-12T16:50:21.000Z | 2022-03-15T23:16:18.000Z | # It's intended that this script be run by hand. It runs speed tests on
# hashlib functions; it does not test for correctness.
import sys
import time
import hashlib
def creatorFunc():
    """Placeholder hash-object constructor.

    The script body below rebinds this name to the real constructor for the
    requested algorithm; calling it before that rebinding is an error.
    """
    raise RuntimeError("eek, creatorFunc not overridden")
def test_scaled_msg(scale, name):
    """Time digesting a ``scale``-byte message repeatedly and print a summary."""
    repeats = 106201//scale * 20
    message = b'Z'*scale
    # Bind the constructor locally so the hot loop avoids a global lookup.
    make_hash = creatorFunc
    begin = time.perf_counter()
    for _ in range(repeats):
        make_hash(message).digest()
    elapsed = time.perf_counter() - begin
    print(('%2.2f' % elapsed), "seconds", repeats, "x", len(message), "bytes", name)
def test_create():
    """Time 20000 bare constructions of the hash object."""
    begin = time.perf_counter()
    for _ in range(20000):
        creatorFunc()
    elapsed = time.perf_counter() - begin
    print(('%2.2f' % elapsed), "seconds", '[20000 creations]')
def test_zero():
    """Time 20000 digests of the empty message."""
    begin = time.perf_counter()
    for _ in range(20000):
        creatorFunc().digest()
    elapsed = time.perf_counter() - begin
    print(('%2.2f' % elapsed), "seconds", '[20000 "" digests]')
# Name of the hash implementation to benchmark, e.g. "md5" or "_hashlib".
hName = sys.argv[1]
#
# setup our creatorFunc to test the requested hash
#
if hName in ('_md5', '_sha'):
    # Legacy accelerator modules expose a ``new`` constructor.
    exec('import '+hName)
    exec('creatorFunc = '+hName+'.new')
    print("testing speed of old", hName, "legacy interface")
elif hName == '_hashlib' and len(sys.argv) > 3:
    # "fast" mode: call the _hashlib openssl_* constructor directly.
    import _hashlib
    exec('creatorFunc = _hashlib.%s' % sys.argv[2])
    print("testing speed of _hashlib.%s" % sys.argv[2], getattr(_hashlib, sys.argv[2]))
elif hName == '_hashlib' and len(sys.argv) == 3:
    # Go through _hashlib.new(name); the name is bound as a default arg.
    import _hashlib
    exec('creatorFunc = lambda x=_hashlib.new : x(%r)' % sys.argv[2])
    print("testing speed of _hashlib.new(%r)" % sys.argv[2])
elif hasattr(hashlib, hName) and hasattr(getattr(hashlib, hName), '__call__'):
    # Named constructor on the public hashlib module (e.g. hashlib.sha256).
    creatorFunc = getattr(hashlib, hName)
    print("testing speed of hashlib."+hName, getattr(hashlib, hName))
else:
    # Fall back to hashlib.new(name) for algorithms without a constructor.
    exec("creatorFunc = lambda x=hashlib.new : x(%r)" % hName)
    print("testing speed of hashlib.new(%r)" % hName)
try:
    test_create()
except ValueError:
    # An unknown algorithm raises ValueError on first construction; print
    # usage help, then re-raise so the failure stays visible.
    print()
    print("pass argument(s) naming the hash to run a speed test on:")
    print(" '_md5' and '_sha' test the legacy builtin md5 and sha")
    print(" '_hashlib' 'openssl_hName' 'fast' tests the builtin _hashlib")
    print(" '_hashlib' 'hName' tests builtin _hashlib.new(shaFOO)")
    print(" 'hName' tests the hashlib.hName() implementation if it exists")
    print(" otherwise it uses hashlib.new(hName).")
    print()
    raise
# Benchmarks from huge single messages down to tiny ones.
test_zero()
test_scaled_msg(scale=106201, name='[huge data]')
test_scaled_msg(scale=10620, name='[large data]')
test_scaled_msg(scale=1062, name='[medium data]')
test_scaled_msg(scale=424, name='[4*small data]')
test_scaled_msg(scale=336, name='[3*small data]')
test_scaled_msg(scale=212, name='[2*small data]')
test_scaled_msg(scale=106, name='[small data]')
test_scaled_msg(scale=creatorFunc().digest_size, name='[digest_size data]')
test_scaled_msg(scale=10, name='[tiny data]')
| 34.05618 | 92 | 0.649291 |
429973bbdb851c6a56f3b3e80eb5c9cf8419312c | 625 | py | Python | personal/Tommaso/Scripts/pos_example.py | edervishaj/spotify-recsys-challenge | 4077201ac7e4ed9da433bd10a92c183614182437 | [
"Apache-2.0"
] | 3 | 2018-10-12T20:19:57.000Z | 2019-12-11T01:11:38.000Z | personal/Tommaso/Scripts/pos_example.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | [
"Apache-2.0"
] | null | null | null | personal/Tommaso/Scripts/pos_example.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | [
"Apache-2.0"
] | 4 | 2018-10-27T20:30:18.000Z | 2020-10-14T07:43:27.000Z | import sys
from scipy import sparse
import numpy as np
import utils.pre_processing as pre
from utils.definitions import *
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.pre_processing import *
from utils.post_processing import *
from fast_import import *
# Offline datareader backed by previously dumped matrices; quiet mode.
dr = Datareader(mode='offline', only_load=True, verbose=False)
ev = Evaluator(dr)
# Two URM variants: one annotated with track position (argument 1), one standard.
urm = dr.get_urm_with_position(1)
urm_std = dr.get_urm()
# User-based collaborative filtering with BM25 weighting, fit on the
# position-aware URM.
rec = CF_UB_BM25(urm=urm, datareader=dr, verbose_evaluation=False)
rec.model(alpha=1, beta=0, k=250)
# Swap in the standard URM for recommendation while keeping the
# position-based similarity model.
rec.urm = urm_std
rec.fast_recommend()
res = rec.fast_evaluate_eurm()
# NOTE(review): res[1] is printed as the evaluation summary -- confirm the
# index meaning against Evaluator.fast_evaluate_eurm's return contract.
print(res[1])
| 25 | 66 | 0.7888 |
74fbf84065944b7c89e6e425dee26346b6f93983 | 3,539 | py | Python | src/richie/apps/courses/models/role.py | Verisage/richie | 8a78d3bc6cde74d6252cbfc51a2d195e26be64fc | [
"MIT"
] | null | null | null | src/richie/apps/courses/models/role.py | Verisage/richie | 8a78d3bc6cde74d6252cbfc51a2d195e26be64fc | [
"MIT"
] | null | null | null | src/richie/apps/courses/models/role.py | Verisage/richie | 8a78d3bc6cde74d6252cbfc51a2d195e26be64fc | [
"MIT"
] | null | null | null | """
Declare and configure the models for the courses application
"""
from django.contrib.auth.models import Group
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.api import Page
from filer.models import Folder
from ..defaults import ROLE_CHOICES
class PageRole(models.Model):
    """A model to define and control by roles the permissions related to a page."""

    # Which role (one of ROLE_CHOICES) this record grants on the page.
    role = models.CharField(
        choices=ROLE_CHOICES,
        max_length=20,
        verbose_name=_("role"),
        help_text=_(
            "A role describes all the permissions that should be "
            "granted to the user group."
        ),
    )
    page = models.ForeignKey(
        to=Page,
        related_name="roles",
        verbose_name=_("page"),
        help_text=_("Page to which this role grants permissions."),
        on_delete=models.CASCADE,
        limit_choices_to={
            # permissions work with draft instances
            "publisher_is_draft": True
        },
    )
    # Auto-created in save(); not editable because its lifecycle is owned here.
    # PROTECT prevents deleting the group while the role still exists.
    group = models.OneToOneField(
        to=Group,
        related_name="role",
        verbose_name=_("group"),
        help_text=_("User group that this role controls."),
        on_delete=models.PROTECT,
        blank=True,
        editable=False,
    )
    # Auto-created in save(); filer folder whose access this role controls.
    folder = models.OneToOneField(
        to=Folder,
        related_name="role",
        verbose_name=_("filer folder"),
        help_text=_("Filer folder that this role controls."),
        on_delete=models.PROTECT,
        blank=True,
        editable=False,
    )

    class Meta:
        db_table = "richie_page_role"
        verbose_name = _("page role")
        # At most one record per (page, role) pair.
        unique_together = ("page", "role")

    def __str__(self):
        """Human representation of a page role."""
        return _("{:s} | {:s}").format(self.get_role_display(), self.page.get_title())

    def save(
        self, force_insert=False, force_update=False, using=None, update_fields=None
    ):
        """
        Validate the object before saving it and create a group if it does not exist yet.
        """
        # "group" is excluded from validation because it is only assigned
        # below, after the rest of the instance has been validated.
        self.full_clean(exclude=["group"])
        page_id = str(self.page.id)
        # Create the related group the first time the instance is saved
        if not self.group_id:
            name = str(self)[: Group._meta.get_field("name").max_length]
            # On a name collision, disambiguate with the page id and
            # re-truncate so the result still fits the field.
            if Group.objects.filter(name=name).exists():
                name = f"{name:s} [{page_id:s}]"[
                    : Group._meta.get_field("name").max_length
                ]
            self.group = Group.objects.create(name=name)
        # Create the related filer folder the first time the instance is saved.
        # Why create this folder at the root and not below a parent `organization` folder?
        # - the only way to refer to an existing parent folder is by its name... but it could be
        #   changed by a user via the interface and break the functionality,
        # - the filer search functionality only finds folders at the root, not nested folders.
        if not self.folder_id:
            name = str(self)[: Folder._meta.get_field("name").max_length]
            if Folder.objects.filter(name=name).exists():
                name = f"{name:s} [{page_id:s}]"[
                    : Folder._meta.get_field("name").max_length
                ]
            self.folder = Folder.objects.create(name=name)
        super().save(
            force_insert=force_insert,
            force_update=force_update,
            using=using,
            update_fields=update_fields,
        )
| 33.386792 | 96 | 0.604973 |
c4acd2c57e4dda34d235bfde736d1de8ea32b6e2 | 1,501 | py | Python | pelicanconf.py | hatdropper1977/john.sobanski.io | 78c42e7b8976816b25df2ff4a31ca8e4f33146a6 | [
"Apache-2.0"
] | null | null | null | pelicanconf.py | hatdropper1977/john.sobanski.io | 78c42e7b8976816b25df2ff4a31ca8e4f33146a6 | [
"Apache-2.0"
] | null | null | null | pelicanconf.py | hatdropper1977/john.sobanski.io | 78c42e7b8976816b25df2ff4a31ca8e4f33146a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Site identity and canonical URL.
SITENAME = 'Coins N\' Clouds'
SITEURL = 'https://john.sobanski.io'
# Alternate URL kept for local/staging runs.
#SITEURL = 'http://52.54.218.55:8000'
#HEADER_COVER = 'images/sobanski.jpg'
COLOR_SCHEME_CSS = 'monokai.css'
# Content directory and locale settings.
PATH = 'content'
TIMEZONE = 'America/New_York'
DEFAULT_LANG = 'en'
DEFAULT_PAGINATION = 10
# URL layout for category and tag pages.
CATEGORY_URL = 'category/{slug}'
CATEGORY_SAVE_AS = 'category/{slug}/index.html'
TAG_SAVE_AS = 'tag/{slug}.html'
TAGS_SAVE_AS = 'tags.html'
# Author cards rendered by the theme, keyed by author slug.
AUTHORS_BIO = {
    "john-sobanski": {
        "name": "John Sobanski",
        "cover": "https://john.sobanski.io/images/bsod_cropped.jpg",
        "image": "https://john.sobanski.io/images/sobanski.jpg",
        "website": "https://github.com/hatdropper1977/john.sobanski.io",
        "location": "Washington, DC",
        "bio": "Electrical Engineer turned Cloud Architect.<p><img src=\'https://john.sobanski.io/images/AWS_Badge.png\' alt=\'Cert\'></p><p>License <a href=\'https://aw.certmetrics.com/amazon/public/verification.aspx\'>R25L4B4K1FF1Q9WP</a> (July 1st 2016, Re-certified June 29th 2018)",
        "linkedin": "johnsobanski/",
        "github": "hatdropper1977",
    }
}
#Comments
DISQUS_SITENAME = 'freshlex'
# Top navigation menu: (label, URL) pairs.
MENUITEMS = (
    ('Fork me on GitHub!', 'https://github.com/hatdropper1977/john.sobanski.io'),
    ('AWS Architecture', '/category/howto'),
    ('Coins', '/category/coins'),
    ('Data Science', '/category/data-science'),
    ('Protocols', '/category/ietf'),
)
| 31.270833 | 283 | 0.65956 |
294e873142e897d5300de0ba2ed163761dd01aeb | 3,866 | py | Python | disco/postprocess/plots.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | 2 | 2022-03-11T20:04:34.000Z | 2022-03-14T22:25:29.000Z | disco/postprocess/plots.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | 4 | 2022-03-11T17:48:50.000Z | 2022-03-17T21:39:47.000Z | disco/postprocess/plots.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
def plot_voltage(output_dir, scenario=None):
    """
    Create 2 plots based on the first feeder,
    1. compare voltage primary and secondary.
    2. compare pf1 and volt-var
    (The hosting-capacity heatmap is produced separately by ``plot_hc``.)
    Parameters
    ----------
    output_dir : str | pathlib.Path
        The output directory that contains the metrics and hosting capacity results.
    scenario : str
        The scenario name of simulation, default None.
        NOTE(review): this argument is currently unused in the body.
    """
    voltage_metrics_table = os.path.join(output_dir, "voltage_metrics_table.csv")
    voltage_metrics = pd.read_csv(voltage_metrics_table)
    # Restrict both plots to the first feeder found in the table.
    feeder_example = voltage_metrics['feeder'].unique()[0]
    voltage_metrics = voltage_metrics[voltage_metrics['feeder']==feeder_example]
    # Plot 1: max voltage vs penetration, primary vs secondary nodes.
    fig, ax = plt.subplots(figsize=(8,8))
    ax.scatter(
        voltage_metrics[voltage_metrics['node_type']=='primaries']['penetration_level'],
        voltage_metrics[voltage_metrics['node_type']=='primaries']["max_voltage"],
        facecolors='none',
        edgecolors='C0',
        label="primary"
    )
    ax.scatter(
        voltage_metrics[voltage_metrics['node_type']=='secondaries']['penetration_level'],
        voltage_metrics[voltage_metrics['node_type']=='secondaries']["max_voltage"],
        facecolors='none',
        edgecolors='C1',
        label="secondary"
    )
    ax.legend()
    ax.set_title(feeder_example)
    ax.set_xlabel("Penetration level")
    ax.set_ylabel("max_voltage (pu)")
    fig.savefig(os.path.join(output_dir,"max_voltage_pri_sec.png"))
    # Plot 2: max voltage vs penetration, pf1 scenario vs volt-var control.
    fig, ax = plt.subplots(figsize=(8,8))
    ax.scatter(
        voltage_metrics[voltage_metrics['scenario']=='pf1']['penetration_level'],
        voltage_metrics[voltage_metrics['scenario']=='pf1']["max_voltage"],
        facecolors='none',
        edgecolors='C0',
        label="base_case:pf1"
    )
    ax.scatter(
        voltage_metrics[voltage_metrics['scenario']=='control_mode']['penetration_level'],
        voltage_metrics[voltage_metrics['scenario']=='control_mode']["max_voltage"],
        facecolors='none',
        edgecolors='C1',
        label="control_mode:volt-var"
    )
    ax.legend()
    ax.set_title(feeder_example)
    ax.set_xlabel("Penetration level")
    ax.set_ylabel("max_voltage (pu)")
    fig.savefig(os.path.join(output_dir,"max_voltage_pf1_voltvar.png"))
    logger.info("Voltage plot created.")
def plot_hc(output_dir, scenario):
    """
    Plot hosting capacity heatmap for all feeders,
    Parameters
    ----------
    output_dir : str | pathlib.Path
        The output directory that contains the metrics and hosting capacity results.
    scenario : str
        The scenario name of simulation.
    """
    json_path = os.path.join(output_dir, f"hosting_capacity_overall__{scenario}.json")
    if not os.path.exists(json_path):
        logger.error("Overall hosting capacity JSON file does not exist, please check your scenario.")
        sys.exit(1)
    capacity = pd.read_json(json_path).transpose()
    feeders = capacity.index.to_list()
    low = capacity['min_hc_pct']
    high = capacity['max_hc_pct']
    _, axis = plt.subplots(figsize=(8, 8))
    # Green band: below the minimum hosting capacity, no violations occur.
    axis.barh(
        feeders,
        low,
        label="no violation",
        color='limegreen'
    )
    # Yellow band: between min and max HC, some penetrations violate.
    axis.barh(
        feeders,
        high - low,
        left=low,
        label="some violation",
        color='gold'
    )
    # Red band: beyond the maximum HC (axis extends to 200%).
    axis.barh(
        feeders,
        200 - high,
        left=high,
        label="violation",
        color='tomato'
    )
    axis.set_title(f"HCA heatmap: {scenario}")
    axis.set_xlabel("Penetration level (%)")
    axis.legend(ncol=3)
    plt.savefig(os.path.join(output_dir, f"hca__{scenario}.png"))
    logger.info("Hosting capacity plot created.")
| 32.216667 | 102 | 0.65701 |
7e72178e419bccd22e473e396ac8e4e5040ac4c2 | 9,845 | py | Python | brainite/utils.py | neurospin-deepinsight/brainite | 18aafe5d1522f1a4a4081d43f120464afe6cd0a7 | [
"CECILL-B"
] | null | null | null | brainite/utils.py | neurospin-deepinsight/brainite | 18aafe5d1522f1a4a4081d43f120464afe6cd0a7 | [
"CECILL-B"
] | null | null | null | brainite/utils.py | neurospin-deepinsight/brainite | 18aafe5d1522f1a4a4081d43f120464afe6cd0a7 | [
"CECILL-B"
] | 1 | 2021-09-16T08:29:19.000Z | 2021-09-16T08:29:19.000Z | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Utility functions for VAE.
"""
# Imports
import os
import copy
from operator import itemgetter
import numpy as np
from scipy import stats
from PIL import Image, ImageDraw
import torch
from torch.distributions import Normal, kl_divergence
from torchvision.utils import make_grid
def get_traversal_range(mean=0, std=1, max_traversal=0.475):
""" Return the corresponding traversal range in absolute terms.
Parameters
----------
mean: float, default 0
normal distribution mean.
std: float, default 1
normal distribution sigma.
max_traversal: float, default 0.475
the maximum displacement induced by a latent traversal. Symmetrical
traversals are assumed. If m >= 0.5 then uses absolute value traversal,
if m < 0.5 uses a percentage of the distribution (quantile),
e.g. for the prior the distribution is a standard normal so m = 0.45
corresponds to an absolute value of 1.645 because 2m = 90% of a
standard normal is between -1.645 and 1.645. Note in the case
of the posterior, the distribution is not standard normal anymore.
Returns
-------
out: 2-uplet
traversal range.
"""
if max_traversal < 0.5:
max_traversal = (1 - 2 * max_traversal) / 2
max_traversal = stats.norm.ppf(max_traversal, loc=mean, scale=std)
return (-max_traversal, max_traversal)
def traverse_line(model, idx, n_samples, data=None, max_traversal=0.475):
""" Return latent samples corresponding to a traversal of a latent
variable indicated by idx.
Parameters
----------
model: nn.Module
the trained network.
idx: int
index of continuous dimension to traverse. If the continuous latent
vector is 10 dimensional and idx = 7, then the 7th dimension
will be traversed while all others are fixed.
n_samples: int
number of samples to generate.
data: torch.Tensor (N, C, H, W), default None
data to use for computing the posterior. If 'None' then use the
mean of the prior (all zeros) for all other dimensions.
max_traversal: float, default 0.475
the maximum displacement induced by a latent traversal. Symmetrical
traversals are assumed. If m >= 0.5 then uses absolute value traversal,
if m < 0.5 uses a percentage of the distribution (quantile),
e.g. for the prior the distribution is a standard normal so m = 0.45
corresponds to an absolute value of 1.645 because 2m = 90% of a
standard normal is between -1.645 and 1.645. Note in the case
of the posterior, the distribution is not standard normal anymore.
Returns
-------
samples: torch.Tensor (n_samples, latent_size)
"""
if data is None:
samples = torch.zeros(n_samples, model.latent_dim)
traversals = torch.linspace(*get_traversal_range(
max_traversal=max_traversal), steps=n_samples)
else:
if data.size(dim=0) > 1:
raise ValueError(
"Every value should be sampled from the same posterior")
with torch.no_grad():
posterior = model.encode(data)
samples = posterior.sample()
samples = samples.cpu().repeat(n_samples, 1)
post_mean_idx = posterior.loc.cpu()[0, idx]
post_std_idx = posterior.scale.cpu()[0, idx]
traversals = torch.linspace(*get_traversal_range(
mean=post_mean_idx, std=post_std_idx, max_traversal=max_traversal),
steps=n_samples)
samples[:, idx] = traversals
return samples
def traversals(model, device, data=None, n_per_latent=8, n_latents=None,
max_traversal=0.475):
""" Plot traverse through all latent dimensions (prior or posterior) one
by one and plots a grid of images where each row corresponds to a latent
traversal of one latent dimension.
Parameters
----------
model: nn.Module
the trained network.
device: torch.device
the device.
data: torch.Tensor (N, C, H, W), default None
data to use for computing the posterior. If 'None' then use the
mean of the prior (all zeros) for all other dimensions.
n_per_latent: int, default 8
the number of points to include in the traversal of a latent dimension,
i.e. the number of columns.
n_latents: int, default None
the number of latent dimensions to display, i.e. the number of rows.
If 'None' uses all latents.
max_traversal: float, default 0.475
the maximum displacement induced by a latent traversal. Symmetrical
traversals are assumed. If m >= 0.5 then uses absolute value traversal,
if m < 0.5 uses a percentage of the distribution (quantile),
e.g. for the prior the distribution is a standard normal so m = 0.45
corresponds to an absolute value of 1.645 because 2m = 90% of a
standard normal is between -1.645 and 1.645. Note in the case
of the posterior, the distribution is not standard normal anymore.
"""
sampling_type = "prior" if data is None else "posterior"
n_latents = n_latents or model.latent_dim
size = (n_latents, n_per_latent)
latent_samples = [
traverse_line(model, dim, n_per_latent, data=data,
max_traversal=max_traversal)
for dim in range(n_latents)]
latent_samples = torch.cat(latent_samples, dim=0).to(device)
decoded_traversal = model.p_to_prediction(model.decode(latent_samples))
n_images, *img_shape = decoded_traversal.shape
n_rows = n_images // n_per_latent
decoded_traversal = decoded_traversal.reshape(
n_rows, n_per_latent, *img_shape)
return decoded_traversal
def reconstruct_traverse(model, data, n_per_latent=8, n_latents=None,
is_posterior=False, filename=None):
""" Creates a figure whith first row for original images, second are
reconstructions, rest are traversals (prior or posterior) of the latent
dimensions.
Parameters
----------
model: nn.Module
the trained network.
data: torch.Tensor (N, C, H, W)
data to be reconstructed.
n_per_latent: int, default 8
the number of points to include in the traversal of a latent
dimension, i.e. the number of columns.
n_latents: int, default None
the number of latent dimensions to display, i.e. the number of rows.
If 'None' uses all latents.
is_posterior: bool, default False
whether to sample from the posterior.
filename: str, default None
path to save the final image.
"""
device = data.get_device()
if device < 0:
device = torch.device("cpu")
n_latents = n_latents or model.latent_dim
q = model.encode(data[:n_per_latent])
dimension_wise_kl_loss = kl_divergence(
q, Normal(0, 1)).mean(dim=0)[:n_latents]
reconstruction = model.reconstruct(data[:n_per_latent], sample=False)
reconstruction = np.expand_dims(reconstruction, axis=0)
original = data[:n_per_latent].cpu().numpy()
original = np.expand_dims(original, axis=0)
traversal = traversals(
model, device, data=data[:1, ...] if is_posterior else None,
n_per_latent=n_per_latent, n_latents=n_latents)
traversal = np.asarray([arr for _, arr in sorted(
zip(dimension_wise_kl_loss, traversal), key=itemgetter(0))])
concatenated = np.concatenate(
(original, reconstruction, traversal), axis=0)
mosaic = make_mosaic_img(concatenated)
concatenated = Image.fromarray(mosaic)
labels = ["orig", "recon"]
traversal_labels = [
"dim={0} KL={1:.4f}".format(dim + 1, kl)
for dim, kl in enumerate(dimension_wise_kl_loss)]
traversal_labels = [label for _, label in sorted(
zip(dimension_wise_kl_loss, traversal_labels), key=itemgetter(0))]
labels += traversal_labels
concatenated = add_labels(concatenated, labels)
if filename is not None:
concatenated.save(filename)
return concatenated
def add_labels(input_image, labels):
""" Adds labels next to rows of an image.
Parameters
----------
input_image: PIL.Image
the image to which to add the labels.
labels: list
the list of labels to plot.
"""
n_labels = len(labels)
width, height = (input_image.width, input_image.height)
new_width = width + 100
new_size = (new_width, height)
new_img = Image.new("RGB", new_size, color="white")
new_img.paste(input_image, (0, 0))
draw = ImageDraw.Draw(new_img)
for idx, text in enumerate(labels):
draw.text(xy=(new_width - 100 + 0.005,
int((idx / n_labels + 1 / (2 * n_labels)) * height)),
text=text, fill=(0, 0, 0))
return new_img
def make_mosaic_img(arr):
""" Converts a grid of image array into a single mosaic.
Parameters
----------
arr: numpy.ndarray (ROWS, COLS, C, H, W)
organized images all of the same size to generate the mosaic.
"""
img_shape = arr.shape[2:]
nrow = arr.shape[1]
tensor = torch.from_numpy(arr.reshape(-1, *img_shape))
grid = make_grid(tensor, nrow=nrow, normalize=True, range=(0, 1),
padding=2, pad_value=1, scale_each=True)
mosaic = grid.mul_(255).clamp_(0, 255).permute(1, 2, 0)
mosaic = mosaic.to("cpu", torch.uint8).numpy()
return mosaic
| 39.697581 | 79 | 0.653022 |
92a2b1e4f5445217fa03131db6d7262bce62a5cf | 4,055 | py | Python | delivery/delivery/ext/auth/controller.py | raiizoor/Web-delivery | 67a61c9d5c31cb25b6090128f374adc18e2d541d | [
"Unlicense"
] | null | null | null | delivery/delivery/ext/auth/controller.py | raiizoor/Web-delivery | 67a61c9d5c31cb25b6090128f374adc18e2d541d | [
"Unlicense"
] | 3 | 2021-02-11T02:31:31.000Z | 2021-12-29T22:13:18.000Z | delivery/delivery/ext/auth/controller.py | raiizoor/Web-delivery | 67a61c9d5c31cb25b6090128f374adc18e2d541d | [
"Unlicense"
] | 1 | 2021-01-20T20:23:12.000Z | 2021-01-20T20:23:12.000Z | import os
import click
import cv2
import numpy as np
import warnings
from flask import redirect, url_for
from flask_login import current_user
from werkzeug.security import generate_password_hash
from werkzeug.utils import secure_filename
from flask import current_app as app
from delivery.ext.db.models import User, Category, Store, Items, Address, Order
from delivery.ext.db import db, models
ALG = "pbkdf2:sha256"
def list_users():
    """Print every registered user to the CLI."""
    registered = models.User.query.all()
    click.echo(f"Lista de usuarios {registered}")
def list_categorys():
    """Print every registered category to the CLI."""
    registered = models.Category.query.all()
    click.echo(f"Lista de categoria {registered}")
def list_stores():
    """Print every registered store to the CLI."""
    registered = models.Store.query.all()
    click.echo(f"Lista de lojas {registered}")
def list_itens():
    """Print every registered item to the CLI."""
    registered = models.Items.query.all()
    click.echo(f"Lista de itens {registered}")
def list_address():
    """Print every registered address (with zip code) to the CLI."""
    registered = models.Address.query.all()
    click.echo(f"Lista de endereços com CEP {registered}")
def list_order():
    """Print every purchase order to the CLI."""
    registered = models.Order.query.all()
    click.echo(f"Lista de ordem de compras {registered}")
def create_user(name: str, email: str, password: str, admin: bool = False) -> User:
    """Persist and return a new User; the password is stored hashed (never plain)."""
    new_user = User(
        name=name,
        email=email,
        password=generate_password_hash(password, ALG),
        admin=admin,
    )
    db.session.add(new_user)
    db.session.commit()
    return new_user
def create_category(name: str, on_menu: bool) -> Category:
    """Persist and return a new Category."""
    new_category = Category(name=name, on_menu=on_menu)
    db.session.add(new_category)
    db.session.commit()
    return new_category
def create_store(name_store: str, user_id: int, category_id: str, active: bool) -> Store:
    """Persist and return a new Store owned by the given user."""
    new_store = Store(
        name_store=name_store,
        user_id=user_id,
        category_id=category_id,
        active=active,
    )
    db.session.add(new_store)
    db.session.commit()
    return new_store
def create_item(name: str, price: float, quantity: int, description: str, store_id: str, available: bool = True) -> Items:
    """Persist and return a new Items row for the given store."""
    new_item = Items(
        name=name,
        price=price,
        quantity=quantity,
        description=description,
        store_id=store_id,
        available=available,
    )
    db.session.add(new_item)
    db.session.commit()
    return new_item
def create_address(zip_code: str, state: str, city: str, address: str, number_house: int, user_id: str) -> Address:
    """Persist and return a new Address for the given user."""
    # Use a distinct local name so the ``address`` parameter is not shadowed.
    new_address = Address(
        zip_code=zip_code,
        state=state,
        city=city,
        address=address,
        number_house=number_house,
        user_id=user_id,
    )
    db.session.add(new_address)
    db.session.commit()
    return new_address
def create_order(created_at: str, user_id: str, store_id: str, address_id: str, completed: bool = False) -> Order:
    """Persist and return a new purchase Order."""
    new_order = Order(
        created_at=created_at,
        user_id=user_id,
        store_id=store_id,
        address_id=address_id,
        completed=completed,
    )
    db.session.add(new_order)
    db.session.commit()
    return new_order
def list_image(id):
    """Return the stored picture filename for ``id``, or None if not found.

    Fix: the previous membership test (``f'{id}.jpg' in name_image``) also
    matched longer ids -- e.g. looking up id 1 matched ``11.jpg`` because
    ``"1.jpg"`` is a substring of it, so the wrong user's picture could be
    returned (and deleted by ``delete_image``).  An exact comparison avoids
    that collision.
    """
    target = f'{id}.jpg'
    for name_image in os.listdir(app.config["UPLOAD_FOLDER"]):
        if name_image == target:
            return name_image
def delete_image(id):
    """Remove the stored picture for ``id`` from the upload folder."""
    stored_name = list_image(id)
    os.remove(os.path.join(app.config['UPLOAD_FOLDER'], stored_name))
def save_user_picture(filename, filestore):
    """Persist an uploaded picture under the configured upload folder."""
    destination = os.path.join(
        app.config["UPLOAD_FOLDER"],
        secure_filename(filename),
    )
    filestore.save(destination)
    # imgresizer(destination) this will decrease image
# TODO:
# 1) Check whether the upload directory exists.
# 2) Create the directory if it does not exist.
def imgresizer(path):
    """Resize the image at ``path`` to 128x128 and save it as ``<stem>_reduced.png``.

    Fix: the previous ``path[0:-4]`` slice assumed a 3-character extension,
    mangling names like ``photo.jpeg``; ``os.path.splitext`` handles any
    extension length.
    """
    img = cv2.imread(path)
    img = cv2.resize(img, (128, 128))
    stem, _ = os.path.splitext(path)
    cv2.imwrite(stem + "_reduced.png", img)
# Pass an absolute path to this function
# ex: imgresizer("C:/Users/e-ron/code/Web-delivery/delivery/uploads/teste.jpg")
| 25.34375 | 122 | 0.664365 |
b7ddefcd720fd0e7f2ab3afb08240314a7fe618d | 3,226 | py | Python | tutorial/tutotrial/settings.py | aiueocode/djangorest | fa408050a44d58e47929b4183c89bb6b5890702e | [
"BSD-3-Clause"
] | null | null | null | tutorial/tutotrial/settings.py | aiueocode/djangorest | fa408050a44d58e47929b4183c89bb6b5890702e | [
"BSD-3-Clause"
] | null | null | null | tutorial/tutotrial/settings.py | aiueocode/djangorest | fa408050a44d58e47929b4183c89bb6b5890702e | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for tutotrial project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '3kqxdxod91h0dp@ck4^l=35be24*sona=)hqib-^cg**6h3s+#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; must list served hostnames otherwise.
ALLOWED_HOSTS = []
# Application definition
# NOTE(review): the project package is spelled 'tutotrial' (sic); the module
# path strings below must keep that spelling to match the package on disk.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutotrial.urls'
# Django REST framework: paginate list endpoints 10 items per page.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'tutotrial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.401575 | 91 | 0.699628 |
c870581096484aea2e288fd30d459497ffa189d3 | 2,369 | py | Python | pygoogle/src/google.py | Unam3dd/Train-2018-2020 | afb6ae70fe338cbe55a21b74648d91996b818fa2 | [
"MIT"
] | 4 | 2021-04-23T15:39:17.000Z | 2021-12-27T22:53:24.000Z | pygoogle/src/google.py | Unam3dd/Train-2018-2020 | afb6ae70fe338cbe55a21b74648d91996b818fa2 | [
"MIT"
] | null | null | null | pygoogle/src/google.py | Unam3dd/Train-2018-2020 | afb6ae70fe338cbe55a21b74648d91996b818fa2 | [
"MIT"
] | 2 | 2021-04-19T08:28:54.000Z | 2022-01-19T13:23:29.000Z | #!/usr/bin/python3
#coding:utf-8
from googlesearch import search
import time
from datetime import datetime
import argparse
import os
import requests
banner = '''
\033[1;96m dP""b8 \033[1;91m dP"Yb \033[1;93m dP"Yb \033[1;96m dP""b8 \033[1;92m88 \033[1;91m888888\033[00m
\033[1;96mdP `" \033[1;91mdP Yb \033[1;93mdP Yb \033[1;96mdP `" \033[1;92m88 \033[1;91m88__\033[00m
\033[1;96mYb "88 \033[1;91mYb dP \033[1;93mYb dP \033[1;96mYb "88 \033[1;92m88 .o \033[1;91m88""\033[00m
\033[1;96m YboodP \033[1;91m YbodP \033[1;93m YbodP \033[1;96m YboodP \033[1;92m88ood8 \033[1;91m888888\033[00m
By Dxvistxr - Simple Google Search With Python 3
2019
'''
def check_internet():
    """Best-effort connectivity probe: warn on stdout when Google is unreachable.

    The result is deliberately not returned (kept for backward compatibility);
    callers invoke it only for the warning side effect.
    """
    try:
        # A timeout keeps the probe from hanging forever on a dead network.
        requests.get('https://www.google.com', timeout=5)
    except requests.exceptions.RequestException:
        # Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt.
        print('\033[1;91m[!] Error Internet Not Found !')
def isearch(query, max_stop_website):
    """Run a Google search for *query* and print each result URL with a timestamp.

    max_stop_website caps the number of results; None falls back to 20.
    """
    check_internet()
    print('\033[1;93m[*] Waiting Moment Please Search..')
    try:
        # Single code path: the previous implementation duplicated the whole
        # loop body for the None / not-None cases (and used `== None`).
        stop = 20 if max_stop_website is None else max_stop_website
        # Initialize t so the "Finish" line cannot hit NameError when the
        # search yields no results.
        t = datetime.now().strftime('%H:%M:%S')
        for website in search(query, tld="co.in", num=10, stop=stop, pause=2):
            t = datetime.now().strftime('%H:%M:%S')
            print('\033[1;92m[\033[1;94m%s\033[1;92m] %s ' % (t, website))
        print('\n')
        print('\033[1;92m[\033[1;94m*\033[1;92m] Search Finish At %s ' % (t))
        print('\n')
        print('\n')
    except Exception as error_search:
        print(error_search)
def main():
    """Entry point: show the banner, parse CLI arguments, run the search."""
    try:
        print(banner)
        arg_parser = argparse.ArgumentParser()
        for arg_name, arg_type, arg_help in (
                ('searchquery', str, 'Set Query'),
                ('maxpage', int, 'Set MaxPage')):
            arg_parser.add_argument(arg_name, type=arg_type, help=arg_help)
        options = arg_parser.parse_args()
        isearch(options.searchquery, options.maxpage)
    except Exception as err:
        print(err)
if __name__ == '__main__':
    main()
| 33.366197 | 126 | 0.55382 |
32871b18de390e2612dc77a309757246c5768a00 | 14,685 | py | Python | library/iworkflow_service_template.py | Larsende/f5_ansible | 93b0747ba663128e2c8dfc456dad4653cdde4f38 | [
"Apache-2.0"
] | 12 | 2016-12-29T16:09:21.000Z | 2019-06-29T14:12:17.000Z | library/iworkflow_service_template.py | Larsende/f5_ansible | 93b0747ba663128e2c8dfc456dad4653cdde4f38 | [
"Apache-2.0"
] | 24 | 2017-05-24T07:56:56.000Z | 2017-11-30T09:31:56.000Z | library/iworkflow_service_template.py | Larsende/f5_ansible | 93b0747ba663128e2c8dfc456dad4653cdde4f38 | [
"Apache-2.0"
] | 26 | 2017-05-31T17:15:32.000Z | 2021-03-29T03:45:06.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: iworkflow_service_template
short_description: Manages Service Templates on iWorkflow
description:
- Manages Service Templates on iWorkflow. Service templates are created
by the iWorkflow administrator and are consumed by iWorkflow tenants
in the form of L4/L7 services. The Service Template can be configured
to allow tenants to change certain values of the template such as
the IP address of a VIP, or the port that a Virtual Server listens on.
version_added: "2.4"
options:
name:
description:
- Name of the service template.
required: True
parameters:
description:
- A dictionary containing the values of input parameters that the
Service Template contains. You will see these in iWorkflow's UI
labeled as "Application Tier Information" and "Sections". This
is the way by which you customize the Service Template and specify
which values are tenant editable. Since this value can be particularly
large, the recommended practice is to put it in an external file
and include it with the Ansible C(file) or C(template) lookup plugins.
This option is required when C(state) is C(present).
connector:
description:
- The cloud connector associated with this Service Template. If you want
to have this Service Template associated with all clouds, then specify
a C(connector) of C(all). When creating a new Service Template, if no
connector is specified, then C(all) clouds will be the default.
base_template:
description:
- The iApp template that you want to base this Service Template off
of. Note that, while iWorkflow's UI also allows you to specify another
Service Template for the C(base_template), this module does not yet
let you do that. This option is required when C(state) is C(present).
notes:
- Requires the f5-sdk Python package on the remote host. This is as easy as
pip install f5-sdk
- Requires the deepdiff Python package on the Ansible controller host. This
is as easy as pip install deepdiff.
requirements:
- f5-sdk >= 2.3.0
- iWorkflow >= 2.1.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.f5_utils import *
from deepdiff import DeepDiff
import copy
class Parameters(AnsibleF5Parameters):
    """Maps between Ansible module params and iWorkflow REST API fields.

    Raw values live in ``self._values`` (a defaultdict returning None for
    unset keys); normalized views of them are exposed through properties
    (``tables``, ``vars``, ``connector``, ``parentReference``).
    """
    # REST API attribute name -> module-side attribute name
    api_map = {
        'templateName': 'name',
        'properties': 'connector',
        'overrides': 'parameters'
    }
    # Keys included in the module's returned "changes" payload (to_return())
    returnables = ['vars']
    # Attributes serialized into REST request bodies (api_params())
    api_attributes = [
        'overrides', 'templateName', 'parentReference', 'properties'
    ]
    # Keys compared between desired and current state (see ModuleManager)
    updatables = ['tables', 'vars']
    def __init__(self, params=None):
        # defaultdict so reads of unset keys yield None instead of KeyError
        self._values = defaultdict(lambda: None)
        if params:
            self.update(params=params)
    def update(self, params=None):
        """Merge *params*, routing each key through api_map and any property setter."""
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k
                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v
    def _get_connector_collection(self):
        # All local cloud connectors known to this iWorkflow instance.
        return self.client.api.cm.cloud.connectors.locals.get_collection()
    def _get_connector_selflink(self, connector, collection):
        # Resolve a connector display name to its REST selfLink; only
        # "BIG-IP" connectors are considered. Returns None when not found.
        for resource in collection:
            if str(resource.displayName) != "BIG-IP":
                continue
            if str(resource.name) != connector:
                continue
            return str(resource.selfLink)
        return None
    def to_return(self):
        """Collect the `returnables` values for reporting back to Ansible."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result
    def api_params(self):
        """Build the REST request body from `api_attributes` (via api_map)."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
    @property
    def tables(self):
        # Normalize the user-supplied iApp tables: stringify names, columns
        # and rows, and sort by table name so comparisons are stable.
        result = []
        if not self._values['tables']:
            return None
        tables = copy.deepcopy(self._values['tables'])
        for table in tables:
            tmp = dict()
            name = table.pop('name', None)
            if name is None:
                raise F5ModuleError(
                    "One of the provided tables does not have a name"
                )
            tmp['name'] = str(name)
            columns = table.pop('columns', None)
            if columns:
                tmp['columns'] = []
                for column in columns:
                    tmp['columns'].append(
                        dict((str(k), str(v)) for k, v in iteritems(column))
                    )
            # You cannot have rows without columns
            rows = table.pop('rows', None)
            if rows:
                tmp['rows'] = []
                for row in rows:
                    tmp['rows'].append([str(x) for x in row])
            # For the remaining items in the table dict, add them to the tmp
            # dictionary
            tmp.update(table)
            result.append(tmp)
        result = sorted(result, key=lambda k: k['name'])
        return result
    @tables.setter
    def tables(self, value):
        self._values['tables'] = value
    @property
    def vars(self):
        # Stringify keys and sort by 'name' so diffs are order-independent.
        result = []
        if not self._values['vars']:
            return None
        variables = self._values['vars']
        for variable in variables:
            tmp = dict((str(k), v) for k, v in iteritems(variable))
            result.append(tmp)
        result = sorted(result, key=lambda k: k['name'])
        return result
    @vars.setter
    def vars(self, value):
        self._values['vars'] = value
    @property
    def parameters(self):
        # Combined view used when serializing the 'overrides' API attribute.
        return dict(
            tables=self.tables,
            vars=self.vars
        )
    @parameters.setter
    def parameters(self, value):
        if value is None:
            return
        if 'tables' in value:
            self.tables = value['tables']
        if 'vars' in value:
            self.vars = value['vars']
    @property
    def connector(self):
        # Normalize the different ways a connector can be given (None, the
        # literal 'all', a connector name, or a REST payload) into the
        # list-of-dict "properties" shape the iWorkflow API expects.
        connector = None
        if self._values['connector'] is None:
            return self._values['connector']
        elif self._values['connector'] == 'all':
            connector = 'all'
        # NOTE(review): `basestring` is Python 2 only; on Python 3 this name
        # must be supplied by the f5_utils star-import or it raises
        # NameError - confirm.
        elif isinstance(self._values['connector'], basestring):
            collection = self._get_connector_collection()
            result = self._get_connector_selflink(str(self._values['connector']), collection)
            connector = result
        elif 'provider' in self._values['connector'][0]:
            # Case for the REST API
            item = self._values['connector'][0]['provider']
            connector = str(item)
        if connector is None:
            raise F5ModuleError(
                "The specified connector was not found"
            )
        elif connector == 'all':
            # 'all clouds': reference with an empty defaultValue.
            result = [
                dict(
                    id="cloudConnectorReference",
                    isRequired=True,
                    defaultValue=""
                )
            ]
            return result
        else:
            # Specific cloud: reference the resolved connector selfLink.
            result = [
                dict(
                    id="cloudConnectorReference",
                    isRequired=True,
                    provider=connector
                )
            ]
            return result
    @property
    def parentReference(self):
        # NOTE(review): message typo "not not exist" (should be "does not
        # exist"); the runtime string is deliberately left untouched here.
        if not self._parent_template_exists():
            raise F5ModuleError(
                "The specified base_template '{0}' not not exist".format(
                    self.base_template
                )
            )
        return dict(
            link="https://localhost/mgmt/cm/cloud/templates/iapp/{0}".format(
                self._values['base_template']
            )
        )
    @parentReference.setter
    def parentReference(self, value):
        # Stores the raw link; note the getter rebuilds it from base_template.
        self._values['base_template'] = value['link']
    def _parent_template_exists(self):
        # True when the named iApp template exists on the device.
        result = self.client.api.cm.cloud.templates.iapps.iapp.exists(
            name=self.base_template
        )
        return result
class ModuleManager(object):
    """Drives the module's present/absent state machine against iWorkflow.

    ``want`` holds the desired state (from module params), ``have`` the state
    read from the device, and ``changes`` the diff reported back to Ansible.
    """
    def __init__(self, client):
        self.client = client
        self.have = None
        self.want = Parameters()
        self.want.client = self.client
        self.want.update(self.client.module.params)
        self.changes = Parameters()
    def _set_changed_options(self):
        # On create: report every non-None returnable as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters()
            self.changes.client = self.client
            self.changes.update(changed)
    def _update_changed_options(self):
        # On update: record a textual DeepDiff (not the new value) for each
        # updatable that differs between want and have. Returns True when
        # anything changed.
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = str(DeepDiff(attr1, attr2))
        if changed:
            self.changes = Parameters()
            self.changes.client = self.client
            self.changes.update(changed)
            return True
        return False
    def exec_module(self):
        """Dispatch on the requested state and return Ansible's result dict."""
        changed = False
        result = dict()
        state = self.want.state
        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Re-wrap REST errors in the module's own exception type.
            raise F5ModuleError(str(e))
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result
    def exists(self):
        # True when a Service Template with the wanted name is on the device.
        result = self.client.api.cm.cloud.provider.templates.iapps.iapp.exists(
            name=self.want.name
        )
        return result
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def create(self):
        self._set_changed_options()
        # Default to all clouds when no connector was specified.
        if self.want.connector is None:
            self.want.update({'connector': 'all'})
        if self.client.check_mode:
            return True
        if self.want.base_template is None:
            raise F5ModuleError(
                "A 'base_template' is required when creating a new Service Template"
            )
        self.create_on_device()
        return True
    def update(self):
        # NOTE: this updates the *device*; Parameters.update() merges dicts.
        # The two are unrelated despite sharing a name.
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update_on_device(self):
        params = self.want.api_params()
        resource = self.client.api.cm.cloud.provider.templates.iapps.iapp.load(
            name=self.want.name
        )
        resource.update(**params)
    def read_current_from_device(self):
        resource = self.client.api.cm.cloud.provider.templates.iapps.iapp.load(
            name=self.want.name,
        )
        result = resource.attrs
        # Rename the API's 'overrides' key to the module-side 'parameters'
        # so Parameters.update() routes it through the parameters setter.
        result['parameters'] = result.pop('overrides', None)
        params = Parameters()
        params.client = self.client
        params.update(result)
        return params
    def create_on_device(self):
        params = self.want.api_params()
        self.client.api.cm.cloud.provider.templates.iapps.iapp.create(
            isF5Example=False,
            **params
        )
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the iApp service")
        return True
    def remove_from_device(self):
        resource = self.client.api.cm.cloud.provider.templates.iapps.iapp.load(
            name=self.want.name,
        )
        if resource:
            resource.delete()
class ArgumentSpec(object):
    """Describes the Ansible argument spec for the service-template module."""

    def __init__(self):
        # Check mode is honored by ModuleManager (create/update/remove).
        self.supports_check_mode = True
        self.argument_spec = {
            'name': {'required': True},
            'base_template': {},
            'parameters': {'type': 'dict'},
            'connector': {},
            'state': {
                'default': 'present',
                'choices': ['absent', 'present'],
            },
        }
        self.f5_product_name = 'iworkflow'
def main():
    """CLI entry point: build the Ansible client, run the manager, report back."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )
    try:
        manager = ModuleManager(client)
        outcome = manager.exec_module()
        client.module.exit_json(**outcome)
    except F5ModuleError as err:
        client.module.fail_json(msg=str(err))
if __name__ == '__main__':
    main()
| 31.512876 | 93 | 0.573647 |
91e41d06f430a7c3d2c150abee5bd2db3ab25c2d | 2,836 | py | Python | ros/src/tl_detector/light_classification/tl_classifier.py | toonday/CarND-Capstone | 467ce65c761f552511588987f573e32d4ee135ca | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | toonday/CarND-Capstone | 467ce65c761f552511588987f573e32d4ee135ca | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | toonday/CarND-Capstone | 467ce65c761f552511588987f573e32d4ee135ca | [
"MIT"
] | null | null | null | import rospy
import numpy as np
import tensorflow as tf
from styx_msgs.msg import TrafficLight
class TLClassifier(object):
    """Traffic-light color classifier backed by a frozen TensorFlow detection graph."""
    def __init__(self, file_path):
        #TODO load classifier
        # Load the frozen inference graph at *file_path* into a private tf.Graph.
        self.classifier_model = tf.Graph()
        # Detections below this confidence are ignored in get_classification().
        self.min_score_threshold = 0.5
        with self.classifier_model.as_default():
            graph_def = tf.GraphDef()
            with tf.gfile.GFile(file_path, 'rb') as mf:
                ser_graph = mf.read()
                graph_def.ParseFromString(ser_graph)
                tf.import_graph_def(graph_def, name='')
            rospy.loginfo("loaded graph from frozen model")
            # Cache handles to the detection graph's input/output tensors.
            self.image_tensor = self.classifier_model.get_tensor_by_name('image_tensor:0')
            self.d_boxes = self.classifier_model.get_tensor_by_name('detection_boxes:0')
            self.d_scores = self.classifier_model.get_tensor_by_name('detection_scores:0')
            self.d_classes = self.classifier_model.get_tensor_by_name('detection_classes:0')
            self.num_d = self.classifier_model.get_tensor_by_name('num_detections:0')
        config = tf.ConfigProto()
        # Turn on XLA JIT compilation for this session.
        jl = tf.OptimizerOptions.ON_1
        config.graph_options.optimizer_options.global_jit_level = jl
        self.sess = tf.Session(config=config, graph=self.classifier_model)
    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        #TODO implement light color prediction
        light_state = TrafficLight.UNKNOWN
        with self.classifier_model.as_default():
            # The model expects a batch dimension: shape (1, H, W, C).
            img = np.expand_dims(image, axis=0)
            (boxes, scores, classes, num_detections) = self.sess.run(
                [self.d_boxes, self.d_scores, self.d_classes, self.num_d],
                feed_dict={self.image_tensor: img})
            boxes = np.squeeze(boxes)
            scores = np.squeeze(scores)
            classes = np.squeeze(classes).astype(np.int32)
            # Class ids map to colors as the log messages below show:
            # 1=GREEN, 2=RED, 3=YELLOW. The last confident detection wins.
            for i in range(boxes.shape[0]):
                if scores is None or scores[i] > self.min_score_threshold:
                    if classes[i] == 1:
                        rospy.loginfo('Light Detected: GREEN')
                        light_state = TrafficLight.GREEN
                    elif classes[i] == 2:
                        rospy.loginfo('Light Detected: RED')
                        light_state = TrafficLight.RED
                    elif classes[i] == 3:
                        rospy.loginfo('Light Detected: YELLOW')
                        light_state = TrafficLight.YELLOW
            # NOTE(review): this is a for-else, and the loop contains no
            # `break`, so 'No Light Detected' is logged on every call even
            # when a light was found; presumably it was meant to fire only
            # when nothing exceeded the threshold - confirm.
            else:
                rospy.loginfo('No Light Detected')
        return light_state
| 44.3125 | 92 | 0.599788 |
806c9f33da63d55d8e9794a74beeb643f273eb56 | 3,156 | py | Python | sap/cli/strust.py | jakub-vaclavik-sap/sapcli | a0f40c3b2363bba0d34f705d92dd420d9adf3987 | [
"Apache-2.0"
] | 42 | 2019-01-16T13:16:09.000Z | 2022-03-23T20:13:00.000Z | sap/cli/strust.py | jakub-vaclavik-sap/sapcli | a0f40c3b2363bba0d34f705d92dd420d9adf3987 | [
"Apache-2.0"
] | 59 | 2019-02-23T07:16:56.000Z | 2022-02-28T09:34:47.000Z | sap/cli/strust.py | jakub-vaclavik-sap/sapcli | a0f40c3b2363bba0d34f705d92dd420d9adf3987 | [
"Apache-2.0"
] | 22 | 2019-03-12T08:15:44.000Z | 2022-01-10T03:56:47.000Z | """
Uploads X.509 Base64 certificates into SAP to enable SSL peer verification
of remote servers
"""
import logging
import sap.cli.core
from sap.errors import SAPCliError
from sap.rfc.strust import (
SSLCertStorage,
CLIENT_ANONYMOUS,
CLIENT_STANDART,
IDENTITY_MAPPING,
Identity,
notify_icm_changed_pse,
iter_storage_certificates
)
class CommandGroup(sap.cli.core.CommandGroup):
    """Commands for strust"""
    def __init__(self):
        # 'strust' is the name registered for this command group
        # (presumably surfaced as the CLI sub-command name - confirm).
        super().__init__('strust')
@CommandGroup.argument('paths', type=str, nargs='+',
                       help='a file path containing X.509 Base64 certificate')
@CommandGroup.argument('-l', '--algorithm', type=str, help='R,S,G,H,X - or other if you need, of PSE file', default='R')
@CommandGroup.argument('-k', '--key-length', type=int, default=2048, help='Of PSE file')
@CommandGroup.argument('-d', '--dn', type=str, help='Distinguished Name of PSE file', default=None)
@CommandGroup.argument('-s', '--storage', action='append', default=[],
                       choices=[CLIENT_ANONYMOUS, CLIENT_STANDART, ])
@CommandGroup.argument('-i', '--identity', action='append', default=[])
@CommandGroup.command()
def putcertificate(connection, args):
    """Uploads X.509 Base64 certificates into SAP to enable SSL peer verification
    of remote servers
    Exceptions:
        - SAPCliError:
            - when the given storage does not belong to the storage white list
            - when identity argument has invalid format
    """
    # Resolve well-known storage names plus explicit PSE identities into one list.
    identities = []
    for storage in args.storage:
        if storage in (CLIENT_ANONYMOUS, CLIENT_STANDART):
            identities.append(IDENTITY_MAPPING[storage])
        else:
            raise SAPCliError(f'Unknown storage: {storage}')
    for identity in args.identity:
        try:
            identities.append(Identity(*identity.split('/')))
        except (ValueError, TypeError) as ex:
            # Chain the original error instead of suppressing it (the old
            # code disabled pylint's raise-missing-from here).
            raise SAPCliError('Invalid identity format') from ex
    # Make sure every target PSE storage exists, creating missing ones.
    ssl_storages = []
    for identity in identities:
        ssl_storage = SSLCertStorage(connection, identity.pse_context, identity.pse_applic)
        if not ssl_storage.exists():
            ssl_storage.create(
                alg=args.algorithm,
                keylen=args.key_length,
                dn=args.dn
            )
        logging.debug('SSL Storage is OK: %s', ssl_storage)
        ssl_storages.append(ssl_storage)
    # Upload every certificate file into every selected storage.
    for file_path in args.paths:
        logging.info('Processing the file: %s', file_path)
        with open(file_path, 'rb') as cert_file:
            cert_contents = cert_file.read()
            for ssl_storage in ssl_storages:
                logging.info('Adding the file: %s to %s', file_path, ssl_storage)
                logging.info(ssl_storage.put_certificate(cert_contents))
    # ICM caches PSEs; tell it to pick up the changes.
    logging.info('Notifying ICM ... ')
    notify_icm_changed_pse(connection)
    for updated_storage in ssl_storages:
        logging.info('Certificates of %s:', str(updated_storage))
        for cert in iter_storage_certificates(updated_storage):
            logging.info('* %s', cert['EV_SUBJECT'])
| 34.304348 | 120 | 0.648289 |
3272849c65cc2905a1f8a96d48f3d6840221c4d3 | 4,612 | py | Python | parl/plutils/common.py | awesome-archive/PARL | 7a7583ab6ff2371a190c683386e0103b91cf0903 | [
"Apache-2.0"
] | 1 | 2019-01-18T10:01:40.000Z | 2019-01-18T10:01:40.000Z | parl/plutils/common.py | JerryMAzhaizhai/PARL | 7a7583ab6ff2371a190c683386e0103b91cf0903 | [
"Apache-2.0"
] | null | null | null | parl/plutils/common.py | JerryMAzhaizhai/PARL | 7a7583ab6ff2371a190c683386e0103b91cf0903 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common functions of PARL framework
"""
import paddle.fluid as fluid
from paddle.fluid.executor import _fetch_var
from parl.layers.layer_wrappers import LayerFunc
from parl.framework.model_base import Network
__all__ = [
'fetch_framework_var', 'fetch_value', 'get_parameter_pairs',
'get_parameter_names'
]
def fetch_framework_var(attr_name):
    """Fetch the framework variable named *attr_name* from the global scope.

    A new reusing framework Variable (same name, same shape, float32) is
    created via fluid.layers.create_parameter and returned.

    Args:
        attr_name: string, attr name of parameter
    Returns:
        framework.Variable
    """
    global_scope = fluid.executor.global_scope()
    var_shape = global_scope.find_var(attr_name).get_tensor().shape()
    return fluid.layers.create_parameter(
        shape=var_shape,
        dtype='float32',
        attr=fluid.ParamAttr(name=attr_name))
def fetch_value(attr_name):
    """ Given name of ParamAttr, fetch numpy value of the parameter in global_scope
    Args:
        attr_name: ParamAttr name of parameter
    Returns:
        numpy.ndarray
    """
    # _fetch_var reads the variable from fluid's global scope and converts
    # the underlying tensor to a numpy array (return_numpy=True).
    return _fetch_var(attr_name, return_numpy=True)
def get_parameter_pairs(src, target):
    """ Recursively get pairs of parameter names between src and target
    Args:
        src: parl.Network/parl.LayerFunc/list/tuple/set/dict
        target: parl.Network/parl.LayerFunc/list/tuple/set/dict
    Returns:
        param_pairs: list of tuple(src_param_name, target_param_name)
        between src and target
    """
    # NOTE: the docstring previously claimed 3-tuples including is_bias;
    # the code below only ever appends (src_name, target_name) pairs.
    param_pairs = []
    if isinstance(src, Network):
        # Recurse over attributes that exist on both networks.
        for attr in src.__dict__:
            if not attr in target.__dict__:
                continue
            src_var = getattr(src, attr)
            target_var = getattr(target, attr)
            param_pairs.extend(get_parameter_pairs(src_var, target_var))
    elif isinstance(src, LayerFunc):
        # Pair up the sorted ParamAttrs of the two layers positionally.
        src_attrs = src.attr_holder.sorted()
        target_attrs = target.attr_holder.sorted()
        assert len(src_attrs) == len(target_attrs), \
            "number of ParamAttr between source layer and target layer should be same."
        for (src_attr, target_attr) in zip(src_attrs, target_attrs):
            if src_attr:
                assert target_attr, "ParamAttr between source layer and target layer is inconsistent."
                param_pairs.append((src_attr.name, target_attr.name))
    elif isinstance(src, tuple) or isinstance(src, list) or isinstance(
            src, set):
        for src_var, target_var in zip(src, target):
            param_pairs.extend(get_parameter_pairs(src_var, target_var))
    elif isinstance(src, dict):
        for k in src.keys():
            assert k in target
            param_pairs.extend(get_parameter_pairs(src[k], target[k]))
    else:
        # for any other type, won't be handled
        pass
    return param_pairs
def get_parameter_names(obj):
    """Recursively collect parameter names reachable from *obj*.

    Mainly used to list the parameter names of a parl.Network.

    Args:
        obj: parl.Network/parl.LayerFunc/list/tuple/set/dict
    Returns:
        list of strings: all parameter names found in obj
    """
    names = []
    for attr_name in obj.__dict__:
        value = getattr(obj, attr_name)
        if isinstance(value, Network):
            names.extend(get_parameter_names(value))
        elif isinstance(value, LayerFunc):
            # Collect the names of all non-empty ParamAttrs on this layer.
            names.extend(
                attr.name for attr in value.attr_holder.tolist() if attr)
        elif isinstance(value, (tuple, list, set)):
            for item in value:
                names.extend(get_parameter_names(item))
        elif isinstance(value, dict):
            for item in list(value.values()):
                names.extend(get_parameter_names(item))
        # any other attribute type carries no parameters and is skipped
    return names
| 34.41791 | 102 | 0.665004 |
1c7457f44779f81fd2a29a21d4a209095000a627 | 665 | py | Python | sharpy-sc2/sharpy/managers/combat2/protoss/__init__.py | etzhang416/sharpy-bot-eco | badc68ad1aa903dfa1bbc33f6225608e433ff353 | [
"Unlicense"
] | null | null | null | sharpy-sc2/sharpy/managers/combat2/protoss/__init__.py | etzhang416/sharpy-bot-eco | badc68ad1aa903dfa1bbc33f6225608e433ff353 | [
"Unlicense"
] | null | null | null | sharpy-sc2/sharpy/managers/combat2/protoss/__init__.py | etzhang416/sharpy-bot-eco | badc68ad1aa903dfa1bbc33f6225608e433ff353 | [
"Unlicense"
] | null | null | null | from .micro_stalkers import MicroStalkers
from .micro_zealots import MicroZealots
from .micro_carriers import MicroCarriers
from .micro_colossi import MicroColossi
from .micro_adepts import MicroAdepts
from .micro_voidrays import MicroVoidrays
from .micro_disruptor import MicroDisruptor, MicroPurificationNova
from .micro_hightemplars import MicroHighTemplars
from .micro_observers import MicroObservers
from .micro_oracles import MicroOracles
from .micro_phoenixes import MicroPhoenixes
from .micro_sentries import MicroSentries
from .micro_warp_prism import MicroWarpPrism
from .micro_immortal import MicroImmortals
from .micro_mothership import MicroMotherShip
| 41.5625 | 66 | 0.884211 |
02560728918e83670ec8d88d9fd19f62ed3cbdeb | 4,577 | py | Python | ego/decomposition/positive_and_negative.py | fabriziocosta/EGO | d89e88183cce1ff24dca9333c09fa11597a45c7a | [
"MIT"
] | null | null | null | ego/decomposition/positive_and_negative.py | fabriziocosta/EGO | d89e88183cce1ff24dca9333c09fa11597a45c7a | [
"MIT"
] | null | null | null | ego/decomposition/positive_and_negative.py | fabriziocosta/EGO | d89e88183cce1ff24dca9333c09fa11597a45c7a | [
"MIT"
] | 1 | 2022-01-24T09:53:20.000Z | 2022-01-24T09:53:20.000Z | #!/usr/bin/env python
"""Provides scikit interface."""
from toolz import curry
import numpy as np
from ego.component import GraphComponent, serialize, get_subgraphs_from_node_components
@curry
def positive_and_negative(graph, ktop=0, part_importance_estimator=None):
    """Split *graph* into its ktop most and ktop least important fragments.

    Fragments are scored through the estimator's ``importance_dict``
    (unknown codes score 0). Returns ``(positive_components,
    negative_components)``, each a list of node sets.
    """
    codes, fragments = part_importance_estimator.encoding_func(graph)
    scores = [part_importance_estimator.importance_dict.get(code, 0)
              for code in codes]
    # Never select every fragment: cap ktop below the fragment count.
    if ktop > len(scores) - 1:
        ktop = len(scores) - 1
    if ktop <= 0:
        # Bugfix: with ktop == 0 the old slice ids[-ktop:] (== ids[0:])
        # selected *all* fragments as positive while negative stayed empty.
        return [], []
    ids = np.argsort(scores)
    positive_components = [set(fragments[i].nodes()) for i in ids[-ktop:]]
    negative_components = [set(fragments[i].nodes()) for i in ids[:ktop]]
    return list(positive_components), list(negative_components)
@curry
def positive_decomposition(graph, ktop=0, part_importance_estimator=None):
    """Return only the high-importance node components of *graph*."""
    positive_components, _negative_components = positive_and_negative(
        graph, ktop=ktop,
        part_importance_estimator=part_importance_estimator)
    return positive_components
@curry
def negative_decomposition(graph, ktop=0, part_importance_estimator=None):
    """Return only the low-importance node components of *graph*."""
    _positive_components, negative_components = positive_and_negative(
        graph, ktop=ktop,
        part_importance_estimator=part_importance_estimator)
    return negative_components
@curry
def positive_and_negative_decomposition(graph, ktop=0, part_importance_estimator=None):
    """Return the high- then low-importance node components of *graph*."""
    positive_components, negative_components = positive_and_negative(
        graph, ktop=ktop,
        part_importance_estimator=part_importance_estimator)
    return positive_components + negative_components
@curry
def decompose_positive(graph_component, ktop=0, part_importance_estimator=None):
    """Decompose each subgraph into its high-importance components.

    Returns a new GraphComponent whose subgraphs are the 'positive' parts of
    every input subgraph, each tagged with an updated signature.
    """
    collected_subgraphs = []
    collected_signatures = []
    for subgraph, signature in zip(graph_component.subgraphs,
                                   graph_component.signatures):
        components = positive_decomposition(
            subgraph, ktop=ktop,
            part_importance_estimator=part_importance_estimator)
        subgraphs = get_subgraphs_from_node_components(
            graph_component.graph, components)
        tagged_signature = serialize(['positive', ktop], signature)
        collected_subgraphs.extend(subgraphs)
        collected_signatures.extend([tagged_signature] * len(subgraphs))
    return GraphComponent(
        graph=graph_component.graph,
        subgraphs=collected_subgraphs,
        signatures=collected_signatures)
@curry
def decompose_negative(graph_component, ktop=0, part_importance_estimator=None):
    """Decompose each subgraph into its low-importance components.

    Returns a new GraphComponent whose subgraphs are the 'negative' parts of
    every input subgraph, each tagged with an updated signature.
    """
    collected_subgraphs = []
    collected_signatures = []
    for subgraph, signature in zip(graph_component.subgraphs,
                                   graph_component.signatures):
        components = negative_decomposition(
            subgraph, ktop=ktop,
            part_importance_estimator=part_importance_estimator)
        subgraphs = get_subgraphs_from_node_components(
            graph_component.graph, components)
        tagged_signature = serialize(['negative', ktop], signature)
        collected_subgraphs.extend(subgraphs)
        collected_signatures.extend([tagged_signature] * len(subgraphs))
    return GraphComponent(
        graph=graph_component.graph,
        subgraphs=collected_subgraphs,
        signatures=collected_signatures)
@curry
def decompose_positive_and_negative(graph_component, ktop=0, part_importance_estimator=None):
    """Decompose each subgraph into both high- and low-importance components.

    Returns a new GraphComponent whose subgraphs are the combined positive
    and negative parts of every input subgraph, each tagged with an updated
    signature.
    """
    collected_subgraphs = []
    collected_signatures = []
    for subgraph, signature in zip(graph_component.subgraphs,
                                   graph_component.signatures):
        components = positive_and_negative_decomposition(
            subgraph, ktop=ktop,
            part_importance_estimator=part_importance_estimator)
        subgraphs = get_subgraphs_from_node_components(
            graph_component.graph, components)
        tagged_signature = serialize(['positive_and_negative', ktop], signature)
        collected_subgraphs.extend(subgraphs)
        collected_signatures.extend([tagged_signature] * len(subgraphs))
    return GraphComponent(
        graph=graph_component.graph,
        subgraphs=collected_subgraphs,
        signatures=collected_signatures)
def pst(*args, **kwargs):
    """Shorthand alias for :func:`decompose_positive`."""
    return decompose_positive(*args, **kwargs)


def ngt(*args, **kwargs):
    """Shorthand alias for :func:`decompose_negative`."""
    return decompose_negative(*args, **kwargs)


def pstngt(*args, **kwargs):
    """Shorthand alias for :func:`decompose_positive_and_negative`."""
    return decompose_positive_and_negative(*args, **kwargs)
191d316998efc51b58e4c3b28b3d0075ed32d996 | 739 | py | Python | webfront/migrations/0009_entry_annotation_changes.py | ProteinsWebTeam/project-skeleton | 7aeb971ba2d9bfe272e0590bd4484afb61336b96 | [
"Apache-2.0"
] | 6 | 2020-05-25T17:35:52.000Z | 2022-03-26T00:45:55.000Z | webfront/migrations/0009_entry_annotation_changes.py | ProteinsWebTeam/project-skeleton | 7aeb971ba2d9bfe272e0590bd4484afb61336b96 | [
"Apache-2.0"
] | 76 | 2016-07-29T09:22:34.000Z | 2022-03-15T07:57:17.000Z | webfront/migrations/0009_entry_annotation_changes.py | ProteinsWebTeam/project-skeleton | 7aeb971ba2d9bfe272e0590bd4484afb61336b96 | [
"Apache-2.0"
] | 1 | 2017-04-09T20:08:30.000Z | 2017-04-09T20:08:30.000Z | # Generated by Django 3.0.7 on 2020-06-08 10:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds `num_sequences` to EntryAnnotation and relaxes its FK to Entry.

    Auto-generated by Django; do not hand-edit applied migrations.
    """

    dependencies = [("webfront", "0008_dw_changes")]

    operations = [
        # New nullable float column on entry_annotation.
        migrations.AddField(
            model_name="entryannotation",
            name="num_sequences",
            field=models.FloatField(null=True),
        ),
        # Make the accession FK nullable and keep annotations when the
        # referenced Entry row is deleted (FK is set to NULL instead).
        migrations.AlterField(
            model_name="entryannotation",
            name="accession",
            field=models.ForeignKey(
                db_column="accession",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="webfront.Entry",
            ),
        ),
    ]
| 26.392857 | 61 | 0.571042 |
4ac4555387ed45d39e58c9e1a606ae16e1df94d1 | 3,055 | py | Python | tests/api/test_api_init.py | scailfin/benchmark-engine | 7ee5a841c1de873e8cafe2f10da4a23652395f29 | [
"MIT"
] | null | null | null | tests/api/test_api_init.py | scailfin/benchmark-engine | 7ee5a841c1de873e8cafe2f10da4a23652395f29 | [
"MIT"
] | null | null | null | tests/api/test_api_init.py | scailfin/benchmark-engine | 7ee5a841c1de873e8cafe2f10da4a23652395f29 | [
"MIT"
] | null | null | null | """Test initializing the engine API."""
import os
import shutil
from unittest import TestCase
from benchengine.api.base import EngineApi
from benchengine.api.route import UrlFactory
from benchengine.db import DatabaseDriver
import benchengine.api.serialize.hateoas as hateoas
import benchengine.api.serialize.labels as labels
import benchengine.config as config
TMP_DIR = 'tests/files/.tmp'
CONNECT = 'sqlite:{}/test.db'.format(TMP_DIR)
class TestApiInit(TestCase):
    """Test methods for initializing the Api and related components."""

    def setUp(self):
        """Create temporary directory and clean database instance."""
        # Start from a clean slate even if a previous run left files behind.
        if os.path.isdir(TMP_DIR):
            shutil.rmtree(TMP_DIR)
        os.makedirs(TMP_DIR)
        # Create fresh database instance
        DatabaseDriver.init_db(connect_string=CONNECT)

    def tearDown(self):
        """Close connection and remove database file."""
        if os.path.isdir(TMP_DIR):
            shutil.rmtree(TMP_DIR)

    def test_init_from_environ(self):
        """Test initializing the engine API using the values of environment
        variables.
        """
        # Set environment variable for database and engine base directory
        os.environ[config.ENV_DATABASE] = CONNECT
        os.environ[config.ENV_BASEDIR] = TMP_DIR
        os.environ[config.ENV_SERVICE_NAME] = 'Test service'
        # Create engine without any arguments
        api = EngineApi()
        # The temporary base directory should contain sub-folders for templates
        # and uploaded files. The directory also contains the database file
        tmpl_dir = os.path.join(TMP_DIR, config.TEMPLATE_DIR)
        self.assertTrue(os.path.isdir(tmpl_dir))
        upload_dir = os.path.join(TMP_DIR, config.UPLOAD_DIR)
        self.assertTrue(os.path.isdir(upload_dir))
        db_file = os.path.join(TMP_DIR, 'test.db')
        self.assertTrue(os.path.isfile(db_file))
        # Get the service descriptor and verify its name, version and the
        # expected set of HATEOAS links.
        service = api.service_descriptor()
        self.assertEqual(service[labels.NAME], 'Test service')
        self.assertEqual(service[labels.VERSION], api.version)
        links = hateoas.deserialize(service[labels.LINKS])
        self.assertEqual(len(links), 5)
        self.assertTrue(hateoas.SELF in links)
        self.assertTrue(hateoas.user(hateoas.LOGIN) in links)
        self.assertTrue(hateoas.user(hateoas.LOGOUT) in links)
        self.assertTrue(hateoas.user(hateoas.REGISTER) in links)
        self.assertTrue(hateoas.benchmark(hateoas.LIST) in links)
        # Make sure to close the database connection
        api.close()

    def test_url_factory_init(self):
        """Test initializing the url factory with and without arguments."""
        os.environ[config.ENV_APIURL] = 'http://my.app/api'
        # Explicit base URL wins over the environment; trailing slashes are
        # stripped.
        urls = UrlFactory(base_url='http://some.url/api////')
        self.assertEqual(urls.base_url, 'http://some.url/api')
        # Without an argument the base URL comes from the environment.
        urls = UrlFactory()
        self.assertEqual(urls.base_url, 'http://my.app/api')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    import unittest
    unittest.main()
| 38.1875 | 79 | 0.686416 |
d821e00d7afef158af284991512d88c38669904b | 169 | py | Python | ex007.py | LeoWshington/Exercicios_CursoEmVideo_Python | 294d14d9aaab5e32aaf39d70b0cd1266f0b55a02 | [
"MIT"
] | null | null | null | ex007.py | LeoWshington/Exercicios_CursoEmVideo_Python | 294d14d9aaab5e32aaf39d70b0cd1266f0b55a02 | [
"MIT"
] | null | null | null | ex007.py | LeoWshington/Exercicios_CursoEmVideo_Python | 294d14d9aaab5e32aaf39d70b0cd1266f0b55a02 | [
"MIT"
] | null | null | null | nota1 = float(input('Primeira nota do aluno: '))
nota2 = float(input('Segunda nota do aluno: '))
print(f'A média entre {nota1} e {nota2} é {(nota1 + nota2) / 2 :.1f}.')
| 42.25 | 71 | 0.64497 |
14b55c5265f9b3652dbb1e5ef4307b3bf3f71ac8 | 430 | py | Python | app/core/migrations/0005_recipe_image.py | ezmani/recipe-app-api | 68771472a2f0f4756f282075c34601f25053ea4a | [
"MIT"
] | null | null | null | app/core/migrations/0005_recipe_image.py | ezmani/recipe-app-api | 68771472a2f0f4756f282075c34601f25053ea4a | [
"MIT"
] | null | null | null | app/core/migrations/0005_recipe_image.py | ezmani/recipe-app-api | 68771472a2f0f4756f282075c34601f25053ea4a | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-28 12:45
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a nullable `image` field to Recipe.

    Auto-generated by Django; do not hand-edit applied migrations.
    """

    dependencies = [
        ('core', '0004_recipe'),
    ]

    operations = [
        migrations.AddField(
            model_name='recipe',
            name='image',
            # Upload path is computed by the project helper
            # core.models.recipe_image_file_path.
            field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
        ),
    ]
| 21.5 | 93 | 0.62093 |
80b04bae505544650b6310b9ebdc29d9b6597e71 | 579 | py | Python | aws_mock/requests/create_security_group.py | enaydanov/aws_mock | 4ad3dca270ad164693e85741d5e92f845c34aa01 | [
"Apache-2.0"
] | null | null | null | aws_mock/requests/create_security_group.py | enaydanov/aws_mock | 4ad3dca270ad164693e85741d5e92f845c34aa01 | [
"Apache-2.0"
] | 1 | 2021-10-21T21:06:29.000Z | 2021-10-21T21:06:29.000Z | aws_mock/requests/create_security_group.py | bentsi/aws_mock | d6c1b963e02b4cd3602722e7135f4d65f6a71d3e | [
"Apache-2.0"
] | 1 | 2021-11-08T14:20:36.000Z | 2021-11-08T14:20:36.000Z | import logging
from aws_mock.lib import get_collection_by_resource_id, generate_resource_id, aws_response
LOGGER = logging.getLogger(__name__)
@aws_response
def create_security_group(group_name: str) -> dict:
security_group_id = generate_resource_id(resource_type="sg")
LOGGER.debug("Add security group `%s' with name `%s'", security_group_id, group_name)
get_collection_by_resource_id(resource_id=security_group_id).insert_one({
"id": security_group_id,
"tags": {"Name": group_name},
})
return {"security_group_id": security_group_id}
| 28.95 | 90 | 0.756477 |
fbeb36d5bc60238255c779cae8e8ed35dbbc7679 | 4,025 | py | Python | launchpad/examples/consumer_producers/launch.py | leloykun/launchpad | f591b5931cbba89bc94050a126d86cd1bda312f5 | [
"Apache-2.0"
] | 1 | 2021-05-02T22:03:23.000Z | 2021-05-02T22:03:23.000Z | launchpad/examples/consumer_producers/launch.py | leloykun/launchpad | f591b5931cbba89bc94050a126d86cd1bda312f5 | [
"Apache-2.0"
] | null | null | null | launchpad/examples/consumer_producers/launch.py | leloykun/launchpad | f591b5931cbba89bc94050a126d86cd1bda312f5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example introduces basic notions in Launchpad."""
from typing import Callable, List
from absl import app
from absl import flags
from absl import logging
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_producers', 2, 'The number of concurrent producers.')
class Consumer:
"""A simple consumer that calls producers to perform some work."""
def __init__(
self,
producers: List[lp.CourierClient],
stop_fn: Callable[[], None],
) -> None:
"""Initializes a Consumer.
Args:
producers: a list of Producer handles.
stop_fn: a callable for conditional stopping of the program.
"""
self._producers = producers
self._stop_program = stop_fn
def run(self) -> None:
"""Entry point of the consumer."""
# As a toy example we run 10 steps to interact with producers. Typically,
# this would be replaced with an infinite loop or a loop with some stopping
# criterion.
for _ in range(10):
self.step()
# Stop the whole program (consumer and producers). Simply returning here
# would stop the consumer but not the producers.
self._stop_program()
def step(self) -> None:
"""Tells all the producers to perform one step of work."""
# Call the producers to asynchronously produce work given a dummy context
# represented by a counter.
futures = [
producer.futures.work(context)
for context, producer in enumerate(self._producers)
]
# Block to gather the results of all the producers.
results = [future.result() for future in futures]
logging.info('Results: %s', results)
class Producer:
  """A minimal, stateless producer."""

  def work(self, context: int) -> int:
    """Performs one unit of (trivial) work: echoes the context back.

    Courier futures may call this from multiple threads; since the producer
    holds no state, the method is trivially thread safe.
    """
    return context
def make_program(num_producers: int) -> lp.Program:
  """Define the distributed program topology.

  Args:
    num_producers: number of Producer nodes to add to the program.

  Returns:
    The assembled `lp.Program` with one consumer and `num_producers` producers.
  """
  program = lp.Program('consumer_producers')
  # Use `program.group()` to group homogeneous nodes.
  with program.group('producer'):
    # Add a `CourierNode` to the program. `lp.CourierNode()` takes the producer
    # constructor and its arguments, and exposes it as an RPC server.
    # `program.add_node(lp.CourierNode(...))` returns a handle to this server.
    # These handles can then be passed to other nodes.
    producers = [
        program.add_node(lp.CourierNode(Producer)) for _ in range(num_producers)
    ]
  # Launch a single consumer that connects to the list of producers.
  # Note: The use of `label` here actually creates a group with one single node.
  node = lp.CourierNode(
      Consumer,
      producers=producers,
      stop_fn=lp.make_program_stopper(FLAGS.lp_launch_type))
  program.add_node(node, label='consumer')
  return program
def main(_):
  """Builds the example program and launches it."""
  # A program describes the topology of communicating nodes and edges; larger
  # examples may define and launch several programs at once. None of the
  # producers is instantiated at launch time -- nodes only come to life at
  # runtime.
  program = make_program(num_producers=FLAGS.num_producers)
  lp.launch(program)
# Standard absl entry point: parses flags, then calls main().
if __name__ == '__main__':
  app.run(main)
| 32.723577 | 80 | 0.71205 |
c83996c39f42a1e149d875dccfe384937f607f11 | 5,146 | py | Python | alf/networks/network_test.py | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 | [
"Apache-2.0"
] | null | null | null | alf/networks/network_test.py | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 | [
"Apache-2.0"
] | null | null | null | alf/networks/network_test.py | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for alf.networks.network."""
from absl.testing import parameterized
import torch
import torch.nn as nn
import alf
from alf.tensor_specs import TensorSpec
from alf.initializers import _numerical_calculate_gain
from alf.initializers import _calculate_gain
from alf.networks import EncodingNetwork, LSTMEncodingNetwork
from alf.networks.network import NaiveParallelNetwork
class BaseNetwork(alf.networks.Network):
    """Minimal Network subclass used to exercise copy() through inheritance."""

    def __init__(self, v1, **kwargs):
        # Forward the first positional argument (the input tensor spec) and any
        # keyword arguments to alf.networks.Network.
        super().__init__(v1, **kwargs)
class MockNetwork(BaseNetwork):
    """Network that records its constructor args so tests can verify copy()."""

    def __init__(self, param1, param2, kwarg1=2, kwarg2=3):
        # Stash the constructor arguments; NetworkTest checks that copies
        # preserve them.
        self.param1 = param1
        self.param2 = param2
        self.kwarg1 = kwarg1
        self.kwarg2 = kwarg2
        # param1 doubles as the input tensor spec; the network name is fixed.
        super().__init__(param1, name='mock')
        # One frozen and one trainable parameter.
        self.var1 = nn.Parameter(torch.tensor(1., requires_grad=False))
        self.var2 = nn.Parameter(torch.tensor(2., requires_grad=True))

    def forward(self, observations, network_state=None):
        # Output is independent of the inputs; sufficient for copy tests.
        return self.var1 + self.var2
class NoInitNetwork(MockNetwork):
    """Subclass without its own __init__, to test copy() of an inherited ctor."""
    pass
class NetworkTest(alf.test.TestCase):
    """Tests for Network.copy() semantics."""

    def test_copy_works(self):
        # pass a TensorSpec to prevent assertion error in Network
        network1 = MockNetwork(TensorSpec([2]), 1)
        network2 = network1.copy()
        # The copy is a distinct object but preserves all constructor args.
        self.assertNotEqual(network1, network2)
        self.assertEqual(TensorSpec([2]), network2.param1)
        self.assertEqual(1, network2.param2)
        self.assertEqual(2, network2.kwarg1)
        self.assertEqual(3, network2.kwarg2)

    def test_noinit_copy_works(self):
        # Same as test_copy_works, but through a subclass that does not define
        # its own __init__.
        # pass a TensorSpec to prevent assertion error in Network
        network1 = NoInitNetwork(TensorSpec([2]), 1)
        network2 = network1.copy()
        self.assertNotEqual(network1, network2)
        self.assertEqual(TensorSpec([2]), network2.param1)
        self.assertEqual(1, network2.param2)
        self.assertEqual(2, network2.kwarg1)
        self.assertEqual(3, network2.kwarg2)

    def test_too_many_args_raises_appropriate_error(self):
        # Surplus positional arguments should surface as TypeError rather than
        # something more obscure.
        self.assertRaises(TypeError, MockNetwork, 0, 1, 2, 3, 4, 5, 6)
class InitializerTest(parameterized.TestCase, alf.test.TestCase):
    """Checks numerically estimated initializer gains against analytic ones."""

    @parameterized.parameters((torch.relu), (alf.utils.math_ops.identity, ),
                              (torch.tanh, ), (torch.sigmoid, ),
                              (torch.nn.functional.elu, ),
                              (torch.nn.functional.leaky_relu, ))
    def test_numerical_calculate_gain(self, activation):
        numerical_gain = _numerical_calculate_gain(activation)
        # _calculate_gain takes the torch nonlinearity name; 'identity' maps to
        # the 'linear' gain.
        if activation.__name__ == "identity":
            gain = _calculate_gain("linear")
        else:
            gain = _calculate_gain(activation.__name__)
        print(activation.__name__, numerical_gain, gain)
        # The numerical estimate should be close to the analytic value.
        self.assertLess(abs(numerical_gain - gain), 0.1)
class NaiveParallelNetworkTest(alf.test.TestCase):
    """Tests NaiveParallelNetwork wrapping feed-forward and recurrent nets."""

    def test_non_rnn(self):
        input_spec = TensorSpec((100, ), torch.float32)
        embedding = input_spec.zeros(outer_dims=(6, ))
        network = EncodingNetwork(
            input_tensor_spec=input_spec,
            fc_layer_params=(30, 40, 50),
            activation=torch.tanh)
        replicas = 4
        num_layers = 3
        pnet = NaiveParallelNetwork(network, replicas)
        # Each replica owns its own weight and bias for every layer.
        self.assertEqual(
            len(list(pnet.parameters())), num_layers * 2 * replicas)
        output, _ = pnet(embedding)
        # The replica dimension is inserted after the batch dimension.
        self.assertEqual(output.shape, (6, replicas, 50))
        self.assertEqual(pnet.output_spec.shape, (replicas, 50))

    def test_rnn(self):
        input_spec = TensorSpec((100, ), torch.float32)
        embedding = input_spec.zeros(outer_dims=(6, ))
        network = LSTMEncodingNetwork(
            input_tensor_spec=input_spec, hidden_size=(30, 40))
        replicas = 4
        pnet = NaiveParallelNetwork(network, replicas)
        # State specs gain a leading replica dimension: (h, c) per LSTM layer.
        self.assertEqual(pnet.state_spec,
                         [(TensorSpec((4, 30)), TensorSpec((4, 30))),
                          (TensorSpec((4, 40)), TensorSpec((4, 40)))])
        state = alf.utils.common.zero_tensor_from_nested_spec(
            pnet.state_spec, 6)
        output, state = pnet(embedding, state)
        self.assertEqual(output.shape, (6, replicas, 40))
        self.assertEqual(pnet.output_spec.shape, (replicas, 40))
        # The returned state keeps the same nested spec structure.
        self.assertEqual(
            alf.utils.dist_utils.extract_spec(state),
            [(TensorSpec((4, 30)), TensorSpec((4, 30))),
             (TensorSpec((4, 40)), TensorSpec((4, 40)))])
# Allow running this test module directly.
if __name__ == '__main__':
    alf.test.main()
| 36.239437 | 80 | 0.662456 |
6ba28e55bee6223a9b51ff38bf41be9d34c01e6d | 558 | py | Python | src/canisterpy/errors.py | cnstr/canister.py | c383cf1b8feb2d604dc0fa42933822b84ead83ad | [
"Apache-2.0"
] | 4 | 2022-01-14T01:15:22.000Z | 2022-01-14T13:58:32.000Z | src/canisterpy/errors.py | cnstr/canister.py | c383cf1b8feb2d604dc0fa42933822b84ead83ad | [
"Apache-2.0"
] | null | null | null | src/canisterpy/errors.py | cnstr/canister.py | c383cf1b8feb2d604dc0fa42933822b84ead83ad | [
"Apache-2.0"
] | 2 | 2022-02-18T04:01:58.000Z | 2022-03-13T19:32:58.000Z | '''Defines all error types for canister.py.'''
# errors.py
class ClosedError(Exception):
    """Raised when a client is used after it has been closed."""
class InitializationError(Exception):
    """Raised when initialization of the Canister class fails."""
class InvalidFieldError(Exception):
    """Raised when an invalid field is passed to SearchFields."""
class RequestError(Exception):
    """Raised when a Canister API request fails."""
1621d63a2d7c1d7a8ee1385a2b8f50df086d7a31 | 141,153 | py | Python | IA_tool/views.py | HN-Le/Impact-Assessment-Tool | 564fe45f3dd3382237ec8dd32758db266dfcf32e | [
"MIT"
] | null | null | null | IA_tool/views.py | HN-Le/Impact-Assessment-Tool | 564fe45f3dd3382237ec8dd32758db266dfcf32e | [
"MIT"
] | null | null | null | IA_tool/views.py | HN-Le/Impact-Assessment-Tool | 564fe45f3dd3382237ec8dd32758db266dfcf32e | [
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from . import widgets as w
from . import constants as c
import os
import webbrowser
from tkinter.scrolledtext import ScrolledText
class ProjectPurposeScreen(tk.Frame):
    """Phase-1 screen of the impact-assessment tool.

    Hosts four sections: help documentation, project goals (1.1), goal model
    (1.2) and method fragments (1.3). Tracks whether a project-goals file and a
    goal-model file have been linked.
    """

    def __init__(self):
        tk.Frame.__init__(self)
        # Selection flags guard the 'Show' buttons until a file is linked.
        self.project_goal_selected = False
        self.goal_model_selected = False
        self.create_doc()
        self.create_project_goals_section()
        self.create_goal_model_section()
        self.create_method_fragment_section()

    def create_doc(self):
        """Builds the help-documentation button bar and the checklist panel."""
        # load in documentation
        frame_project_docs = ttk.LabelFrame(self, text="View help documentation",
                                            width=c.Size.label_frame_width,
                                            height=80,
                                            style="Doc.TLabelframe")
        frame_project_docs.grid_propagate(0)
        frame_project_docs.grid(row=0, column=0,
                                padx=(10, 0),
                                pady=(10, 0),
                                sticky='nsew')
        frame_steps_1 = ttk.LabelFrame(self, text="Phase 1 Checklist",
                                       width=400,
                                       height=200,
                                       style="Doc.TLabelframe")
        frame_steps_1.grid(row=0, column=1,
                           rowspan=4,
                           padx=(10, 0),
                           pady=(10, 0),
                           sticky='nsew')
        # Numbered checklist items get extra top padding; sub-items are
        # indented instead.
        for step in c.MethodSteps.phase_1:
            if step.startswith(('1', '2', '3', '4', '5', '6', '7')):
                tk.Label(frame_steps_1,
                         text=step).grid(sticky='w', padx=5, pady=(10,0))
            else:
                tk.Label(frame_steps_1,
                         text=step).grid(sticky='w', padx=(20,10), pady=0)
        # Wide blank label acts as a spacer to stretch the checklist frame.
        tk.Label(frame_steps_1,
                 text=" " * 150).grid(sticky='w', padx=(20, 10), pady=0)
        # One button per help PDF; each opens in the system browser/viewer.
        tk.Button(frame_project_docs,
                  text='1.1 Project Goals',
                  width=20, height=c.Size.button_height,
                  command=lambda: [webbrowser.open(c.PdfFiles.project_goals)]).grid(row=0, column=0,
                                                                                    padx=(10, 0), pady=5,
                                                                                    sticky='w')
        tk.Button(frame_project_docs,
                  text='1.2 Goal Model',
                  width=20, height=c.Size.button_height,
                  command=lambda: [webbrowser.open(c.PdfFiles.goal_model)]).grid(row=0, column=1,
                                                                                 padx=(10, 0), pady=5,
                                                                                 sticky='w')
        tk.Button(frame_project_docs,
                  text='1.3.1 Method Fragments',
                  width=20, height=c.Size.button_height,
                  command=lambda: [webbrowser.open(c.PdfFiles.method_fragments)]).grid(row=0, column=2,
                                                                                       padx=(10, 0), pady=5,
                                                                                       sticky='w')
        tk.Button(frame_project_docs,
                  text='1.3.2 Metrics',
                  width=20, height=c.Size.button_height,
                  command=lambda: [webbrowser.open(c.PdfFiles.metrics)]).grid(row=0, column=3,
                                                                              padx=(10, 0), pady=5,
                                                                              sticky='w')

    def create_project_goals_section(self):
        """Builds section 1.1: select and show the project-goals file."""
        frame_project_goals = ttk.LabelFrame(self, text="1.1 Project Goals",
                                             width=c.Size.label_frame_width, height=c.Size.label_frame_height)
        frame_project_goals.grid_propagate(0)
        frame_project_goals.grid(row=1, column=0,
                                 padx=(10, 0),
                                 pady=(10, 0),
                                 sticky='nsew')
        label_project_goals = tk.Label(frame_project_goals,
                                       text='Define project goals & link the file here')
        label_project_goals.grid(row=0, column=0,
                                 padx=(10, 0), columnspan=2,
                                 sticky='n')
        # make object
        self.project_pdf = w.FileOpener(self)
        # convert to string var and set init text
        self.text_project_pdf = tk.StringVar()
        self.text_project_pdf.set("")
        # create label and place in gui
        self.project_label = tk.Label(frame_project_goals,
                                      textvariable=self.text_project_pdf).grid(row=3, column=0, sticky='w',
                                                                               padx=(20, 0), columnspan=150)
        # create button with actions
        button_upload_1 = tk.Button(frame_project_goals,
                                    text='Select',
                                    width=c.Size.button_width, height=c.Size.button_height,
                                    command=lambda: [select_goal_select_functions()])
        # place upload button
        button_upload_1.grid(row=2, column=0,
                             padx=(10, 0), pady=5,
                             sticky='w')

        def select_goal_select_functions():
            # Open a file dialog; on success remember the path and clear any
            # error message, otherwise reset the selection state.
            file_path = self.project_pdf.get_file_path()
            filename = self.project_pdf.return_file_name()
            if file_path:
                self.text_project_pdf.set(filename)
                self.project_goal_selected = True
                self.status_message_project_txt.set("")
                self.dict_paths.update_user_doc_path_dict('project_goals', file_path)
            else:
                self.project_goal_selected = False
                self.text_project_pdf.set('')
                self.dict_paths.update_user_doc_path_dict('project_goals', '')

        self.status_message_project_txt = tk.StringVar()
        self.status_message_project_txt.set("")
        status_message_project_label = tk.Label(frame_project_goals,
                                                font='Helvetica 11', foreground='red',
                                                textvariable=self.status_message_project_txt).grid(row=4, column=0,
                                                                                                   sticky='w',
                                                                                                   padx=(20, 0),
                                                                                                   columnspan=150)

        def select_goal_show_functions():
            # Only open the linked document when one has been selected.
            if self.project_goal_selected:
                self.project_pdf.show_project_goals()
            else:
                self.status_message_project_txt.set("Select project goals first!")

        # place show button
        button_show_1 = tk.Button(frame_project_goals,
                                  text='Show',
                                  width=c.Size.button_width, height=c.Size.button_height,
                                  command=select_goal_show_functions)
        button_show_1.grid(row=2, column=1,
                           padx=(10, 0), pady=5,
                           sticky='w')

    def create_goal_model_section(self):
        """Builds section 1.2: select and show the goal-model file."""
        frame_goal_model = ttk.LabelFrame(self, text="1.2 Goal Model",
                                          width=c.Size.label_frame_width, height=c.Size.label_frame_height)
        frame_goal_model.grid_propagate(0)
        frame_goal_model.grid(row=2, column=0,
                              padx=(10, 0),
                              pady=(10, 0),
                              sticky='nsew')
        label_project_goals = tk.Label(frame_goal_model,
                                       text='Create goal model & link the file here')
        label_project_goals.grid(row=0, column=0,
                                 padx=(10, 0),
                                 columnspan=2,
                                 sticky='w')
        self.goal_pdf = w.FileOpener(self)
        # convert to string var and set init text
        self.text_goal_pdf = tk.StringVar()
        self.text_goal_pdf.set("")
        # create label and place in gui
        self.project_goals_label = tk.Label(frame_goal_model,
                                            textvariable=self.text_goal_pdf).grid(row=4, column=0, sticky='w',
                                                                                  padx=(20, 0), columnspan=150)

        def goal_model_select_functions():
            # Same pattern as the project-goals section: remember a valid
            # selection, otherwise reset the stored path.
            file_path = self.goal_pdf.get_file_path()
            filename = self.goal_pdf.return_file_name()
            if file_path:
                self.text_goal_pdf.set(filename)
                status_message_project_model_txt.set("")
                self.goal_model_selected = True
                self.dict_paths.update_user_doc_path_dict('goal_model', file_path)
            else:
                self.goal_model_selected = False
                self.text_goal_pdf.set('')
                self.dict_paths.update_user_doc_path_dict('goal_model', '')

        def goal_model_show_functions():
            if self.goal_model_selected:
                self.goal_pdf.show_project_goals()
            else:
                status_message_project_model_txt.set("Select goal model first!")

        button_upload_2 = tk.Button(frame_goal_model,
                                    text='Select',
                                    width=c.Size.button_width, height=c.Size.button_height,
                                    command=lambda: [goal_model_select_functions()])
        button_upload_2.grid(row=2, column=0,
                             padx=(10, 0),
                             pady=5,
                             sticky='w')
        status_message_project_model_txt = tk.StringVar()
        status_message_project_model_txt.set("")
        tk.Label(frame_goal_model,
                 font='Helvetica 11', foreground='red',
                 textvariable=status_message_project_model_txt).grid(row=5, column=0, sticky='w', padx=(20, 0),
                                                                     columnspan=150)
        button_show_2 = tk.Button(frame_goal_model,
                                  text='Show',
                                  width=c.Size.button_width, height=c.Size.button_height,
                                  command=lambda: [goal_model_show_functions()])
        button_show_2.grid(row=2, column=1,
                           padx=(10, 0),
                           pady=2,
                           sticky='w')

    def create_method_fragment_section(self):
        """Builds section 1.3: select method fragments and manage metrics."""
        frame_select_method_fragments = ttk.LabelFrame(self, text="1.3 Method Fragments",
                                                       width=c.Size.label_frame_width, height=250)
        frame_select_method_fragments.grid_propagate(0)
        frame_select_method_fragments.grid(row=3, column=0,
                                           padx=(10, 0),
                                           pady=(10, 0),
                                           sticky='nsew')
        label_selected_method_fragments = tk.Label(frame_select_method_fragments,
                                                   text='Select method fragments')
        label_selected_method_fragments.grid(row=1, column=0, columnspan=2,
                                             padx=(20, 0),
                                             sticky='w')
        self.method_fragment = w.MethodFragmentSelection(self)
        # checkboxes and method fragments
        button_upload_3 = tk.Button(frame_select_method_fragments,
                                    text='Select',
                                    width=c.Size.button_width, height=c.Size.button_height,
                                    command=lambda: [self.method_fragment.show_selection_screen(),
                                                     self.method_fragment.send_status_message(show_status_message,
                                                                                              show_status_message_metric_def)])
        button_upload_3.grid(row=3, column=0,
                             padx=(10, 0),
                             pady=2,
                             sticky='w')
        status_message_show_method_frags = ''
        status_message_add_metric_def = ''

        def if_clicked(section):
            # Shared handler for the 'Show' and 'Add / Show' buttons: refuse
            # with an error message until method fragments are selected.
            self.method_fragment.send_status_message(show_status_message, show_status_message_metric_def)
            if self.method_fragment.methode_frags_selected == False:
                show_status_message['text'] = 'Select method fragments first!'
                show_status_message_metric_def['text'] = 'Select method fragments first!'
            else:
                show_status_message['text'] = ''
                show_status_message_metric_def['text'] = ''
                if section == 'method_frag':
                    self.method_fragment.show_info_screen()
                else:
                    self.method_fragment.show_add_metric_definition_window()

        button_upload_4 = tk.Button(frame_select_method_fragments,
                                    text='Show',
                                    width=c.Size.button_width, height=c.Size.button_height,
                                    command=lambda: [if_clicked('method_frag')])
        button_upload_4.grid(row=3, column=1,
                             padx=(10, 0),
                             pady=2,
                             sticky='w')
        # ------------
        show_status_message = ttk.Label(frame_select_method_fragments,
                                        font='Helvetica 11', foreground='red',
                                        text=status_message_show_method_frags)
        show_status_message.grid(row=4, column=0,
                                 columnspan=20,
                                 padx=10, pady=(10),
                                 sticky='w')
        label_add_definition = tk.Label(frame_select_method_fragments,
                                        text='Add metric definition & set targets')
        label_add_definition.grid(row=5, column=0, columnspan=100,
                                  padx=(20, 0),
                                  sticky='w')
        button_upload_5 = tk.Button(frame_select_method_fragments,
                                    text='Add / Show',
                                    height=c.Size.button_height,
                                    command=lambda: [if_clicked('add_metrics')])
        button_upload_5.grid(row=6, column=0,
                             padx=(10, 0),
                             pady=2,
                             sticky='w')
        show_status_message_metric_def = ttk.Label(frame_select_method_fragments,
                                                   font='Helvetica 11', foreground='red',
                                                   text=status_message_add_metric_def)
        show_status_message_metric_def.grid(row=7, column=0,
                                            columnspan=20,
                                            padx=10, pady=(10),
                                            sticky='w')
        # Hand the frame to the method-fragment widget for later use.
        self.sendFrame(frame_select_method_fragments)

    def save_data(self):
        """Passes this phase's state (paths, fragment selection) to the save file."""
        self.save_file_object.get_project_purpose(self.dict_paths.user_doc_file_paths,
                                                  self.method_fragment.checkbox_list,
                                                  self.method_fragment.methode_frags_selected)

    def sendFrame(self, frame):
        """Forwards a frame to the method-fragment widget."""
        self.method_fragment.retrieve_frame(frame)

    def getProjectPdfPath(self):
        """Asks the user for a file and stores the chosen path."""
        self.project_pdf_file_path = filedialog.askopenfilename()

    def send_data_object(self, data):
        """Stores the database access object and shares it with the widget."""
        self.data_object = data
        self.method_fragment.get_data_object(self.data_object)

    def send_dict_paths(self, dict):
        """Stores the dictionary of user document paths."""
        self.dict_paths = dict

    def send_save_file_object(self, data):
        """Stores the save-file object used by save/restore."""
        self.save_file_object = data

    def restore_from_save_file(self):
        """Rebuilds this screen's state from a previously saved session."""
        # if path to project model was saved
        if self.save_file_object.data['project_goals_path']:
            self.project_goal_selected = True
            self.text_project_pdf.set(self.project_pdf.clean_file_name(self.save_file_object.data['project_goals_path']))
        # if path to goal model was saved
        if self.save_file_object.data['goal_model_path']:
            self.goal_model_selected = True
            self.text_goal_pdf.set(self.project_pdf.clean_file_name(self.save_file_object.data['goal_model_path']))
        # if method fragments were saved
        # checkboxes
        if self.save_file_object.data['selected_method_fragments']:
            # Build the (hidden) selection window so its checkboxes exist, then
            # re-check the saved fragments.
            self.method_fragment.show_selection_screen()
            self.method_fragment.selection_window.withdraw()
            # NOTE(review): retrieve_sql is queried but not used here -- TODO
            # confirm whether the query has a needed side effect.
            sql = 'select method_fragment_name from method_fragment'
            retrieve_sql = self.data_object.query_no_par(sql)
            for item in self.save_file_object.data['selected_method_fragments']:
                self.method_fragment.checkbox[item].select()
            self.method_fragment.methode_frags_selected = True
            self.method_fragment.checkbox_list = self.save_file_object.data['selected_method_fragments']
            # Rebuild the info/metrics screens off-screen so their widgets
            # reflect the restored selection.
            self.method_fragment.show_info_screen()
            self.method_fragment.delete_frame(self.method_fragment.scrollable_metric_frame)
            self.method_fragment.delete_frame(self.method_fragment.add_metrics_frame)
            self.method_fragment.delete_frame(self.method_fragment.remove_frame)
            self.method_fragment.add_metric()
            self.method_fragment.show_summary_metrics()
            self.method_fragment.info_window.withdraw()
class DataCollectionScreen(tk.Frame):
def __init__(self):
tk.Frame.__init__(self)
global start_project_window
self.start_project_window = None
self.data_collection_window = self
self.sampling_selected = False
frame_project_docs = ttk.LabelFrame(self, text="View help documentation",
width=c.Size.label_frame_width,
height=80,
style="Doc.TLabelframe")
frame_project_docs.grid_propagate(0)
frame_project_docs.grid(row=0, column=0,
padx=(10, 0),
pady=(10, 0),
sticky='nsew')
frame_steps_2 = ttk.LabelFrame(self, text="Phase 2 Checklist",
width=400,
height=200,
style="Doc.TLabelframe")
frame_steps_2.grid(row=0, column=1,
rowspan=3,
padx=(10, 0),
pady=(10, 0),
sticky='nsew')
for step in c.MethodSteps.phase_2:
if step.startswith(('1', '2', '3', '4', '5', '6')):
tk.Label(frame_steps_2,
text=step).grid(sticky='w', padx=5, pady=(10, 0))
else:
tk.Label(frame_steps_2,
text=step).grid(sticky='w', padx=(20, 10), pady=0)
tk.Label(frame_steps_2,
text=" " * 150).grid(sticky='w', padx=(20, 10), pady=0)
tk.Button(frame_project_docs,
text='2.1 Sampling Strategy',
width=20, height=c.Size.button_height,
command=lambda: [webbrowser.open(c.PdfFiles.sampling_strategy)]).grid(row=0, column=0,
padx=(10, 0), pady=5,
sticky='w')
tk.Button(frame_project_docs,
text='2.2 Data Collection',
width=20, height=c.Size.button_height,
command=lambda: [webbrowser.open(c.PdfFiles.data_collection)]).grid(row=0, column=1,
padx=(10, 0), pady=5,
sticky='w')
# --------- 2.1 Sampling strategy frame
frame_sampling = ttk.LabelFrame(self, text="2.1 Sampling strategy",
width=c.Size.label_frame_width, height=150)
frame_sampling.grid_propagate(0)
frame_sampling.grid(row=1, column=0,
padx=(10, 0),
pady=(10, 0),
sticky='nsew')
label_sampling = tk.Label(frame_sampling,
text='Determine sampling strategy')
label_sampling.grid(row=1, column=0, columnspan=100,
padx=(20, 0),
sticky='w')
# make file opener object
self.data_collection_pdf = w.FileOpener(self)
# make data collection object
self.data_collection = w.DataCollection(self)
# convert to string var and set init text
self.text_sampling_pdf = tk.StringVar()
self.text_sampling_pdf.set("")
# create label and place in gui
self.project_label = tk.Label(frame_sampling,
textvariable=self.text_sampling_pdf,
foreground = "black")
self.project_label.grid(row=3, column=0,
sticky='w',
padx=(20, 0),
columnspan=150)
# functions if valid
        def sampling_show_functions():
            """Open the selected sampling-strategy document, or warn the user.

            If a sampling strategy was selected, reset the filename label to
            black and open the document via the shared FileOpener; otherwise
            show a red prompt in the status label.
            """
            if self.sampling_selected:
                # clear any previous warning colour before opening
                self.project_label["foreground"] = "black"
                self.data_collection_pdf.show_project_goals()
            else:
                # no file chosen yet: show a red reminder instead
                self.status_message_label.config(foreground="red")
                self.status_message_txt.set("Select sampling strategy first!")
self.status_message_txt = tk.StringVar()
self.status_message_txt.set("")
self.status_message_label = tk.Label(frame_sampling,
font='Helvetica 11', foreground='red',
textvariable=self.status_message_txt)
self.status_message_label.grid(row=4, column=0,
sticky='w',
padx=(20, 0),
columnspan=150)
# check if valid link
        def sampling_strategy_select_functions():
            """Ask the user for a sampling-strategy file and record its path.

            On a successful selection, show the filename, clear any warning
            and persist the path in the shared path dictionary; on cancel,
            forget any previously selected file.
            """
            file_path = self.data_collection_pdf.get_file_path()
            filename = self.data_collection_pdf.return_file_name()
            if file_path:
                # file chosen: show its name, clear warnings, persist path
                self.text_sampling_pdf.set(filename)
                self.status_message_txt.set("")
                self.sampling_selected = True
                self.dict_paths.update_user_doc_path_dict('sampling_strategy', file_path)
            else:
                # dialog cancelled: reset selection state and stored path
                self.sampling_selected = False
                self.text_sampling_pdf.set('')
                self.dict_paths.update_user_doc_path_dict('sampling_strategy', '')
# create button with actions
button_upload_1 = tk.Button(frame_sampling,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [sampling_strategy_select_functions()])
# place upload button
button_upload_1.grid(row=2, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
button_show_1 = tk.Button(frame_sampling,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=sampling_show_functions)
button_show_1.grid(row=2, column=1,
padx=(10, 0), pady=5,
sticky='w')
# --------- 2.2 Data collection frame
frame_data_collection = ttk.LabelFrame(self, text="2.2 Data collection",
width=c.Size.label_frame_width, height=400)
frame_data_collection.grid_propagate(0)
frame_data_collection.grid(row=2, column=0,
padx=(10, 0),
pady=(10, 0),
sticky='nsew')
# header
label_date = tk.Label(frame_data_collection,
text='Date')
label_date.grid(row=3, column=0, columnspan=4,
padx=(20, 0), pady=(10),
sticky='w')
label_time_period_header = tk.Label(frame_data_collection,
text='Time period')
label_time_period_header.grid(row=3, column=5, columnspan=4,
padx=(20, 0),
sticky='w')
# row 1
self.user_date_1 = tk.StringVar()
self.user_date_1_input = ttk.Entry(frame_data_collection, width=15, textvariable=self.user_date_1)
self.user_date_1_input.grid(row=4, column=0, padx=(20, 0), pady=15, sticky='nswe')
label_time_period_1 = tk.Label(frame_data_collection,
text='Start of project')
label_time_period_1.grid(row=4, column=5, columnspan=4,
padx=(20, 0),
sticky='w')
button_upload_1 = tk.Button(frame_data_collection,
text='Upload',
width=10, height=1,
command=lambda : [self.show_project_start(), self.notebook_data_collection.select(0)])
button_upload_1.grid(row=4, column=11,
padx=(100, 0),
sticky='w')
# row 2
self.user_date_2 = tk.StringVar()
self.user_date_2_input = ttk.Entry(frame_data_collection, width=15, textvariable=self.user_date_2)
self.user_date_2_input.grid(row=5, column=0, padx=(20, 0), pady=15, sticky='nswe')
label_time_period_2 = tk.Label(frame_data_collection,
text='Halfway point of project')
label_time_period_2.grid(row=5, column=5, columnspan=4,
padx=(20, 0),
sticky='w')
button_upload_2 = tk.Button(frame_data_collection,
text='Upload',
width=10, height=1,
command=lambda : [self.show_project_start(), self.notebook_data_collection.select(1)])
button_upload_2.grid(row=5, column=11,
padx=(100, 0),
sticky='w')
# row 3
self.user_date_3 = tk.StringVar()
self.user_date_3_input = ttk.Entry(frame_data_collection, width=15, textvariable=self.user_date_3)
self.user_date_3_input.grid(row=6, column=0, padx=(20, 0), pady=15, sticky='nswe')
label_time_period_3 = tk.Label(frame_data_collection,
text='End of project')
label_time_period_3.grid(row=6, column=5, columnspan=4,
padx=(20, 0),
sticky='w')
button_upload_3 = tk.Button(frame_data_collection,
text='Upload',
width=10, height=1,
command=lambda : [self.show_project_start(), self.notebook_data_collection.select(2)])
button_upload_3.grid(row=6, column=11,
padx=(100, 0),
sticky='w')
# row 4
self.user_date_4 = tk.StringVar()
self.user_date_4_input = ttk.Entry(frame_data_collection, width=15, textvariable=self.user_date_4)
self.user_date_4_input.grid(row=7, column=0, padx=(20, 0), pady=15, sticky='nswe')
label_time_period_4 = tk.Label(frame_data_collection,
text='Year after end of project')
label_time_period_4.grid(row=7, column=5, columnspan=4,
padx=(20, 0),
sticky='w')
button_upload_4 = tk.Button(frame_data_collection,
text='Upload',
width=10, height=1,
command=lambda : [self.show_project_start(), self.notebook_data_collection.select(3)])
button_upload_4.grid(row=7, column=11,
padx=(100, 0),
sticky='w')
self.user_dates_objects = [self.user_date_1,
self.user_date_2,
self.user_date_3,
self.user_date_4]
def restore_from_save_file(self):
self.user_date_1.set(self.save_file_object.data['date_sop'])
self.user_date_2.set(self.save_file_object.data['date_hop'])
self.user_date_3.set(self.save_file_object.data['date_eop'])
self.user_date_4.set(self.save_file_object.data['date_yap'])
# if path to sampling strategy was saved
if self.save_file_object.data['sampling_strategy_path']:
self.sampling_selected = True
self.text_sampling_pdf.set(self.data_collection_pdf.clean_file_name(self.save_file_object.data['sampling_strategy_path']))
# if paths to loading in data were saved
if self.save_file_object.data['data_collection_paths']:
# make toplevel than hide
self.show_project_start()
self.start_project_window.withdraw()
self.dict_paths.dc_file_paths = self.save_file_object.data['data_collection_paths']
self.data_file_status_list = self.save_file_object.data['data_file_status_list']
# set paths back
self.provider_file_label_sop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['sop_provider']))
self.leader_file_label_sop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['sop_leader']))
self.teacher_file_label_sop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['sop_teacher']))
self.student_file_label_sop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['sop_student']))
self.provider_file_label_hop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['hop_provider']))
self.leader_file_label_hop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['hop_leader']))
self.teacher_file_label_hop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['hop_teacher']))
self.student_file_label_hop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['hop_student']))
self.provider_file_label_eop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['eop_provider']))
self.leader_file_label_eop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['eop_leader']))
self.teacher_file_label_eop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['eop_teacher']))
self.student_file_label_eop.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['eop_student']))
self.provider_file_label_yap.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['yap_provider']))
self.leader_file_label_yap.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['yap_leader']))
self.teacher_file_label_yap.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['yap_teacher']))
self.student_file_label_yap.set(self.data_collection_pdf.clean_file_name(self.dict_paths.dc_file_paths['yap_student']))
self.provider_object_sop.file_path = self.dict_paths.dc_file_paths['sop_provider']
self.provider_object_hop.file_path = self.dict_paths.dc_file_paths['hop_provider']
self.provider_object_eop.file_path = self.dict_paths.dc_file_paths['eop_provider']
self.provider_object_yap.file_path = self.dict_paths.dc_file_paths['yap_provider']
self.leader_object_sop.file_path = self.dict_paths.dc_file_paths['sop_leader']
self.leader_object_hop.file_path = self.dict_paths.dc_file_paths['hop_leader']
self.leader_object_eop.file_path = self.dict_paths.dc_file_paths['eop_leader']
self.leader_object_yap.file_path = self.dict_paths.dc_file_paths['yap_leader']
self.teacher_object_sop.file_path = self.dict_paths.dc_file_paths['sop_teacher']
self.teacher_object_hop.file_path = self.dict_paths.dc_file_paths['hop_teacher']
self.teacher_object_eop.file_path = self.dict_paths.dc_file_paths['eop_teacher']
self.teacher_object_yap.file_path = self.dict_paths.dc_file_paths['yap_teacher']
self.student_object_sop.file_path = self.dict_paths.dc_file_paths['sop_student']
self.student_object_hop.file_path = self.dict_paths.dc_file_paths['hop_student']
self.student_object_eop.file_path = self.dict_paths.dc_file_paths['eop_student']
self.student_object_yap.file_path = self.dict_paths.dc_file_paths['yap_student']
def send_dict_paths(self, dict):
self.dict_paths = dict
self.data_collection.get_dict_paths(self.dict_paths)
def send_save_file_object(self, data):
self.save_file_object = data
def save_data(self):
self.user_dates = {'date_sop' : self.user_dates_objects[0].get(),
'date_hop' : self.user_dates_objects[1].get(),
'date_eop' : self.user_dates_objects[2].get(),
'date_yap' : self.user_dates_objects[3].get()}
try:
self.save_file_object.get_data_collection(self.user_dates, self.dict_paths.dc_file_paths, self.data_file_status_list)
except:
self.data_file_status_list = []
self.save_file_object.get_data_collection(self.user_dates, self.dict_paths.dc_file_paths, self.data_file_status_list)
def reset_status_messages(self):
self.provider_status_message_label_sop.set('')
self.provider_status_message_label_hop.set('')
self.provider_status_message_label_eop.set('')
self.provider_status_message_label_yap.set('')
self.leader_status_message_label_sop.set('')
self.leader_status_message_label_hop.set('')
self.leader_status_message_label_eop.set('')
self.leader_status_message_label_yap.set('')
self.teacher_status_message_label_sop.set('')
self.teacher_status_message_label_hop.set('')
self.teacher_status_message_label_eop.set('')
self.teacher_status_message_label_yap.set('')
self.student_status_message_label_sop.set('')
self.student_status_message_label_hop.set('')
self.student_status_message_label_eop.set('')
self.student_status_message_label_yap.set('')
def show_project_start(self):
self.data_file_status_list = []
time_period = ['sop', 'hop', 'eop', 'yap']
targets = ['provider', 'leader', 'teacher', 'student']
# fill data_file_status_list
for period in time_period:
for target in targets:
self.data_file_status_list.append({'time_period': period,
'target': target,
'status': False})
# if there is not already a 'start of project' window
if not self.start_project_window:
# create pop up window
self.start_project_window = tk.Toplevel()
self.start_project_window.wm_title('Load in survey data')
width = 1280
height = 600
position_left = 150
position_right = 150
self.start_project_window.geometry("{}x{}+{}+{}".format(width, height, position_left, position_right))
# set size window fixed
self.start_project_window.resizable(0, 0)
width_tab = 1280
height_tab = 600
# width = self.start_project_window.winfo_screenwidth()
# height = self.start_project_window.winfo_screenheight()
#
# self.start_project_window.geometry('%sx%s' % (int(width-100), int(height)))
#------------------------- Notebook
# make notebook
self.notebook_data_collection = ttk.Notebook(self.start_project_window)
# make tabs
self.tab_sop = ttk.Frame(self.start_project_window, width=width_tab, height=height_tab)
self.tab_sop.grid(row=0, column=0,
padx=(10, 0),
sticky='nsew')
self.tab_hop = ttk.Frame(self.start_project_window, width=width_tab, height=height_tab)
self.tab_hop.grid(padx=(10, 0),
sticky='nsew')
self.tab_eop = ttk.Frame(self.start_project_window, width=width_tab, height=height_tab)
self.tab_eop.grid(padx=(10, 0),
sticky='nsew')
self.tab_yap = ttk.Frame(self.start_project_window, width=width_tab, height=height_tab)
self.tab_yap.grid(padx=(10, 0),
sticky='nsew')
# add tabs to notebook
self.notebook_data_collection.add(self.tab_sop, text='1- Start of project')
self.notebook_data_collection.add(self.tab_hop, text='2- Halfway of project')
self.notebook_data_collection.add(self.tab_eop, text='3- End of project')
self.notebook_data_collection.add(self.tab_yap, text='4- Year after end of project')
self.notebook_data_collection.grid(row=0, column=0, sticky='E', padx=5, pady=5, ipadx=5, ipady=5)
# hide window if closed
self.start_project_window.protocol("WM_DELETE_WINDOW", lambda arg='start_project': self.hide_window(arg))
#------------------------- functions for validation and label creation
self.data_file_status_list = []
time_period = ['sop', 'hop', 'eop', 'yap']
targets = ['provider', 'leader', 'teacher', 'student']
targets_with_period = ['sop_provider',
'sop_leader',
'sop_teacher',
'sop_student',
'hop_provider',
'hop_leader',
'hop_teacher',
'hop_student',
'eop_provider',
'eop_leader',
'eop_teacher',
'eop_student',
'yap_provider',
'yap_leader',
'yap_teacher',
'yap_student'
]
# fill data_file_status_list
for period in time_period:
for target in targets:
self.data_file_status_list.append({'time_period': period,
'target': target,
'status': False})
            def create_label(label_name, frame, row, column, color):
                """Place a Helvetica-11 label bound to *label_name* (a StringVar) in *frame*."""
                tk.Label(frame,
                         font='Helvetica 11', foreground=color,
                         textvariable=label_name).grid(row=row, column=column,
                                                       sticky='w',
                                                       padx=(10, 0),
                                                       columnspan=150)
# check if valid link
            def validate_path(file_name_label, status_message_label, file_opener_object, index, status_list):
                """Prompt for a CSV file and record the result for slot *index*.

                Updates the filename label, marks the slot done in
                *status_list*, and persists the path in the shared path
                dictionary (closes over ``targets_with_period`` for the key).
                On cancel, the label and stored path are reset.
                """
                # drop 'always on top' so the file dialog is not hidden
                # behind the survey-data window
                self.start_project_window.attributes("-topmost", False)
                file_path = file_opener_object.get_file_path_csv()
                filename = file_opener_object.return_file_name()
                # check if a file is selected
                if file_path:
                    file_name_label.set(filename)
                    status_message_label.set('')
                    status_list[index]['status'] = True
                    self.dict_paths.update_dc_path_dict(targets_with_period, index, file_opener_object.file_path)
                # if no file is selected
                else:
                    file_name_label.set('Filename: ')
                    self.dict_paths.update_dc_path_dict(targets_with_period, index, '')
                # restore 'always on top' once the dialog is closed
                self.start_project_window.attributes("-topmost", True)
# functions if valid
            def show_csv_file(file_selected, status_message_label, file_opener_object):
                """Open the CSV previously selected for this slot, or show an error.

                NOTE(review): *file_selected* is unused — the check relies on
                the opener's stored ``file_path`` instead.
                """
                if file_opener_object.file_path:
                    file_opener_object.show_project_goals()
                else:
                    status_message_label.set("Select a file first!")
#------------------------- Data collection: Start of project (SOP) Frame
frame_project_sop= ttk.LabelFrame( self.tab_sop, text="2.2 Data collection - 1: Start of project",
width=1200, height=700)
frame_project_sop.grid_propagate(0)
frame_project_sop.grid(padx=(10, 0),
pady=(10,0),
sticky='nsew')
# ------------------------- SOP
# ------------------------- SOP: Project provider
tk.Label(frame_project_sop,
text='Project provider data (CSV file only)',
font='Helvetica 11 bold').grid(row=2, column=0, columnspan=4,
padx=(10, 0),
pady=(10, 0),
sticky='w')
# make FileOpener object
self.provider_object_sop = w.FileOpener(self)
# label for file_name provider (to store file path)
self.provider_file_label_sop = tk.StringVar()
self.provider_file_label_sop.set("Filename: ")
# place in GUI
create_label(label_name= self.provider_file_label_sop,
frame=frame_project_sop,
row=4,
column=0,
color='black')
# label for status message provider
self.provider_status_message_label_sop = tk.StringVar()
self.provider_status_message_label_sop.set("")
        # place in GUI
create_label(label_name= self.provider_status_message_label_sop,
frame=frame_project_sop,
row=5,
column=0,
color='red')
# check for period and target
# 0 = sop - provider
# 1 = sop - leader
# 2 = sop - teacher
# 3 = sop - student
# create and place 'select' button with actions
tk.Button(frame_project_sop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label= self.provider_file_label_sop,
status_message_label= self.provider_status_message_label_sop,
file_opener_object= self.provider_object_sop,
index=0,
status_list=self.data_file_status_list)
] ).grid(row=3, column=0,
padx=(10, 0), pady=5,
sticky='w')
# create and place 'show' button with actions
tk.Button(frame_project_sop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected= self.data_file_status_list[0]['status'],
status_message_label= self.provider_status_message_label_sop,
file_opener_object= self.provider_object_sop)
]).grid(row=3, column=1,
padx=(10, 0), pady=5,
sticky='w')
#------------------------- SOP: Community leader
tk.Label(frame_project_sop,
text='Community leader data (CSV file only)',
font='Helvetica 11 bold').grid(row=6, column=0, columnspan=4,
pady=(10), padx=(10, 0),
sticky='w')
# make FileOpener object
self.leader_object_sop = w.FileOpener(self)
# label for file_name community leader (to store path)
self.leader_file_label_sop = tk.StringVar()
self.leader_file_label_sop.set("Filename: ")
create_label(label_name=self.leader_file_label_sop,
frame=frame_project_sop,
row=8,
column=0,
color='black')
# label for status message community leader
self.leader_status_message_label_sop = tk.StringVar()
self.leader_status_message_label_sop.set("")
create_label(label_name=self.leader_status_message_label_sop,
frame=frame_project_sop,
row=9,
column=0,
color='red')
# check for period and target
# 0 = sop - provider
# 1 = sop - leader
# 2 = sop - teacher
# 3 = sop - student
# create and place 'select' button with actions
tk.Button(frame_project_sop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.leader_file_label_sop,
status_message_label=self.leader_status_message_label_sop,
file_opener_object=self.leader_object_sop,
index=1,
status_list=self.data_file_status_list)
]).grid(row=7, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_sop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[1]['status'],
status_message_label=self.leader_status_message_label_sop,
file_opener_object=self.leader_object_sop)
]).grid(row=7, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- SOP: Teacher
tk.Label(frame_project_sop,
text='Teacher data (CSV file only)',
font='Helvetica 11 bold').grid(row=10,
column=0, columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# make FileOpener object
self.teacher_object_sop = w.FileOpener(self)
# label for file_name teacher (to store path)
self.teacher_file_label_sop = tk.StringVar()
self.teacher_file_label_sop.set("Filename: ")
create_label(label_name=self.teacher_file_label_sop,
frame=frame_project_sop,
row=14,
column=0,
color='black')
# label for status message community leader
self.teacher_status_message_label_sop = tk.StringVar()
self.teacher_status_message_label_sop.set("")
create_label(label_name=self.teacher_status_message_label_sop,
frame=frame_project_sop,
row=15,
column=0,
color='red')
# check for period and target
# 0 = sop - provider
# 1 = sop - leader
# 2 = sop - teacher
# 3 = sop - student
        # create and place 'select' button with actions
tk.Button(frame_project_sop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.teacher_file_label_sop,
status_message_label=self.teacher_status_message_label_sop,
file_opener_object=self.teacher_object_sop,
index=2,
status_list=self.data_file_status_list)
]).grid(row=11, column=0,
padx=(10, 0), pady=5,
sticky='w')
        # create and place 'show' button with actions
tk.Button(frame_project_sop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[2]['status'],
status_message_label=self.teacher_status_message_label_sop,
file_opener_object=self.teacher_object_sop)
]).grid(row=11, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- SOP: Student
self.student_object_sop = w.FileOpener(self)
tk.Label(frame_project_sop,
text='Student data (CSV file only)',
font='Helvetica 11 bold').grid(row=16, column=0,
columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# convert to string var and set init text
self.student_file_label_sop = tk.StringVar()
self.student_file_label_sop.set("Filename: ")
create_label(label_name=self.student_file_label_sop,
frame=frame_project_sop,
row=19,
column=0,
color='black')
# label for status message community leader
self.student_status_message_label_sop = tk.StringVar()
self.student_status_message_label_sop.set("")
create_label(label_name=self.student_status_message_label_sop,
frame=frame_project_sop,
row=20,
column=0,
color='red')
# check for period and target
# 0 = sop - provider
# 1 = sop - leader
# 2 = sop - teacher
# 3 = sop - student
# create button with actions
tk.Button(frame_project_sop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.student_file_label_sop,
status_message_label=self.student_status_message_label_sop,
file_opener_object=self.student_object_sop,
index=3,
status_list=self.data_file_status_list)]).grid(row=17, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_sop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[3]['status'],
status_message_label=self.student_status_message_label_sop,
file_opener_object=self.student_object_sop)
]).grid(row=17, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- HOP
# ------------------------- Data collection: Halfway of project (HOP) Frame
frame_project_hop = ttk.LabelFrame(self.tab_hop, text="2.2 Data collection - 2: Halfway of project",
width=1200, height=700)
frame_project_hop.grid_propagate(0)
frame_project_hop.grid(padx=(10, 0),
pady=(10, 0),
sticky='nsew')
# ------------------------- HOP: Project provider
tk.Label(frame_project_hop,
text='Project provider data (CSV file only)',
font='Helvetica 11 bold').grid(row=2, column=0, columnspan=4,
padx=(10, 0),
pady=(10, 0),
sticky='w')
# make FileOpener object
self.provider_object_hop = w.FileOpener(self)
# label for file_name provider (to store file path)
self.provider_file_label_hop = tk.StringVar()
self.provider_file_label_hop.set("Filename: ")
# place in GUI
create_label(label_name= self.provider_file_label_hop,
frame=frame_project_hop,
row=4,
column=0,
color='black')
# label for status message provider
self.provider_status_message_label_hop = tk.StringVar()
self.provider_status_message_label_hop.set("")
# place in GUI
create_label(label_name= self.provider_status_message_label_hop,
frame=frame_project_hop,
row=5,
column=0,
color='red')
# check for period and target
# 4 = hop - provider
# 5 = hop - leader
# 6 = hop - teacher
# 7 = hop - student
# create and place 'select' button with actions
tk.Button(frame_project_hop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label= self.provider_file_label_hop,
status_message_label= self.provider_status_message_label_hop,
file_opener_object= self.provider_object_hop,
index= 4,
status_list=self.data_file_status_list)
]).grid(row=3, column=0,
padx=(10, 0), pady=5,
sticky='w')
# create and place 'show' button with actions
tk.Button(frame_project_hop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected= self.data_file_status_list[4]['status'],
status_message_label= self.provider_status_message_label_hop,
file_opener_object= self.provider_object_hop)
]).grid(row=3, column=1,
padx=(10, 0), pady=5,
sticky='w')
#------------------------- HOP: Community leader
tk.Label(frame_project_hop,
text='Community leader data (CSV file only)',
font='Helvetica 11 bold').grid(row=6, column=0, columnspan=4,
pady=(10), padx=(10, 0),
sticky='w')
# make FileOpener object
self.leader_object_hop = w.FileOpener(self)
# label for file_name community leader (to store path)
self.leader_file_label_hop = tk.StringVar()
self.leader_file_label_hop.set("Filename: ")
create_label(label_name=self.leader_file_label_hop,
frame=frame_project_hop,
row=8,
column=0,
color='black')
# label for status message community leader
self.leader_status_message_label_hop = tk.StringVar()
self.leader_status_message_label_hop.set("")
create_label(label_name=self.leader_status_message_label_hop,
frame=frame_project_hop,
row=9,
column=0,
color='red')
# check for period and target
# 4 = hop - provider
# 5 = hop - leader
# 6 = hop - teacher
# 7 = hop - student
# create and place 'select' button with actions
tk.Button(frame_project_hop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.leader_file_label_hop,
status_message_label=self.leader_status_message_label_hop,
file_opener_object=self.leader_object_hop,
index=5,
status_list=self.data_file_status_list)
]).grid(row=7, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_hop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[5]['status'],
status_message_label=self.leader_status_message_label_hop,
file_opener_object=self.leader_object_hop)
]).grid(row=7, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- HOP: Teacher
tk.Label(frame_project_hop,
text='Teacher data (CSV file only)',
font='Helvetica 11 bold').grid(row=10,
column=0, columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# make FileOpener object
self.teacher_object_hop = w.FileOpener(self)
# label for file_name teacher (to store path)
self.teacher_file_label_hop = tk.StringVar()
self.teacher_file_label_hop.set("Filename: ")
create_label(label_name=self.teacher_file_label_hop,
frame=frame_project_hop,
row=14,
column=0,
color='black')
# label for status message community leader
self.teacher_status_message_label_hop = tk.StringVar()
self.teacher_status_message_label_hop.set("")
create_label(label_name=self.teacher_status_message_label_hop,
frame=frame_project_hop,
row=15,
column=0,
color='red')
# check for period and target
# 4 = hop - provider
# 5 = hop - leader
# 6 = hop - teacher
# 7 = hop - student
            # create and place 'select' button with actions
tk.Button(frame_project_hop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.teacher_file_label_hop,
status_message_label=self.teacher_status_message_label_hop,
file_opener_object=self.teacher_object_hop,
index=6,
status_list=self.data_file_status_list)
]).grid(row=11, column=0,
padx=(10, 0), pady=5,
sticky='w')
            # create and place 'show' button with actions
tk.Button(frame_project_hop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[6]['status'],
status_message_label=self.teacher_status_message_label_hop,
file_opener_object=self.teacher_object_hop)
]).grid(row=11, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- HOP: Student
self.student_object_hop = w.FileOpener(self)
tk.Label(frame_project_hop,
text='Student data (CSV file only)',
font='Helvetica 11 bold').grid(row=16, column=0,
columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# convert to string var and set init text
self.student_file_label_hop = tk.StringVar()
self.student_file_label_hop.set("Filename: ")
create_label(label_name=self.student_file_label_hop,
frame=frame_project_hop,
row=19,
column=0,
color='black')
# label for status message community leader
self.student_status_message_label_hop = tk.StringVar()
self.student_status_message_label_hop.set("")
create_label(label_name=self.student_status_message_label_hop,
frame=frame_project_hop,
row=20,
column=0,
color='red')
# check for period and target
# 4 = hop - provider
# 5 = hop - leader
# 6 = hop - teacher
# 7 = hop - student
# create button with actions
tk.Button(frame_project_hop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.student_file_label_hop,
status_message_label=self.student_status_message_label_hop,
file_opener_object=self.student_object_hop,
index=7,
status_list=self.data_file_status_list
)]).grid(row=17, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_hop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[7]['status'],
status_message_label=self.student_status_message_label_hop,
file_opener_object=self.student_object_hop)
]).grid(row=17, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- EOP
# ------------------------- Data collection: End of project (EOP) Frame
frame_project_eop = ttk.LabelFrame(self.tab_eop, text="2.2 Data collection - 3: End of project",
width=1200, height=700)
frame_project_eop.grid_propagate(0)
frame_project_eop.grid(padx=(10, 0),
pady=(10, 0),
sticky='nsew')
# ------------------------- EOP: Project provider
tk.Label(frame_project_eop,
text='Project provider data (CSV file only)',
font='Helvetica 11 bold').grid(row=2, column=0, columnspan=4,
padx=(10, 0),
pady=(10, 0),
sticky='w')
# make FileOpener object
self.provider_object_eop = w.FileOpener(self)
# label for file_name provider (to store file path)
self.provider_file_label_eop = tk.StringVar()
self.provider_file_label_eop.set("Filename: ")
# place in GUI
create_label(label_name=self.provider_file_label_eop,
frame=frame_project_eop,
row=4,
column=0,
color='black')
# label for status message provider
self.provider_status_message_label_eop = tk.StringVar()
self.provider_status_message_label_eop.set("")
# place in GUI
create_label(label_name=self.provider_status_message_label_eop,
frame=frame_project_eop,
row=5,
column=0,
color='red')
# check for period and target
            # 8 = eop - provider
            # 9 = eop - leader
            # 10 = eop - teacher
            # 11 = eop - student
# create and place 'select' button with actions
tk.Button(frame_project_eop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.provider_file_label_eop,
status_message_label=self.provider_status_message_label_eop,
file_opener_object=self.provider_object_eop,
index=8,
status_list=self.data_file_status_list)
]).grid(row=3, column=0,
padx=(10, 0), pady=5,
sticky='w')
# create and place 'show' button with actions
tk.Button(frame_project_eop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[8]['status'],
status_message_label=self.provider_status_message_label_eop,
file_opener_object=self.provider_object_eop)
]).grid(row=3, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- EOP: Community leader
tk.Label(frame_project_eop,
text='Community leader data (CSV file only)',
font='Helvetica 11 bold').grid(row=6, column=0, columnspan=4,
pady=(10), padx=(10, 0),
sticky='w')
# make FileOpener object
self.leader_object_eop = w.FileOpener(self)
# label for file_name community leader (to store path)
self.leader_file_label_eop = tk.StringVar()
self.leader_file_label_eop.set("Filename: ")
create_label(label_name=self.leader_file_label_eop,
frame=frame_project_eop,
row=8,
column=0,
color='black')
# label for status message community leader
self.leader_status_message_label_eop = tk.StringVar()
self.leader_status_message_label_eop.set("")
create_label(label_name=self.leader_status_message_label_eop,
frame=frame_project_eop,
row=9,
column=0,
color='red')
# check for period and target
# 8 = eop - provider
# 9 = eop - leader
# 10 = eop - teacher
# 11 = eop - student
# create and place 'select' button with actions
tk.Button(frame_project_eop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.leader_file_label_eop,
status_message_label=self.leader_status_message_label_eop,
file_opener_object=self.leader_object_eop,
index=9,
status_list=self.data_file_status_list)
]).grid(row=7, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_eop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[9]['status'],
status_message_label=self.leader_status_message_label_eop,
file_opener_object=self.leader_object_eop)
]).grid(row=7, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- EOP: Teacher
tk.Label(frame_project_eop,
text='Teacher data (CSV file only)',
font='Helvetica 11 bold').grid(row=10,
column=0, columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# make FileOpener object
self.teacher_object_eop = w.FileOpener(self)
# label for file_name teacher (to store path)
self.teacher_file_label_eop = tk.StringVar()
self.teacher_file_label_eop.set("Filename: ")
create_label(label_name=self.teacher_file_label_eop,
frame=frame_project_eop,
row=14,
column=0,
color='black')
# label for status message teacher
self.teacher_status_message_label_eop = tk.StringVar()
self.teacher_status_message_label_eop.set("")
create_label(label_name=self.teacher_status_message_label_eop,
frame=frame_project_eop,
row=15,
column=0,
color='red')
# check for period and target
# 8 = eop - provider
# 9 = eop - leader
# 10 = eop - teacher
# 11 = eop - student
# create and place 'select' button with actions
tk.Button(frame_project_eop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.teacher_file_label_eop,
status_message_label=self.teacher_status_message_label_eop,
file_opener_object=self.teacher_object_eop,
index=10,
status_list=self.data_file_status_list)
]).grid(row=11, column=0,
padx=(10, 0), pady=5,
sticky='w')
# create and place 'show' button with actions
tk.Button(frame_project_eop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[10]['status'],
status_message_label=self.teacher_status_message_label_eop,
file_opener_object=self.teacher_object_eop)
]).grid(row=11, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- EOP: Student
self.student_object_eop = w.FileOpener(self)
tk.Label(frame_project_eop,
text='Student data (CSV file only)',
font='Helvetica 11 bold').grid(row=16, column=0,
columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# convert to string var and set init text
self.student_file_label_eop = tk.StringVar()
self.student_file_label_eop.set("Filename: ")
create_label(label_name=self.student_file_label_eop,
frame=frame_project_eop,
row=19,
column=0,
color='black')
# label for status message student
self.student_status_message_label_eop = tk.StringVar()
self.student_status_message_label_eop.set("")
create_label(label_name=self.student_status_message_label_eop,
frame=frame_project_eop,
row=20,
column=0,
color='red')
# check for period and target
# 8 = eop - provider
# 9 = eop - leader
# 10 = eop - teacher
# 11 = eop - student
# create button with actions
tk.Button(frame_project_eop,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.student_file_label_eop,
status_message_label=self.student_status_message_label_eop,
file_opener_object=self.student_object_eop,
index=11,
status_list=self.data_file_status_list
)]).grid(row=17, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_eop,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[11]['status'],
status_message_label=self.student_status_message_label_eop,
file_opener_object=self.student_object_eop)
]).grid(row=17, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- YAP
# ------------------------- Data collection: Year after end project (YAP) Frame
frame_project_yap = ttk.LabelFrame(self.tab_yap, text="2.2 Data collection - 4: Year after end of project",
width=1200, height=700)
frame_project_yap.grid_propagate(0)
frame_project_yap.grid(padx=(10, 0),
pady=(10, 0),
sticky='nsew')
# ------------------------- YAP: Project provider
tk.Label(frame_project_yap,
text='Project provider data (CSV file only)',
font='Helvetica 11 bold').grid(row=2, column=0, columnspan=4,
padx=(10, 0),
pady=(10, 0),
sticky='w')
# make FileOpener object
self.provider_object_yap = w.FileOpener(self)
# label for file_name provider (to store file path)
self.provider_file_label_yap = tk.StringVar()
self.provider_file_label_yap.set("Filename: ")
# place in GUI
create_label(label_name=self.provider_file_label_yap,
frame=frame_project_yap,
row=4,
column=0,
color='black')
# label for status message provider
self.provider_status_message_label_yap = tk.StringVar()
self.provider_status_message_label_yap.set("")
# place in GUI
create_label(label_name=self.provider_status_message_label_yap,
frame=frame_project_yap,
row=5,
column=0,
color='red')
# check for period and target
# 12 = yap - provider
# 13 = yap - leader
# 14 = yap - teacher
# 15 = yap - student
# create and place 'select' button with actions
tk.Button(frame_project_yap,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.provider_file_label_yap,
status_message_label=self.provider_status_message_label_yap,
file_opener_object=self.provider_object_yap,
index=12,
status_list=self.data_file_status_list)
]).grid(row=3, column=0,
padx=(10, 0), pady=5,
sticky='w')
# create and place 'show' button with actions
tk.Button(frame_project_yap,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[12]['status'],
status_message_label=self.provider_status_message_label_yap,
file_opener_object=self.provider_object_yap)
]).grid(row=3, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- YAP: Community leader
tk.Label(frame_project_yap,
text='Community leader data (CSV file only)',
font='Helvetica 11 bold').grid(row=6, column=0, columnspan=4,
pady=(10), padx=(10, 0),
sticky='w')
# make FileOpener object
self.leader_object_yap = w.FileOpener(self)
# label for file_name community leader (to store path)
self.leader_file_label_yap = tk.StringVar()
self.leader_file_label_yap.set("Filename: ")
create_label(label_name=self.leader_file_label_yap,
frame=frame_project_yap,
row=8,
column=0,
color='black')
# label for status message community leader
self.leader_status_message_label_yap = tk.StringVar()
self.leader_status_message_label_yap.set("")
create_label(label_name=self.leader_status_message_label_yap,
frame=frame_project_yap,
row=9,
column=0,
color='red')
# check for period and target
# 12 = yap - provider
# 13 = yap - leader
# 14 = yap - teacher
# 15 = yap - student
# create and place 'select' button with actions
tk.Button(frame_project_yap,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.leader_file_label_yap,
status_message_label=self.leader_status_message_label_yap,
file_opener_object=self.leader_object_yap,
index=13,
status_list=self.data_file_status_list)
]).grid(row=7, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_yap,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[13]['status'],
status_message_label=self.leader_status_message_label_yap,
file_opener_object=self.leader_object_yap)
]).grid(row=7, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- YAP: Teacher
tk.Label(frame_project_yap,
text='Teacher data (CSV file only)',
font='Helvetica 11 bold').grid(row=10,
column=0, columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# make FileOpener object
self.teacher_object_yap = w.FileOpener(self)
# label for file_name teacher (to store path)
self.teacher_file_label_yap = tk.StringVar()
self.teacher_file_label_yap.set("Filename: ")
create_label(label_name=self.teacher_file_label_yap,
frame=frame_project_yap,
row=14,
column=0,
color='black')
# label for status message teacher
self.teacher_status_message_label_yap = tk.StringVar()
self.teacher_status_message_label_yap.set("")
create_label(label_name=self.teacher_status_message_label_yap,
frame=frame_project_yap,
row=15,
column=0,
color='red')
# check for period and target
# 12 = yap - provider
# 13 = yap - leader
# 14 = yap - teacher
# 15 = yap - student
# create and place 'select' button with actions
tk.Button(frame_project_yap,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.teacher_file_label_yap,
status_message_label=self.teacher_status_message_label_yap,
file_opener_object=self.teacher_object_yap,
index=14,
status_list=self.data_file_status_list)
]).grid(row=11, column=0,
padx=(10, 0), pady=5,
sticky='w')
# create and place 'show' button with actions
tk.Button(frame_project_yap,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[14]['status'],
status_message_label=self.teacher_status_message_label_yap,
file_opener_object=self.teacher_object_yap)
]).grid(row=11, column=1,
padx=(10, 0), pady=5,
sticky='w')
# ------------------------- YAP: Student
self.student_object_yap = w.FileOpener(self)
tk.Label(frame_project_yap,
text='Student data (CSV file only)',
font='Helvetica 11 bold').grid(row=16, column=0,
columnspan=4,
padx=(10, 0), pady=10,
sticky='w')
# convert to string var and set init text
self.student_file_label_yap = tk.StringVar()
self.student_file_label_yap.set("Filename: ")
create_label(label_name=self.student_file_label_yap,
frame=frame_project_yap,
row=19,
column=0,
color='black')
# label for status message student
self.student_status_message_label_yap = tk.StringVar()
self.student_status_message_label_yap.set("")
create_label(label_name=self.student_status_message_label_yap,
frame=frame_project_yap,
row=20,
column=0,
color='red')
# check for period and target
# 12 = yap - provider
# 13 = yap - leader
# 14 = yap - teacher
# 15 = yap - student
# create button with actions
tk.Button(frame_project_yap,
text='Select',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [validate_path(file_name_label=self.student_file_label_yap,
status_message_label=self.student_status_message_label_yap,
file_opener_object=self.student_object_yap,
index=15,
status_list=self.data_file_status_list
)]).grid(row=17, column=0,
padx=(10, 0), pady=5,
sticky='w')
# place show button
tk.Button(frame_project_yap,
text='Show',
width=c.Size.button_width, height=c.Size.button_height,
command=lambda: [show_csv_file(file_selected=self.data_file_status_list[15]['status'],
status_message_label=self.student_status_message_label_yap,
file_opener_object=self.student_object_yap)
]).grid(row=17, column=1,
padx=(10, 0), pady=5,
sticky='w')
# focus on window
window_obj = w.Window()
window_obj.focus_window(self.start_project_window)
else:
# reset all the red status messages when re opening window
self.reset_status_messages()
self.start_project_window.deiconify()
def hide_window(self, window):
    """Withdraw (hide) the named window instead of destroying it.

    Keeping the Toplevel alive preserves the user's entered state so it can
    simply be deiconified again later.
    """
    if window != "start_project":
        return
    self.start_project_window.withdraw()
class DataAnalysisScreen(tk.Frame):
    """Phase 3 screen: load the collected CSV data into the database and
    explore it through summary tables and visualisations.

    The exploration UI lives in a separate 'Data Analysis' Toplevel popup
    (built lazily by :meth:`create_popup`) with two notebook tabs: summary
    tables and visualisations.
    """

    def __init__(self):
        """Build the phase-3 layout: help-documentation buttons, the phase-3
        checklist, the 'load data' frame and the 'summary data' frame."""
        tk.Frame.__init__(self)
        # NOTE(review): 'global popup_window' looks like a leftover — only the
        # instance attribute self.popup_window is read/written below. Confirm
        # no module-level popup_window is used elsewhere before removing.
        global popup_window
        self.popup_window = None
        # create new instance of DataAnalysis class
        self.data_analysis_object = w.DataAnalysis(self)
        frame_project_docs = ttk.LabelFrame(self, text="View help documentation",
                                            width=c.Size.label_frame_width,
                                            height=80,
                                            style="Doc.TLabelframe")
        frame_project_docs.grid_propagate(0)
        frame_project_docs.grid(row=0, column=0,
                                padx=(10, 0),
                                pady=(10, 0),
                                sticky='nsew')
        frame_steps_3 = ttk.LabelFrame(self, text="Phase 3 Checklist",
                                       width=400,
                                       height=200,
                                       style="Doc.TLabelframe")
        frame_steps_3.grid(row=0, column=1,
                           rowspan=4,
                           padx=(10, 0),
                           pady=(10, 0),
                           sticky='nsew')
        # numbered top-level steps get extra top padding; sub-steps are indented
        for step in c.MethodSteps.phase_3:
            if step.startswith(('1', '2', '3', '4')):
                tk.Label(frame_steps_3,
                         text=step).grid(sticky='w', padx=5, pady=(10, 0))
            else:
                tk.Label(frame_steps_3,
                         text=step).grid(sticky='w', padx=(20, 10), pady=0)
        # spacer label to force the checklist frame to a usable width
        tk.Label(frame_steps_3,
                 text=" " * 150).grid(sticky='w', padx=(20, 10), pady=0)
        tk.Button(frame_project_docs,
                  text='3.1 Loading in Data',
                  width=20, height=c.Size.button_height,
                  command=lambda: [webbrowser.open(c.PdfFiles.loading_in_data)]).grid(row=0, column=0,
                                                                                      padx=(10, 0), pady=5,
                                                                                      sticky='w')
        tk.Button(frame_project_docs,
                  text='3.2 Summary Data',
                  width=20, height=c.Size.button_height,
                  command=lambda: [webbrowser.open(c.PdfFiles.summary_data)]).grid(row=0, column=1,
                                                                                   padx=(10, 0), pady=5,
                                                                                   sticky='w')
        # ------------------------- Data Analysis: Load all data files
        frame_load_data = ttk.LabelFrame(self, text="3.1 Load All Data",
                                         width=c.Size.label_frame_width, height=150)
        frame_load_data.grid_propagate(0)
        frame_load_data.grid(row=1, column=0,
                             padx=(10, 0),
                             pady=(10, 0),
                             sticky='nsew')
        # status message
        tk.Label(frame_load_data,
                 text='Load all data for data analysis').grid(padx=(10,0), pady=5,
                                                              sticky='w')
        # delete window and reset state every time load in data button is clicked
        def adjusted_state_window():
            # forget the popup so create_popup() rebuilds it from scratch
            self.popup_window = None
        tk.Button(frame_load_data, text='Load in data',
                  width=18, height=1,
                  command=lambda: [self.data_analysis_object.delete_frame(self.popup_window), adjusted_state_window(),
                                   self.data_analysis_object.load_into_database(self.dict_paths.dc_file_paths, frame_load_data)]).grid(row=2, column=0,
                                                                                                                                       padx=(10,0), pady=5,
                                                                                                                                       sticky='w')
        # ------------------------- Data Analysis: 3.1 Summary Data Frame
        frame_summary_data = ttk.LabelFrame(self, text="3.2 Summary Data",
                                            width=c.Size.label_frame_width, height=150)
        frame_summary_data.grid_propagate(0)
        frame_summary_data.grid(row=2, column=0,
                                padx=(10, 0),
                                pady=(10, 0),
                                sticky='nsew')
        self.status_message_summary_data = tk.StringVar()
        self.status_message_summary_data.set("")
        # status message
        tk.Label(frame_summary_data,
                 textvariable=self.status_message_summary_data,
                 foreground='red',
                 font='Helvetica 12').grid(row=3, column=0,
                                           padx=(10, 0),
                                           pady=5,
                                           sticky='w')
        # validation for file input
        def data_files_selected(tab_index):
            # refuse to open the popup until at least one CSV has been loaded
            if not self.data_analysis_object.selected_file_counter:
                self.status_message_summary_data.set('Please load in csv files!')
            else:
                self.status_message_summary_data.set("")
                self.create_popup()
                self.notebook_data_analysis.select(tab_index)
        tk.Button(frame_summary_data, text='Show tables',
                  width=18, height=1,
                  command=lambda : [data_files_selected(0)]).grid(row=1, column=0,
                                                                  padx=(10,0), pady=(10,5),
                                                                  sticky='w')
        tk.Button(frame_summary_data, text='Show visualizations',
                  width=18, height=1,
                  command=lambda: [data_files_selected(1)]).grid(row=2, column=0,
                                                                 padx=(10, 0),
                                                                 pady=5,
                                                                 sticky='w')

    # ------------------------- Data Analysis: Get data from SQL model
    def send_data_object(self, data):
        """Store the database-access object and pass it on to the DataAnalysis widget."""
        self.data_object = data
        self.data_analysis_object.get_data_object(self.data_object)

    def send_dict_paths(self, dict):
        """Store the file-path container and pass it on to the DataAnalysis widget.

        NOTE(review): the parameter name shadows the builtin ``dict``; renaming
        would be a (caller-visible) interface change, so it is only flagged here.
        """
        self.dict_paths = dict
        self.data_analysis_object.get_paths_dict(self.dict_paths)

    def send_save_file_object(self, data):
        """Remember the save-file handler later used by save_data()."""
        self.save_file_object = data

    def save_data(self):
        """Forward the current loaded-file counter to the save-file handler."""
        self.save_file_object.get_data_analysis(self.data_analysis_object.selected_file_counter)

    # ------------------------- Popup window
    def create_popup(self):
        """Build the 'Data Analysis' popup on first use; re-show it afterwards.

        The popup holds a two-tab notebook (summary tables / visualisations)
        plus the combobox/checkbox controls and their validation closures.
        Closing the popup only withdraws it (see hide_window), so state in the
        widgets survives between openings.
        """
        # if the popup does not exist yet, build it once; otherwise deiconify below
        if not self.popup_window:
            # create pop up window
            self.popup_window = tk.Toplevel()
            width = 1280
            height = 720
            position_left = 150
            position_right = 150
            self.popup_window.geometry("{}x{}+{}+{}".format(width, height, position_left, position_right))
            # set size window fixed
            self.popup_window.resizable(0, 0)
            self.popup_window.wm_title('Data Analysis')
            # make notebook
            self.notebook_data_analysis = ttk.Notebook(self.popup_window)
            # make tabs
            self.tab_tables = ttk.Frame(self.popup_window, width=c.Size.hd_frame_width, height=c.Size.hd_frame_height)
            self.tab_tables.pack(side="left", fill="both", expand=True)
            self.tab_visualisations = ttk.Frame(self.popup_window, width=c.Size.hd_frame_width, height=c.Size.hd_frame_height)
            self.tab_visualisations.pack(side="left", fill="both", expand=True)
            # add tabs to notebook
            self.notebook_data_analysis.add(self.tab_tables, text='Summary Tables')
            self.notebook_data_analysis.add(self.tab_visualisations, text='Visualisations')
            self.notebook_data_analysis.pack(side="left", fill="both", expand='true')
            # hide window if closed
            self.popup_window.protocol("WM_DELETE_WINDOW", lambda arg='popup': self.hide_window(arg))
            # prevent shrinking to widget size
            # self.tab_tables.pack_propagate(False)
            # self.tab_visualisations.pack_propagate(False)
            sql_check_file_id = "select distinct file_id from metric_value"
            retrieve_check_file_id = self.data_object.query_no_par(sql_check_file_id)
            # file_id encodes (target group, time frame): one id per combination,
            # matching the index comments used on the data-collection screens
            provider_id = [0, 4, 8, 12]
            leader_id = [1, 5, 9, 13]
            teacher_id = [2, 6, 10, 14]
            student_id = [3, 7, 11, 15]
            self.time_option_list = []
            time_option_dict = {
                0: "Start of project",
                1: "Halfway point of project",
                2: "End of project",
                3: "Year after end of project"
            }
            def get_target(event):
                # target combobox changed: rebuild the timeframe choices for it
                self.select_time_frame.set('')
                self.time_option_list = []
                self.input_target = event.widget.get()
                # check in database which timeframe is present
                for index, file_id in enumerate(retrieve_check_file_id):
                    if file_id[0] in provider_id:
                        if self.input_target == "Project Provider":
                            self.time_option_list.append(time_option_dict[provider_id.index(file_id[0])])
                    if file_id[0] in leader_id:
                        if self.input_target == "Community School Leader":
                            self.time_option_list.append(time_option_dict[leader_id.index(file_id[0])])
                    if file_id[0] in teacher_id:
                        if self.input_target == "Teacher":
                            self.time_option_list.append(time_option_dict[teacher_id.index(file_id[0])])
                    if file_id[0] in student_id:
                        if self.input_target == "Student":
                            self.time_option_list.append(time_option_dict[student_id.index(file_id[0])])
            def change_time_frame_box():
                # postcommand: fill the timeframe dropdown just before it opens
                if self.time_option_list:
                    self.select_time_frame["values"] = self.time_option_list
                else:
                    self.select_time_frame["values"] = ["(No data available)"]
            # label
            tk.Label(self.tab_tables,
                     text= 'Select the target group and time frame',
                     font='Helvetica 12').pack(side='top',
                                               anchor='nw',
                                               pady=5,
                                               padx=10)
            self.select_target = ttk.Combobox(
                self.tab_tables,
                state="readonly",
                values=["Project Provider",
                        "Community School Leader",
                        "Teacher",
                        "Student"
                        ])
            self.select_target.bind("<<ComboboxSelected>>", get_target)
            self.select_target.pack(side='top',
                                    anchor='nw',
                                    pady=5,
                                    padx=10)
            self.select_time_frame = ttk.Combobox(
                self.tab_tables,
                state="readonly",
                values="",
                postcommand=change_time_frame_box)
            self.select_time_frame.pack(side='top',
                                        anchor='nw',
                                        pady=5,
                                        padx=10)
            def remap_target(target):
                # UI label -> key fragment used in dict_paths.dc_file_paths
                if target == "Project Provider":
                    return 'provider'
                elif target == "Community School Leader":
                    return 'leader'
                elif target == "Teacher":
                    return 'teacher'
                else:
                    return 'student'
            def remap_target_sql(target):
                # UI label -> target_name value used in the metric table
                if target == "Project Provider":
                    return 'project_provider'
                elif target == "Community School Leader":
                    return 'community_school_leader'
                elif target == "Teacher":
                    return 'teacher'
                else:
                    return 'student'
            def remap_timeframe(time_frame):
                # UI label -> period abbreviation used in dict_paths keys
                if time_frame == "Start of project":
                    return 'sop'
                elif time_frame == "Halfway point of project":
                    return 'hop'
                elif time_frame == "End of project":
                    return 'eop'
                else:
                    return 'yap'
            def validate_combobox_input(state):
                # require both comboboxes to be filled before building the table
                if self.select_time_frame.get() == '' and self.select_target.get() == '':
                    self.status_message_tables.set('Select a timeframe and target!')
                elif self.select_time_frame.get() == '' and self.select_target.get() != '':
                    self.status_message_tables.set('Select a timeframe!')
                elif self.select_time_frame.get() != '' and self.select_target.get() == '':
                    self.status_message_tables.set('Select a target!')
                else:
                    input_key = remap_timeframe(self.select_time_frame.get()) + '_' + remap_target(self.select_target.get())
                    # NOTE(review): 'value' is never used after this lookup; the
                    # lookup may be serving as an existence check on the path —
                    # confirm before removing.
                    value = self.dict_paths.dc_file_paths[input_key]
                    # calculate the table based on user input
                    self.data_analysis_object.calculate_data(self.select_time_frame.get(), self.select_target.get())
                    if state == 'new':
                        self.data_analysis_object.make_table(self.tab_tables,
                                                             self.select_time_frame.get(),
                                                             self.select_target.get())
                        self.data_analysis_object.fill_table(self.data_analysis_object.tree)
                    else:
                        self.data_analysis_object.update_table(self.data_analysis_object.tree, self.tab_tables)
            # first click creates the table; the button then rebinds its own
            # command so subsequent clicks update the existing table
            create_table_button = tk.Button(self.tab_tables, text='Create table',
                                            width=18, height=1,
                                            command=lambda: [validate_combobox_input('new'),
                                                             create_table_button.configure(command=lambda: [validate_combobox_input('update')])
                                                             ])
            create_table_button.pack(side='top',
                                     anchor='nw',
                                     pady=5,
                                     padx=10)
            # convert to string var and set init text
            self.status_message_tables = tk.StringVar()
            self.status_message_tables.set("")
            # status message
            tk.Label(self.tab_tables,
                     textvariable=self.status_message_tables,
                     font='Helvetica 12', foreground='red').pack(side='top',
                                                                 anchor='nw',
                                                                 pady=5,
                                                                 padx=10)
            # ----------------- visualisations tab
            self.visualisation_frame = ttk.Frame(self.tab_visualisations, width=1000, height=300)
            self.visualisation_frame.pack(side="bottom",
                                          pady=0, padx=(10, 20))
            self.survey_question = tk.StringVar()
            self.survey_question.set('')
            tk.Label(self.tab_visualisations,
                     textvariable=self.survey_question,
                     font='Helvetica 12',
                     foreground = 'mediumpurple').pack(side='bottom',
                                                       pady=(10, 5),
                                                       padx=10)
            self.vis_option_1_frame = ttk.Frame(self.tab_visualisations, width=150, height=10)
            self.vis_option_1_frame.pack(side="left", fill="both")
            self.vis_option_2_frame = ttk.Frame(self.tab_visualisations, width=150, height=10)
            self.vis_option_2_frame.pack(side="left", fill="both")
            self.vis_option_3_frame = ttk.Frame(self.tab_visualisations, width=200, height=10)
            self.vis_option_3_frame.pack(side="left", fill="both")
            self.vis_option_4_frame = ttk.Frame(self.tab_visualisations, width=150, height=10)
            self.vis_option_4_frame.pack(side="left", fill="both")
            self.metric_option_list = []
            def update_metric_list():
                # timeframe checkbox toggled: refresh the metric choices
                time_frame_list = [var1.get(), var2.get(), var3.get(), var4.get()]
                self.metric_option_list = self.data_analysis_object.visualisation_get_metrics(self.select_target_visualisations.get(),
                                                                                              time_frame_list)
                if not self.metric_option_list:
                    self.select_metric.set("")
            def get_inputs(event):
                # target combobox changed: reset metric selection and refresh choices
                self.select_metric.set('')
                self.metric_option_list = []
                self.input_target_vis = event.widget.get()
                time_frame_list = [var1.get(), var2.get(), var3.get(), var4.get()]
                if self.input_target_vis and any(time_frame_list):
                    self.metric_option_list = self.data_analysis_object.visualisation_get_metrics(self.input_target_vis, time_frame_list)
            # label
            tk.Label(self.vis_option_1_frame,
                     text='Select the target group',
                     font='Helvetica 12').pack(
                anchor='nw',
                pady=(10, 5),
                padx=10)
            self.select_target_visualisations = ttk.Combobox(
                self.vis_option_1_frame,
                state="readonly",
                values=["Project Provider",
                        "Community School Leader",
                        "Teacher",
                        "Student"
                        ])
            self.select_target_visualisations.bind("<<ComboboxSelected>>", get_inputs)
            self.select_target_visualisations.pack(
                anchor='nw',
                pady=5,
                padx=10)
            tk.Label(self.vis_option_2_frame,
                     text='Select the time frame(s)',
                     font='Helvetica 12').pack(
                anchor='nw',
                pady=(10, 5),
                padx=10)
            var1 = tk.BooleanVar()
            tk.Checkbutton(self.vis_option_2_frame,
                           text="Start of project",
                           command=lambda : [update_metric_list()],
                           variable=var1).pack(
                padx = 10,
                anchor='nw')
            var2 = tk.BooleanVar()
            tk.Checkbutton(self.vis_option_2_frame,
                           text= "Halfway point of project",
                           command=lambda : [update_metric_list()],
                           variable=var2).pack(
                padx = 10,
                anchor='nw')
            var3 = tk.BooleanVar()
            tk.Checkbutton(self.vis_option_2_frame,
                           text="End of project",
                           command=lambda : [update_metric_list()],
                           variable=var3).pack(
                padx = 10,
                anchor='nw')
            var4 = tk.BooleanVar()
            tk.Checkbutton(self.vis_option_2_frame,
                           text="Year after end of project",
                           command=lambda: [update_metric_list()],
                           variable=var4).pack(
                padx = 10,
                anchor='nw')
            tk.Label(self.vis_option_3_frame,
                     text='Select metric',
                     font='Helvetica 12').pack(
                anchor='nw',
                pady=(10, 5),
                padx=10)
            def change_visualisation_metric_option():
                # postcommand: fill the metric dropdown just before it opens
                if self.metric_option_list:
                    self.select_metric["values"] = self.metric_option_list
                else:
                    self.select_metric["values"] = ["(No data available)"]
            self.select_metric = ttk.Combobox(
                self.vis_option_3_frame,
                width=40,
                state="readonly",
                postcommand=change_visualisation_metric_option,
                values='')
            self.select_metric.pack(
                anchor='nw',
                pady=5,
                padx=10)
            self.status_message_vis = tk.StringVar()
            self.status_message_vis.set('')
            def validate_visualisation_options(target, point, metric, state):
                # collect the names of missing inputs; empty list means all set
                message_list = []
                def point_selected():
                    # True if at least one timeframe checkbox is ticked
                    for time_point in point:
                        if time_point:
                            return True
                def status_message():
                    # join the missing-field names into one comma-separated line
                    message_string = 'Please select '
                    for index, message in enumerate(message_list):
                        # NOTE(review): 'is not' compares int identity; it works
                        # for these small CPython ints but should be '!='.
                        if index is not (len(message_list)-1):
                            message_string += message + ', '
                        else:
                            message_string += message
                    message_string += '!'
                    self.status_message_vis.set(message_string)
                if not target:
                    message_list.append('target group')
                if not point_selected():
                    message_list.append('time frame')
                if not metric or metric == "(No data available)" :
                    message_list.append('metric')
                # only show status message if one or more boxes are not filled in
                if not target or not point_selected() or not metric:
                    status_message()
                if not message_list:
                    sql_target = remap_target_sql(self.select_target_visualisations.get())
                    sql_metric = self.select_metric.get()
                    sql = 'select metric_question from metric where target_name = (?) and metric_name = (?)'
                    retrieve_question = self.data_object.query_with_par(sql, (sql_target, sql_metric))
                    metric_question = retrieve_question[0][0]
                    self.survey_question.set('Survey Question: ' + metric_question)
                    self.status_message_vis.set('')
                    if state == 'new':
                        self.data_analysis_object.create_visualisations(self.select_target_visualisations.get(),
                                                                        [var1.get(), var2.get(), var3.get(),
                                                                         var4.get()],
                                                                        self.select_metric.get(),
                                                                        self.visualisation_frame
                                                                        )
                    else:
                        # clear the old chart first, then draw into the emptied frame
                        self.visualisation_frame = self.refresh_vis_tab(self.tab_visualisations, self.visualisation_frame)
                        self.data_analysis_object.create_visualisations(self.select_target_visualisations.get(),
                                                                        [var1.get(), var2.get(), var3.get(),
                                                                         var4.get()],
                                                                        self.select_metric.get(),
                                                                        self.visualisation_frame
                                                                        )
            # first click draws the chart; the button then rebinds its own
            # command so subsequent clicks redraw in update mode
            create_visualisations_button = tk.Button(self.vis_option_4_frame, text='Create visualisation',
                                                     width=18, height=1,
                                                     command=lambda: [validate_visualisation_options(self.select_target_visualisations.get(),
                                                                                                     [var1.get(), var2.get(), var3.get(), var4.get()],
                                                                                                     self.select_metric.get(), "new"),
                                                                      create_visualisations_button.configure(command=lambda: [validate_visualisation_options(self.select_target_visualisations.get(),
                                                                                                                                                             [var1.get(), var2.get(), var3.get(), var4.get()],
                                                                                                                                                             self.select_metric.get(), "updated")])
                                                                      ])
            create_visualisations_button.pack(
                anchor='nw',
                pady=(40,5),
                padx=10)
            tk.Label(self.vis_option_4_frame,
                     textvariable=self.status_message_vis,
                     foreground='red').pack(
                anchor='nw',
                pady=(10, 5),
                padx=10)
            # focus on window
            window_obj = w.Window()
            window_obj.focus_window(self.popup_window)
        else:
            self.popup_window.deiconify()

    def hide_window(self, window):
        """Withdraw the popup instead of destroying it so widget state survives."""
        if window == "popup":
            self.popup_window.withdraw()

    def refresh_vis_tab(self, main_frame, frame_to_delete):
        """Destroy all child widgets of the visualisation frame and return it,
        ready for a fresh chart to be drawn into it."""
        # empty out widgets in frame
        for widget in frame_to_delete.winfo_children():
            widget.destroy()
        # frame_to_delete.destroy()
        #
        # self.visualisation_frame = ttk.Frame(main_frame, width=1000, height=300)
        #
        # self.visualisation_frame.pack(side="bottom",
        #                               pady=10, padx=(100, 20))
        return frame_to_delete
class EvaluationScreen(tk.Frame):
def __init__(self):
    """Build the phase-4 (Evaluation) layout.

    The screen contains the phase-4 checklist, two help-documentation
    buttons, a 'Show metric results' button, and a scrollable frame with
    free-text entry boxes for the metric/target evaluations and the seven
    standard evaluation questions (Q1-Q7).
    """
    tk.Frame.__init__(self)
    # NOTE(review): 'global popup_window_metrics' looks like a leftover —
    # only the instance attribute self.popup_window_metrics is used below.
    global popup_window_metrics
    self.popup_window_metrics = None
    frame_steps_4 = ttk.LabelFrame(self, text="Phase 4 Checklist",
                                   width=600,
                                   height=200,
                                   style="Doc.TLabelframe")
    frame_steps_4.pack(padx=(10, 0), pady=10,
                       side="right", fill="both", expand=True)
    # numbered top-level steps get extra top padding; sub-steps are indented
    for step in c.MethodSteps.phase_4:
        if step.startswith(('1', '2', '3', '4', '5')):
            tk.Label(frame_steps_4,
                     text=step).grid(sticky='w', padx=5, pady=(10, 0))
        else:
            tk.Label(frame_steps_4,
                     text=step, justify="left").grid(sticky='w', padx=(20, 10), pady=0)
    # make object
    self.impact_evaluation = w.ImpactEvaluation(self)
    frame_project_docs = ttk.LabelFrame(self, text="View help documentation",
                                        width=700,
                                        height=100,
                                        style="Doc.TLabelframe")
    frame_project_docs.pack(padx=(10, 0), pady = (10, 0),
                            side="top", anchor='w')
    tk.Button(frame_project_docs,
              text='4.1.1 Metrics Results',
              width=20, height=c.Size.button_height,
              command = lambda: [webbrowser.open(c.PdfFiles.metric_results)]).pack(padx=(10, 0), pady=(5, 15),
                                                                                   side='left',
                                                                                   anchor='nw',
                                                                                   )
    tk.Button(frame_project_docs,
              text='4.1.2 Evaluation',
              width=20, height=c.Size.button_height,
              command=lambda: [webbrowser.open(c.PdfFiles.evaluation)]).pack(padx=(10, 0), pady=(5, 15),
                                                                             side='left',
                                                                             anchor='nw',
                                                                             )
    # spacer label to force the documentation frame to a usable width
    tk.Label(frame_project_docs,
             text=" " * 130).pack()
    self.frame_main_evaluate= ttk.LabelFrame(self, text="4.1 Evaluate metric results, goals and targets",
                                             width=720, height=720)
    self.frame_main_evaluate.pack(padx=(10, 0), pady = (10, 0),
                                  side="top", fill= "both", expand=True)
    # spacer label to widen the evaluation frame
    tk.Label(self.frame_main_evaluate,
             text=" " * 237).pack()
    self.scrollable_labelframe = w.ScrollableFrame(self.frame_main_evaluate)
    # NOTE(review): the command calls check_if_data_loaded twice (update=0
    # then update=1, which also refreshes the tree) — confirm both calls
    # are intentional.
    show_results_button = tk.Button(self.scrollable_labelframe.scrollable_frame,
                                    text='Show metric results',
                                    width=20,
                                    command=lambda: [self.check_if_data_loaded(0), self.check_if_data_loaded(1)])
    show_results_button.pack(padx=(10, 0), pady=(10,5),
                             side='top',
                             anchor='nw',
                             )
    # convert to string var and set init text
    self.status_message_show_metrics = tk.StringVar()
    self.status_message_show_metrics.set("")
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             textvariable=self.status_message_show_metrics,
             foreground='red').pack(padx=(10, 0),
                                    side='top',
                                    anchor='nw')
    # --------------
    # metric evaluation
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Metric evaluation',
             font='Helvetica 12').pack(padx=(10, 0),
                                       side='top',
                                       anchor='nw')
    # create frame for entry box
    self.frame_entry_box_metric = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                            width=20, height=20)
    self.frame_entry_box_metric.pack(padx=(10, 0), pady=5,
                                     side='top',
                                     anchor='nw')
    self.entry_box_metric = ScrolledText(self.frame_entry_box_metric,
                                         width=c.Size.txt_box_width,
                                         height=c.Size.txt_box_height)
    self.entry_box_metric.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # -----------------
    # Target and goal evaluation
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Target and goal evaluation',
             font='Helvetica 12').pack(padx=(10, 0),
                                       side='top',
                                       anchor='nw')
    # create frame for entry box
    self.frame_entry_box_target = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                            width=20, height=20)
    self.frame_entry_box_target.pack(padx=(10, 0), pady=5,
                                     side='top',
                                     anchor='nw')
    self.entry_box_target = ScrolledText(self.frame_entry_box_target,
                                         width=c.Size.txt_box_width,
                                         height=c.Size.txt_box_height)
    self.entry_box_target.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # -----------------
    # Evaluation questions
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Evaluation questions',
             font='Helvetica 12 ').pack(padx=(10, 0), pady=(30,5),
                                        side='top',
                                        anchor='nw')
    # --- Q1
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Q1 - Is the impact desirable?').pack(padx=(10, 0),
                                                        side='top',
                                                        anchor='nw')
    # create frame for entry box
    self.frame_entry_box_question_1 = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                                width=20, height=20)
    self.frame_entry_box_question_1.pack(padx=(10, 0), pady=5,
                                         side='top',
                                         anchor='nw')
    self.entry_box_question_1 = ScrolledText(self.frame_entry_box_question_1,
                                             width=c.Size.txt_box_width,
                                             height=c.Size.txt_box_height)
    self.entry_box_question_1.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # --- Q2
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Q2 - What is the time of the impact? (Short/long term?)').pack(padx=(10, 0),
                                                                                  side='top',
                                                                                  anchor='nw')
    # NOTE(review): unlike the other question frames (20x20), this frame uses
    # the text-box dimensions — confirm whether that is intentional.
    self.frame_entry_box_question_2 = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                                width=c.Size.txt_box_width,
                                                height=c.Size.txt_box_height)
    self.frame_entry_box_question_2.pack(padx=(10, 0), pady=5,
                                         side='top',
                                         anchor='nw')
    self.entry_box_question_2 = ScrolledText(self.frame_entry_box_question_2,
                                             width=c.Size.txt_box_width,
                                             height=c.Size.txt_box_height)
    self.entry_box_question_2.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # --- Q3
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Q3 - Is the impact sustainable over time?').pack(padx=(10, 0),
                                                                    side='top',
                                                                    anchor='nw')
    self.frame_entry_box_question_3 = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                                width=20, height=20)
    self.frame_entry_box_question_3.pack(padx=(10, 0), pady=5,
                                         side='top',
                                         anchor='nw')
    self.entry_box_question_3 = ScrolledText(self.frame_entry_box_question_3,
                                             width=c.Size.txt_box_width,
                                             height=c.Size.txt_box_height)
    self.entry_box_question_3.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # --- Q4
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Q4 - What is the severity of the impact?').pack(padx=(10, 0),
                                                                   side='top',
                                                                   anchor='nw')
    self.frame_entry_box_question_4 = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                                width=20, height=20)
    self.frame_entry_box_question_4.pack(padx=(10, 0), pady=5,
                                         side='top',
                                         anchor='nw')
    self.entry_box_question_4 = ScrolledText(self.frame_entry_box_question_4,
                                             width=c.Size.txt_box_width,
                                             height=c.Size.txt_box_height)
    self.entry_box_question_4.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # --- Q5
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Q5 - What is the number of beneficiaries of the impact?').pack(padx=(10, 0),
                                                                                  side='top',
                                                                                  anchor='nw')
    self.frame_entry_box_question_5 = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                                width=20, height=20)
    self.frame_entry_box_question_5.pack(padx=(10, 0), pady=5,
                                         side='top',
                                         anchor='nw')
    self.entry_box_question_5 = ScrolledText(self.frame_entry_box_question_5,
                                             width=c.Size.txt_box_width,
                                             height=c.Size.txt_box_height)
    self.entry_box_question_5.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # --- Q6
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Q6 - What is the level of impact on different individuals in the community?').pack(padx=(10, 0),
                                                                                                      side='top',
                                                                                                      anchor='nw')
    self.frame_entry_box_question_6 = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                                width=20, height=20)
    self.frame_entry_box_question_6.pack(padx=(10, 0), pady=5,
                                         side='top',
                                         anchor='nw')
    self.entry_box_question_6 = ScrolledText(self.frame_entry_box_question_6,
                                             width=c.Size.txt_box_width,
                                             height=c.Size.txt_box_height)
    self.entry_box_question_6.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # --- Q7
    tk.Label(self.scrollable_labelframe.scrollable_frame,
             text='Q7 - Is the impact in line with the goals of the development project?').pack(padx=(10, 0),
                                                                                                side='top',
                                                                                                anchor='nw')
    self.frame_entry_box_question_7 = ttk.Frame(self.scrollable_labelframe.scrollable_frame,
                                                width=20, height=20)
    self.frame_entry_box_question_7.pack(padx=(10, 0), pady=5,
                                         side='top',
                                         anchor='nw')
    self.entry_box_question_7 = ScrolledText(self.frame_entry_box_question_7,
                                             width=c.Size.txt_box_width,
                                             height=c.Size.txt_box_height)
    self.entry_box_question_7.pack(padx=(10, 0), pady=5, side='top', anchor='nw')
    # --- pack scrollable frame
    self.scrollable_labelframe.pack(side="left", fill="both", expand=True)
def check_if_data_loaded(self, update):
if update:
self.impact_evaluation.refresh_tree()
# check if database has entries
sql = "SELECT DISTINCT metric_id FROM metric_value"
retrieve_sql = self.data_object.query_no_par(sql)
if retrieve_sql:
self.metric_results_window()
self.status_message_show_metrics.set('')
else:
self.status_message_show_metrics.set('Please load data first!')
    def metric_results_window(self):
        """Show the pop-up window with metric results, goals and targets.

        The window is created lazily on first call and afterwards only
        hidden/re-shown, so its treeview state survives between openings.
        """
        if not self.popup_window_metrics:
            # create pop up window
            self.popup_window_metrics = tk.Toplevel()
            width = 1280
            height = 720
            position_left = 150
            position_right = 150
            self.popup_window_metrics.geometry("{}x{}+{}+{}".format(width, height, position_left, position_right))
            # set size window fixed
            # self.popup_window_metrics.resizable(0, 0)
            self.popup_window_metrics.wm_title('Overview of metric results, goals and targets')
            # hide window if closed (withdraw instead of destroy, so the
            # window object can be re-used by deiconify below)
            self.popup_window_metrics.protocol("WM_DELETE_WINDOW", lambda arg='popup': self.hide_window(arg))
            # add refresh button
            tk.Button(self.popup_window_metrics,
                      text='Refresh',
                      width=20, height=c.Size.button_height,
                      command=self.impact_evaluation.refresh_tree).pack(pady=(10,0))
            self.impact_evaluation.create_treeview(self.popup_window_metrics)
            # focus on window
            window_obj = w.Window()
            window_obj.focus_window(self.popup_window_metrics)
        else:
            # Window already exists: un-hide it instead of rebuilding.
            self.popup_window_metrics.deiconify()
def send_data_object(self, data):
self.data_object = data
self.impact_evaluation.get_data_object(self.data_object)
def save_data(self):
self.user_input_objects = [self.entry_box_metric.get('1.0', 'end-1c'),
self.entry_box_target.get('1.0', 'end-1c'),
self.entry_box_question_1.get('1.0', 'end-1c'),
self.entry_box_question_2.get('1.0', 'end-1c'),
self.entry_box_question_3.get('1.0', 'end-1c'),
self.entry_box_question_4.get('1.0', 'end-1c'),
self.entry_box_question_5.get('1.0', 'end-1c'),
self.entry_box_question_6.get('1.0', 'end-1c'),
self.entry_box_question_7.get('1.0', 'end-1c')]
self.save_file_object.get_impact_evaluation(self.user_input_objects)
def send_save_file_object(self, data):
self.save_file_object = data
def hide_window(self, window):
if window == "popup":
self.popup_window_metrics.withdraw()
def restore_from_save_file(self):
self.entry_box_metric.insert('1.0', self.save_file_object.data['metric_evaluation'])
self.entry_box_target.insert('1.0', self.save_file_object.data['target_evaluation'])
self.entry_box_question_1.insert('1.0', self.save_file_object.data['eval_question_1'])
self.entry_box_question_2.insert('1.0', self.save_file_object.data['eval_question_2'])
self.entry_box_question_3.insert('1.0', self.save_file_object.data['eval_question_3'])
self.entry_box_question_4.insert('1.0', self.save_file_object.data['eval_question_4'])
self.entry_box_question_5.insert('1.0', self.save_file_object.data['eval_question_5'])
self.entry_box_question_6.insert('1.0', self.save_file_object.data['eval_question_6'])
self.entry_box_question_7.insert('1.0', self.save_file_object.data['eval_question_7'])
| 47.638542 | 188 | 0.466033 |
4317303d48d6875960d8793e612ca745361d9f64 | 1,678 | py | Python | var/spack/repos/builtin/packages/cereal/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/cereal/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/cereal/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cereal(CMakePackage):
    """cereal is a header-only C++11 serialization library. cereal takes
    arbitrary data types and reversibly turns them into different
    representations, such as compact binary encodings, XML, or
    JSON. cereal was designed to be fast, light-weight, and easy to
    extend - it has no external dependencies and can be easily bundled
    with other code or used standalone.
    """
    homepage = "http://uscilab.github.io/cereal/"
    url = "https://github.com/USCiLab/cereal/archive/v1.1.2.tar.gz"
    # Known releases with their md5 checksums (Spack verifies these on fetch).
    version('1.2.2', '4c56c7b9499dba79404250ef9a040481')
    version('1.2.1', '64476ed74c19068ee543b53ad3992261')
    version('1.2.0', 'e372c9814696481dbdb7d500e1410d2b')
    version('1.1.2', '34d4ad174acbff005c36d4d10e48cbb9')
    version('1.1.1', '0ceff308c38f37d5b5f6df3927451c27')
    version('1.1.0', '9f2d5f72e935c54f4c6d23e954ce699f')
    version('1.0.0', 'd1bacca70a95cec0ddbff68b0871296b')
    version('0.9.1', '8872d4444ff274ce6cd1ed364d0fc0ad')
    # Local patches shipped alongside this package recipe.
    patch("Boost.patch")
    patch("Boost2.patch", when="@1.2.2:")
    patch("pointers.patch")
    depends_on('cmake@2.6.2:', type='build')
    def cmake_args(self):
        # Boost is only used for self-tests, which we are not running (yet?)
        return [
            '-DCMAKE_DISABLE_FIND_PACKAGE_Boost=TRUE',
            '-DSKIP_PORTABILITY_TEST=TRUE',
            '-DJUST_INSTALL_CEREAL=On',
            '-DWITH_WERROR=Off',
        ]
| 38.136364 | 76 | 0.684744 |
31394ab41a95cb1f416b7464d4bf7ef3df74943f | 3,818 | py | Python | assignment4/run_experiment.py | fbrubacher/assignment23 | 007e3d54a58f1654137c177af6095ce64724471c | [
"MIT"
] | 148 | 2018-12-18T21:14:04.000Z | 2022-03-04T09:13:21.000Z | assignment4/run_experiment.py | fbrubacher/assignment23 | 007e3d54a58f1654137c177af6095ce64724471c | [
"MIT"
] | 22 | 2019-01-20T00:11:06.000Z | 2021-05-01T17:21:58.000Z | assignment4/run_experiment.py | fbrubacher/assignment23 | 007e3d54a58f1654137c177af6095ce64724471c | [
"MIT"
] | 172 | 2019-01-09T06:01:54.000Z | 2022-03-25T22:53:19.000Z | import argparse
from datetime import datetime
import logging
import random as rand
import numpy as np
import environments
import experiments
from experiments import plotting
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def run_experiment(experiment_detals, experiment, timing_key, verbose, timings):
    """Run *experiment* once per details entry and record total wall time.

    Each entry in *experiment_detals* is executed with the given
    experiment class; the elapsed seconds for the whole batch are stored
    in *timings* under *timing_key*.
    """
    started = datetime.now()
    for details in experiment_detals:
        logger.info("Running {} experiment: {}".format(timing_key, details.env_readable_name))
        experiment(details, verbose=verbose).perform()
    elapsed = datetime.now() - started
    timings[timing_key] = elapsed.seconds
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run MDP experiments')
    parser.add_argument('--threads', type=int, default=1, help='Number of threads (defaults to 1, -1 for auto)')
    parser.add_argument('--seed', type=int, help='A random seed to set, if desired')
    parser.add_argument('--policy', action='store_true', help='Run the policy iteration experiment')
    parser.add_argument('--value', action='store_true', help='Run the value iteration experiment')
    parser.add_argument('--q', action='store_true', help='Run the Q-Learner experiment')
    parser.add_argument('--all', action='store_true', help='Run all experiments')
    parser.add_argument('--plot', action='store_true', help='Plot data results')
    parser.add_argument('--verbose', action='store_true', help='If true, provide verbose output')
    args = parser.parse_args()
    verbose = args.verbose
    threads = args.threads
    seed = args.seed
    if seed is None:
        seed = np.random.randint(0, (2 ** 31) - 1)  # The openAI gym code does not like unit64
    logger.info("Using seed {}".format(seed))
    # Seed both numpy and the stdlib RNG so runs are reproducible.
    np.random.seed(seed)
    rand.seed(seed)
    logger.info("Creating MDPs")
    logger.info("----------")
    # The three MDP environments every selected experiment is run against.
    envs = [
        {
            # This is not really a rewarding frozen lake env, but the custom class has extra functionality
            'env': environments.get_rewarding_no_reward_frozen_lake_environment(),
            'name': 'frozen_lake',
            'readable_name': 'Frozen Lake (8x8)',
        },
        {
            'env': environments.get_large_rewarding_no_reward_frozen_lake_environment(),
            'name': 'large_frozen_lake',
            'readable_name': 'Frozen Lake (20x20)',
        },
        {
            'env': environments.get_windy_cliff_walking_environment(),
            'name': 'cliff_walking',
            'readable_name': 'Cliff Walking (4x12)',
        }
    ]
    experiment_details = []
    for env in envs:
        env['env'].seed(seed)
        logger.info('{}: State space: {}, Action space: {}'.format(env['readable_name'], env['env'].unwrapped.nS,
                                                                   env['env'].unwrapped.nA))
        experiment_details.append(experiments.ExperimentDetails(
            env['env'], env['name'], env['readable_name'],
            threads=threads,
            seed=seed
        ))
    if verbose:
        logger.info("----------")
    logger.info("Running experiments")
    # Wall-time per experiment family, filled in by run_experiment().
    timings = {}
    if args.policy or args.all:
        run_experiment(experiment_details, experiments.PolicyIterationExperiment, 'PI', verbose, timings)
    if args.value or args.all:
        run_experiment(experiment_details, experiments.ValueIterationExperiment, 'VI', verbose, timings)
    if args.q or args.all:
        run_experiment(experiment_details, experiments.QLearnerExperiment, 'Q', verbose, timings)
    logger.info(timings)
    if args.plot:
        if verbose:
            logger.info("----------")
        logger.info("Plotting results")
        plotting.plot_results(envs)
ffdde4590b7ca7db6d5b6ca73cc0f5e2cb629a0b | 1,451 | py | Python | tests/test_config.py | iliadmitriev/auth-fapi | a219efd5a8daa9d5f876355cde6b974021ca7890 | [
"MIT"
] | null | null | null | tests/test_config.py | iliadmitriev/auth-fapi | a219efd5a8daa9d5f876355cde6b974021ca7890 | [
"MIT"
] | 131 | 2021-09-29T06:07:18.000Z | 2022-03-31T08:29:30.000Z | tests/test_config.py | iliadmitriev/auth-fapi | a219efd5a8daa9d5f876355cde6b974021ca7890 | [
"MIT"
] | null | null | null | import importlib
import os
from unittest import mock
def test_import_config():
    """Settings read from the environment must be exposed as module attributes."""
    env = {
        'DATABASE_NAME': 'database',
        'DATABASE_USER': 'user',
        'DATABASE_PASSWORD': 'pass',
        'DATABASE_HOST': 'host',
        'DATABASE_PORT': '5432',
        'DATABASE_DRIVER': 'postgresql+asyncpg',
    }
    with mock.patch.dict(os.environ, env):
        from config import connection
        # Reload so the module re-reads the patched environment.
        importlib.reload(connection)
        for key, expected in env.items():
            assert getattr(connection, key) == expected
def test_db_url():
    """The DSN must be assembled as driver://user:password@host:port/name."""
    env = {
        'DATABASE_NAME': 'database',
        'DATABASE_USER': 'user',
        'DATABASE_PASSWORD': 'pass',
        'DATABASE_HOST': 'host',
        'DATABASE_PORT': '5432',
        'DATABASE_DRIVER': 'postgresql+asyncpg',
    }
    with mock.patch.dict(os.environ, env):
        from config import connection
        # Reload so DATABASE_URL is rebuilt from the patched environment.
        importlib.reload(connection)
        assert connection.DATABASE_URL == 'postgresql+asyncpg://user:pass@host:5432/database'
def test_redis_url():
    """REDIS_URL must be taken verbatim from the environment."""
    with mock.patch.dict(os.environ, {'REDIS_URL': 'redis_secret_url'}):
        from config import connection
        # Reload so the module re-reads the patched environment.
        importlib.reload(connection)
        assert connection.REDIS_URL == 'redis_secret_url'
| 31.543478 | 93 | 0.643694 |
bca1d723a2e0d6ef4c2039b822cce2b0b260f383 | 376 | py | Python | asyncfileserver/model/confirm_command.py | tarc/echo_server | 3517778b97f13bce47ef8c7fb919e9f0925f4784 | [
"MIT"
] | 1 | 2021-05-02T03:49:20.000Z | 2021-05-02T03:49:20.000Z | asyncfileserver/model/confirm_command.py | tarc/echo_server | 3517778b97f13bce47ef8c7fb919e9f0925f4784 | [
"MIT"
] | 20 | 2020-03-30T00:00:27.000Z | 2022-01-21T19:12:16.000Z | asyncfileserver/model/confirm_command.py | tarc/echo_server | 3517778b97f13bce47ef8c7fb919e9f0925f4784 | [
"MIT"
] | null | null | null | import re
class ConfirmCommand(object):
_pattern = re.compile(b'\W')
def __init__(self, data: bytearray):
self._data = self._format(data)
def go_on(self):
return self._data == b"C"
def yes(self):
return self._data == b"Y"
def _format(self, data):
upper = data.upper()
return re.sub(self._pattern, b'', upper)
| 19.789474 | 49 | 0.587766 |
9e6efbdb1ba8e3dda89b4b6c878ff73bbca83450 | 1,389 | py | Python | setup.py | miesli/mopidy-mqtt | 5442d3b371f225c4fc981bb095485d6639bea46f | [
"Apache-2.0"
] | 16 | 2017-11-19T13:54:20.000Z | 2021-02-02T09:52:02.000Z | setup.py | miesli/mopidy-mqtt | 5442d3b371f225c4fc981bb095485d6639bea46f | [
"Apache-2.0"
] | 7 | 2017-11-21T14:43:37.000Z | 2020-09-20T08:46:52.000Z | setup.py | miesli/mopidy-mqtt | 5442d3b371f225c4fc981bb095485d6639bea46f | [
"Apache-2.0"
] | 16 | 2017-08-30T20:30:22.000Z | 2021-01-15T21:54:28.000Z | # future imports
from __future__ import unicode_literals
# stdlib imports
import re
from setuptools import find_packages
from setuptools import setup
def get_version(filename):
    """Extract ``__version__`` from *filename* without importing the module.

    Scans for ``__name__ = 'value'`` style dunder assignments and returns
    the value bound to ``version``.

    Raises:
        KeyError: if no ``__version__`` assignment is found.
    """
    # Use a context manager so the file handle is always closed
    # (the original open(...).read() leaked the handle).
    with open(filename) as fh:
        content = fh.read()
    metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
    return metadata['version']
# Package metadata for the Mopidy-MQTT extension; version is read from
# the package's __init__.py rather than duplicated here.
setup(
    name='Mopidy-MQTT',
    version=get_version('mopidy_mqtt/__init__.py'),
    url='',
    license='Apache License, Version 2.0',
    author='magcode',
    author_email='',
    description='Mopidy extension that sends POSTs to Openhab',
    long_description=open('README.md').read(),
    packages=find_packages(exclude=['tests', 'tests.*']),
    zip_safe=False,
    include_package_data=True,
    install_requires=[
        'setuptools',
        'Mopidy >= 0.18',
        'Pykka >= 1.1',
        'paho-mqtt'
    ],
    test_suite='nose.collector',
    tests_require=[
        'nose',
        'mock >= 1.0',
    ],
    entry_points={
        # Registers the extension so Mopidy discovers it at startup.
        'mopidy.ext': [
            'mqtthook = mopidy_mqtt:Extension',
        ],
    },
    classifiers=[
        'Environment :: No Input/Output (Daemon)',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Topic :: Multimedia :: Sound/Audio :: Players',
    ],
)
| 26.207547 | 68 | 0.605472 |
0fde0a78b7c99fb2fe6c5facff75c3300ddc00ab | 10,812 | py | Python | fluid/PaddleCV/object_detection/main_quant.py | JiabinYang/models | 2c4a77ed79a18844a38b0bf19d6a92d16ce7eda3 | [
"Apache-2.0"
] | null | null | null | fluid/PaddleCV/object_detection/main_quant.py | JiabinYang/models | 2c4a77ed79a18844a38b0bf19d6a92d16ce7eda3 | [
"Apache-2.0"
] | null | null | null | fluid/PaddleCV/object_detection/main_quant.py | JiabinYang/models | 2c4a77ed79a18844a38b0bf19d6a92d16ce7eda3 | [
"Apache-2.0"
] | null | null | null | import os
import time
import numpy as np
import argparse
import functools
import shutil
import math
import paddle
import paddle.fluid as fluid
import reader
from mobilenet_ssd import mobile_net
from utility import add_arguments, print_arguments
from train import build_program
from train import train_parameters
from infer import draw_bounding_box_on_image
# Command-line interface: add_arg is a shorthand bound to this parser.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('learning_rate',    float, 0.0001,              "Learning rate.")
add_arg('batch_size',       int,   64,                  "Minibatch size.")
add_arg('epoc_num',         int,   20,                  "Epoch number.")
add_arg('use_gpu',          bool,  True,                "Whether use GPU.")
add_arg('parallel',         bool,  True,                "Whether train in parallel on multi-devices.")
add_arg('model_save_dir',   str,   'quant_model',       "The path to save model.")
add_arg('init_model',       str,   'ssd_mobilenet_v1_pascalvoc', "The init model path.")
add_arg('ap_version',       str,   '11point',           "mAP version can be integral or 11point.")
add_arg('image_shape',      str,   '3,300,300',         "Input image shape.")
add_arg('mean_BGR',         str,   '127.5,127.5,127.5', "Mean value for B,G,R channel which will be subtracted.")
add_arg('lr_epochs',        str,   '30,60',             "The learning decay steps.")
add_arg('lr_decay_rates',   str,   '1,0.1,0.01',        "The learning decay rates for each step.")
add_arg('data_dir',         str,   'data/pascalvoc',    "Data directory")
add_arg('act_quant_type',   str,   'abs_max',           "Quantize type of activation, whicn can be abs_max or range_abs_max")
add_arg('image_path',       str,   '',                  "The image used to inference and visualize.")
add_arg('confs_threshold',  float, 0.5,                 "Confidence threshold to draw bbox.")
add_arg('mode',             str,   'train',             "Job mode can be one of ['train', 'test', 'infer'].")
def test(exe, test_prog, map_eval, test_py_reader):
    """Run one full pass over the test reader and return the final mAP.

    Args:
        exe: fluid Executor used to run the test program.
        test_prog: test Program whose fetch target is the accumulated mAP.
        map_eval: fluid DetectionMAP evaluator (owns the accumulator state).
        test_py_reader: py_reader feeding test batches; consumed until EOF.

    Returns:
        The accumulated mAP after consuming the whole reader, or 0.0 if
        the reader yielded no batches at all.
    """
    _, accum_map = map_eval.get_map_var()
    map_eval.reset(exe)
    # Default value so the final print/return are defined even when EOF
    # fires before the first batch (the original raised NameError there).
    test_map = 0.0
    test_py_reader.start()
    try:
        batch = 0
        while True:
            test_map, = exe.run(test_prog, fetch_list=[accum_map])
            if batch % 10 == 0:
                print("Batch {0}, map {1}".format(batch, test_map))
            batch += 1
    except fluid.core.EOFException:
        # EOF is the normal loop-exit signal; cleanup happens in finally.
        pass
    finally:
        test_py_reader.reset()
    print("Test map {0}".format(test_map))
    return test_map
def save_model(exe, main_prog, model_save_dir, postfix):
    """Persist all persistable variables of *main_prog* under <dir>/<postfix>.

    An existing directory with the same name is removed first so stale
    checkpoint files never survive a re-save.
    """
    target_dir = os.path.join(model_save_dir, postfix)
    # Wipe any previous checkpoint with the same name before writing.
    if os.path.isdir(target_dir):
        shutil.rmtree(target_dir)
    fluid.io.save_persistables(exe, target_dir, main_program=main_prog)
def train(args,
          data_args,
          train_params,
          train_file_list,
          val_file_list):
    """Quantization-aware training loop for the mobilenet-SSD detector.

    Builds train/test programs, applies the QuantizeTranspiler to both,
    optionally loads an init model, then trains for ``epoc_num`` epochs,
    testing after every epoch and checkpointing each epoch plus the best
    mAP model seen so far.
    """
    model_save_dir = args.model_save_dir
    init_model = args.init_model
    epoc_num = args.epoc_num
    use_gpu = args.use_gpu
    parallel = args.parallel
    is_shuffle = True
    act_quant_type = args.act_quant_type
    if use_gpu:
        devices_num = fluid.core.get_cuda_device_count()
    else:
        devices_num = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    batch_size = train_params['batch_size']
    # Per-device batch so the global batch is split across devices.
    batch_size_per_device = batch_size // devices_num
    iters_per_epoc = train_params["train_images"] // batch_size
    num_workers = 4
    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    test_prog = fluid.Program()
    train_py_reader, loss = build_program(
        main_prog=train_prog,
        startup_prog=startup_prog,
        train_params=train_params,
        is_train=True)
    test_py_reader, map_eval, _, _ = build_program(
        main_prog=test_prog,
        startup_prog=startup_prog,
        train_params=train_params,
        is_train=False)
    test_prog = test_prog.clone(for_test=True)
    # Insert fake-quant/dequant ops into both programs for QAT.
    transpiler = fluid.contrib.QuantizeTranspiler(weight_bits=8,
                                       activation_bits=8,
                                       activation_quantize_type=act_quant_type,
                                       weight_quantize_type='abs_max')
    transpiler.training_transpile(train_prog, startup_prog)
    transpiler.training_transpile(test_prog, startup_prog)
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    if init_model:
        print('Load init model %s.' % init_model)
        def if_exist(var):
            # Only load variables for which a file exists in init_model.
            return os.path.exists(os.path.join(init_model, var.name))
        fluid.io.load_vars(exe, init_model, main_program=train_prog,
                           predicate=if_exist)
    else:
        print('There is no init model.')
    if parallel:
        train_exe = fluid.ParallelExecutor(main_program=train_prog,
            use_cuda=use_gpu, loss_name=loss.name)
    train_reader = reader.train(data_args,
                                train_file_list,
                                batch_size_per_device,
                                shuffle=is_shuffle,
                                use_multiprocessing=True,
                                num_workers=num_workers,
                                max_queue=24)
    test_reader = reader.test(data_args, val_file_list, batch_size)
    train_py_reader.decorate_paddle_reader(train_reader)
    test_py_reader.decorate_paddle_reader(test_reader)
    train_py_reader.start()
    best_map = 0.
    try:
        for epoc in range(epoc_num):
            if epoc == 0:
                # test quantized model without quantization-aware training.
                test_map = test(exe, test_prog, map_eval, test_py_reader)
            # train
            for batch in range(iters_per_epoc):
                start_time = time.time()
                if parallel:
                    outs = train_exe.run(fetch_list=[loss.name])
                else:
                    outs = exe.run(train_prog, fetch_list=[loss])
                end_time = time.time()
                avg_loss = np.mean(np.array(outs[0]))
                if batch % 20 == 0:
                    print("Epoc {:d}, batch {:d}, loss {:.6f}, time {:.5f}".format(
                        epoc , batch, avg_loss, end_time - start_time))
            end_time = time.time()
            # Evaluate and checkpoint after every epoch; keep the best model.
            test_map = test(exe, test_prog, map_eval, test_py_reader)
            save_model(exe, train_prog, model_save_dir, str(epoc))
            if test_map > best_map:
                best_map = test_map
                save_model(exe, train_prog, model_save_dir, 'best_map')
            print("Best test map {0}".format(best_map))
    except (fluid.core.EOFException, StopIteration):
        # Reader exhausted early; reset so it can be restarted cleanly.
        train_py_reader.reset()
def eval(args, data_args, configs, val_file_list):
    """Evaluate a quantization-trained model and export an inference model.

    Rebuilds the (transpiled) test program, loads the trained weights,
    freezes the fake-quant graph for inference, reports the test mAP and
    saves the frozen inference model to ``model_save_dir``.
    """
    init_model = args.init_model
    use_gpu = args.use_gpu
    act_quant_type = args.act_quant_type
    model_save_dir = args.model_save_dir
    batch_size = configs['batch_size']
    batch_size_per_device = batch_size
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    test_py_reader, map_eval, nmsed_out, image = build_program(
        main_prog=test_prog,
        startup_prog=startup_prog,
        train_params=configs,
        is_train=False)
    test_prog = test_prog.clone(for_test=True)
    # Must match the transpiler configuration used during training so the
    # saved quantized variables line up with the graph.
    transpiler = fluid.contrib.QuantizeTranspiler(weight_bits=8,
                                       activation_bits=8,
                                       activation_quantize_type=act_quant_type,
                                       weight_quantize_type='abs_max')
    transpiler.training_transpile(test_prog, startup_prog)
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    def if_exist(var):
        # Only load variables for which a file exists in init_model.
        return os.path.exists(os.path.join(init_model, var.name))
    fluid.io.load_vars(exe, init_model, main_program=test_prog,
                       predicate=if_exist)
    # freeze after load parameters
    transpiler.freeze_program(test_prog, place)
    test_reader = reader.test(data_args, val_file_list, batch_size)
    test_py_reader.decorate_paddle_reader(test_reader)
    test_map = test(exe, test_prog, map_eval, test_py_reader)
    print("Test model {0}, map {1}".format(init_model, test_map))
    # Export the frozen graph so infer() can load it with load_inference_model.
    fluid.io.save_inference_model(model_save_dir, [image.name],
                                  [nmsed_out], exe, test_prog)
def infer(args, data_args):
    """Run single-image inference and draw the detected bounding boxes.

    Loads the exported inference model from ``args.init_model``, feeds it
    one preprocessed image and renders boxes whose confidence exceeds
    ``args.confs_threshold`` onto the image.
    """
    model_dir = args.init_model
    image_path = args.image_path
    confs_threshold = args.confs_threshold
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    [inference_program, feed , fetch] = fluid.io.load_inference_model(
        dirname=model_dir,
        executor=exe,
        model_filename='__model__')
    #print(np.array(fluid.global_scope().find_var('conv2d_20.w_0').get_tensor()))
    #print(np.max(np.array(fluid.global_scope().find_var('conv2d_20.w_0').get_tensor())))
    infer_reader = reader.infer(data_args, image_path)
    data = infer_reader()
    # Add a leading batch dimension of 1 for the single image.
    data = data.reshape((1,) + data.shape)
    outs = exe.run(inference_program,
                   feed={feed[0]: data},
                   fetch_list=fetch,
                   return_numpy=False)
    out = np.array(outs[0])
    draw_bounding_box_on_image(image_path, out, confs_threshold,
                               data_args.label_list)
if __name__ == '__main__':
    args = parser.parse_args()
    print_arguments(args)
    # for pascalvoc
    label_file = 'label_list'
    train_list = 'trainval.txt'
    val_list = 'test.txt'
    dataset = 'pascalvoc'
    # Parse comma-separated CLI strings into numeric lists.
    mean_BGR = [float(m) for m in args.mean_BGR.split(",")]
    image_shape = [int(m) for m in args.image_shape.split(",")]
    lr_epochs = [int(m) for m in args.lr_epochs.split(",")]
    lr_rates = [float(m) for m in args.lr_decay_rates.split(",")]
    # Override the dataset defaults with CLI-provided hyperparameters.
    train_parameters[dataset]['image_shape'] = image_shape
    train_parameters[dataset]['batch_size'] = args.batch_size
    train_parameters[dataset]['lr'] = args.learning_rate
    train_parameters[dataset]['epoc_num'] = args.epoc_num
    train_parameters[dataset]['ap_version'] = args.ap_version
    train_parameters[dataset]['lr_epochs'] = lr_epochs
    train_parameters[dataset]['lr_decay'] = lr_rates
    data_args = reader.Settings(
        dataset=dataset,
        data_dir=args.data_dir,
        label_file=label_file,
        resize_h=image_shape[1],
        resize_w=image_shape[2],
        mean_value=mean_BGR,
        apply_distort=True,
        apply_expand=True,
        ap_version = args.ap_version)
    # Dispatch on job mode; anything other than train/test falls through
    # to single-image inference.
    if args.mode == 'train':
        train(args, data_args, train_parameters[dataset], train_list, val_list)
    elif args.mode == 'test':
        eval(args, data_args, train_parameters[dataset], val_list)
    else:
        infer(args, data_args)
6f99682ff9425be9de7808f408037d8f53e75868 | 281 | py | Python | detector/utils/bbox/setup.py | AnandK27/English_OCR | 136e4ccf69f8162787f5002c1f989c76f8785217 | [
"MIT"
] | null | null | null | detector/utils/bbox/setup.py | AnandK27/English_OCR | 136e4ccf69f8162787f5002c1f989c76f8785217 | [
"MIT"
] | null | null | null | detector/utils/bbox/setup.py | AnandK27/English_OCR | 136e4ccf69f8162787f5002c1f989c76f8785217 | [
"MIT"
] | null | null | null | from distutils.core import setup
import setuptools
import numpy as np
from Cython.Build import cythonize
numpy_include = np.get_include()
setup(ext_modules=cythonize("bbox.pyx"), include_dirs=[numpy_include])
setup(ext_modules=cythonize("nms.pyx"), include_dirs=[numpy_include])
| 31.222222 | 70 | 0.811388 |
1c382f14053fc6ec2c7e73bebc1a8bdd0037777f | 333 | py | Python | scripts/5.2.plot_stability.py | nmningmei/agent_models | 8380f1203e6d5a18f18f9adeb6bd36b23b2ae61b | [
"MIT"
] | null | null | null | scripts/5.2.plot_stability.py | nmningmei/agent_models | 8380f1203e6d5a18f18f9adeb6bd36b23b2ae61b | [
"MIT"
] | null | null | null | scripts/5.2.plot_stability.py | nmningmei/agent_models | 8380f1203e6d5a18f18f9adeb6bd36b23b2ae61b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 08:37:19 2020
@author: nmei
"""
import os
import gc
from glob import glob
from tqdm import tqdm
import numpy as np
import pandas as pd
import seaborn as sns
# Collect every stability result saved as .npy files one directory level
# below ../stability.
working_dir = '../stability'
working_data = glob(os.path.join(working_dir,'*','stability*.npy'))
| 15.136364 | 67 | 0.6997 |
4e29b93460cf8c57db12b873e398cd2f37a73df2 | 1,809 | py | Python | tutorial.py | Harsha470007/PIZZA2 | 73e146af9a233c0b4be13436c2ad566980a0062d | [
"MIT"
] | 1 | 2020-09-30T09:35:45.000Z | 2020-09-30T09:35:45.000Z | tutorial.py | Harsha470007/PIZZA2 | 73e146af9a233c0b4be13436c2ad566980a0062d | [
"MIT"
] | null | null | null | tutorial.py | Harsha470007/PIZZA2 | 73e146af9a233c0b4be13436c2ad566980a0062d | [
"MIT"
] | null | null | null | from pizzapy import Customer, StoreLocator, Order, ConsoleInput
def searchMenu(menu):
    """Prompt the user for an item name and print matching menu entries.

    Input is normalised to first-letter-upper/rest-lower before the
    lookup; entries shorter than two characters are treated as no match.
    """
    print("You are now searching the menu...")
    query = input("Type an item to look for: ").strip().lower()
    if len(query) <= 1:
        print("No Results")
        return
    query = query[0].upper() + query[1:]
    print(f"Results for: {query}\n")
    menu.search(Name=query)
    print()
def addToOrder(order):
    """Read item codes from the user and add each one to *order*.

    An empty entry ends the loop; any code the menu rejects is reported
    as invalid and the user is prompted again.
    """
    print("Please type the codes of the items you'd like to order...")
    print("Press ENTER to stop ordering.")
    while True:
        item = input("Code: ").upper()
        try:
            order.add_item(item)
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit are no
        # longer swallowed mid-order.
        except Exception:
            # add_item raises for unknown codes; an empty entry means
            # the user is done ordering.
            if item == "":
                break
            print("Invalid Code...")
# Interactive ordering flow: find the nearest store, browse the menu,
# build an order, then pay and place it.
customer = ConsoleInput.get_new_customer()
my_local_dominos = StoreLocator.find_closest_store_to_customer(customer)
print("\nClosest Store:")
print(my_local_dominos)
ans = input("Would you like to order from this store? (Y/N)")
if ans.lower() not in ["yes", "y"]:
    print("Goodbye!")
    quit()
print("\nMENU\n")
menu = my_local_dominos.get_menu()
order = Order.begin_customer_order(customer, my_local_dominos)
# Keep searching/adding until the user declines to add more items.
while True:
    searchMenu(menu)
    addToOrder(order)
    answer = input("Would you like to add more items (y/n)? ")
    if answer.lower() not in ["yes", "y"]:
        break
total = 0
print("\nYour order is as follows: ")
# Prices come back as strings; accumulate them as floats for the total.
for item in order.data["Products"]:
    price = item["Price"]
    print(item["Name"] + " $" + price)
    total += float(price)
print("\nYour order total is: $" + str(total) + " + TAX\n")
payment = input("\nWill you be paying CASH or CREDIT CARD? (CASH, CREDIT CARD)")
if payment.lower() in ["card", "credit card"]:
    card = ConsoleInput.get_credit_card()
else:
    # False signals a cash payment to order.place().
    card = False
ans = input("Would you like to place this order? (y/n)")
if ans.lower() in ["y", "yes"]:
    order.place(card)
    my_local_dominos.place_order(order, card)
    print("Order Placed!")
else:
    print("Goodbye!")
| 24.445946 | 80 | 0.678275 |
3391eff2fe0f0ea6b285fd4fabf567edc5dfa932 | 949 | py | Python | ReSpider/extend/redis/spider.py | zaoxg/ReSpiderFramework | bd49ffb79867f5cd0eceb5f820498fbbf8611a60 | [
"MIT"
] | 1 | 2022-03-27T15:03:36.000Z | 2022-03-27T15:03:36.000Z | ReSpider/extend/redis/spider.py | zaoxg/ReSpiderFramework | bd49ffb79867f5cd0eceb5f820498fbbf8611a60 | [
"MIT"
] | null | null | null | ReSpider/extend/redis/spider.py | zaoxg/ReSpiderFramework | bd49ffb79867f5cd0eceb5f820498fbbf8611a60 | [
"MIT"
] | null | null | null | import ReSpider.setting as setting
from ReSpider.core.spiders import Crawler
# from ...extend import SettingLoader
class RedisSpider(Crawler):
"""
redis spider
实现一个函数用来解析队列的任务
"""
name = 'redis_spider'
redis_key = None
# settings = SettingLoader.from_crawler()
def __init__(self, redis_key: str = None, **kwargs):
super().__init__()
setting.SCHEDULER = 'ReSpider.extend.redis.scheduler.RedisScheduler'
setting.ALWAYS_RUNNING = True
self.__dict__.update(**kwargs)
if redis_key is None:
raise AttributeError("Init Error")
self.redis_key = redis_key
def make_request(self, **kwargs):
"""
如果使用这个Spider,
意味着将持续运行,
从queue中取出的不一定是Request,
所以需要重写这个function,
来实现自己想要的Request
"""
raise NotImplementedError
def start_requests(self):
pass
def parse(self, response):
pass
| 24.333333 | 76 | 0.632244 |
dd3c974659b437c58ece18079bdbb8679c257fab | 320 | py | Python | challenge_1/python/hkl0902/challenge_1.py | rchicoli/2017-challenges | 44f0b672e5dea34de1dde131b6df837d462f8e29 | [
"Apache-2.0"
] | 271 | 2017-01-01T22:58:36.000Z | 2021-11-28T23:05:29.000Z | challenge_1/python/hkl0902/challenge_1.py | AakashOfficial/2017Challenges | a8f556f1d5b43c099a0394384c8bc2d826f9d287 | [
"Apache-2.0"
] | 283 | 2017-01-01T23:26:05.000Z | 2018-03-23T00:48:55.000Z | challenge_1/python/hkl0902/challenge_1.py | AakashOfficial/2017Challenges | a8f556f1d5b43c099a0394384c8bc2d826f9d287 | [
"Apache-2.0"
] | 311 | 2017-01-01T22:59:23.000Z | 2021-09-23T00:29:12.000Z | import sys
'''
Given a string S, returns the reverse of S
'''
def reverse(s):
r = ""
for i in range(len(s)):
r += s[len(s) - i - 1]
return r
if __name__ == "__main__":
args = sys.argv
if len(args) > 1:
print(reverse(args[1]))
else:
print("Give me string to reverse")
| 15.238095 | 42 | 0.53125 |
a3451ec3e47c95fa42334794a9e5c5191155cec0 | 629 | py | Python | everscan.py | kkerwin1/everscan | 413012ceee22be1580fcf552fbb25def0aed32f0 | [
"BSD-2-Clause"
] | null | null | null | everscan.py | kkerwin1/everscan | 413012ceee22be1580fcf552fbb25def0aed32f0 | [
"BSD-2-Clause"
] | null | null | null | everscan.py | kkerwin1/everscan | 413012ceee22be1580fcf552fbb25def0aed32f0 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# everscan/everscan.py
from modules.scanning import ScanningManager
from modules.evernote import EvernoteManager
from modules.interface import InterfaceManager
from modules.imaging import ImagingManager
class EverscanMaster:
    """
    Everscan master class.
    Facilitates communication between child objects.
    """
    def __init__(self):
        # Initialize child manager objects.
        # Each manager receives `self` so it can reach its siblings
        # through this master instance.
        self.m_scanning = ScanningManager(self)
        self.m_evernote = EvernoteManager(self)
        self.m_interface = InterfaceManager(self)
        self.m_imaging = ImagingManager(self)
db2688e607c06244ffd31b8c5864347ba4c3c0b5 | 3,070 | py | Python | migrations/versions/a22bbe42b9e3_migrate_model_classes.py | Munene19/IP9-Pitch | 840e8f1ecf42f39f0467d875f35402c8eaeb5d28 | [
"MIT"
] | null | null | null | migrations/versions/a22bbe42b9e3_migrate_model_classes.py | Munene19/IP9-Pitch | 840e8f1ecf42f39f0467d875f35402c8eaeb5d28 | [
"MIT"
] | null | null | null | migrations/versions/a22bbe42b9e3_migrate_model_classes.py | Munene19/IP9-Pitch | 840e8f1ecf42f39f0467d875f35402c8eaeb5d28 | [
"MIT"
] | null | null | null | """migrate model classes
Revision ID: a22bbe42b9e3
Revises:
Create Date: 2020-01-17 08:48:32.493810
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a22bbe42b9e3'
down_revision = None  # first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the category/user/photoprofiles/pitches/comments/votes tables.

    Tables are created parents-first so every foreign key target exists
    before it is referenced.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('category',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('pass_secure', sa.String(length=255), nullable=True),
    sa.Column('category', sa.Integer(), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_pic_path', sa.String(length=100), nullable=True),
    sa.ForeignKeyConstraint(['category'], ['category.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('photoprofiles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('pic_path', sa.String(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('pitches',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=255), nullable=True),
    sa.Column('content', sa.String(), nullable=True),
    sa.Column('category', sa.Integer(), nullable=True),
    sa.Column('vote', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['category'], ['category.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('feedback', sa.String(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('pitch_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('votes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('vote', sa.Integer(), nullable=True),
    sa.Column('pitch_id', sa.Integer(), nullable=True),
    sa.Column('comment_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['comment_id'], ['comments.id'], ),
    sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('votes')
op.drop_table('comments')
op.drop_table('pitches')
op.drop_table('photoprofiles')
op.drop_table('user')
op.drop_table('category')
# ### end Alembic commands ###
| 36.117647 | 72 | 0.656678 |
43be48b36f185b6ac2c7df31394a8b771e9dc49b | 1,813 | py | Python | moto/cloudwatch/models.py | EvaSDK/moto | 8095f31772fb144f2045c0991f4c8ed17f324b91 | [
"Apache-2.0"
] | null | null | null | moto/cloudwatch/models.py | EvaSDK/moto | 8095f31772fb144f2045c0991f4c8ed17f324b91 | [
"Apache-2.0"
] | null | null | null | moto/cloudwatch/models.py | EvaSDK/moto | 8095f31772fb144f2045c0991f4c8ed17f324b91 | [
"Apache-2.0"
] | null | null | null | from moto.core import BaseBackend
class Dimension(object):
def __init__(self, name, value):
self.name = name
self.value = value
class FakeAlarm(object):
def __init__(self, name, comparison_operator, evaluation_periods, period,
threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit):
self.name = name
self.comparison_operator = comparison_operator
self.evaluation_periods = evaluation_periods
self.period = period
self.threshold = threshold
self.statistic = statistic
self.description = description
self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
self.alarm_actions = alarm_actions
self.ok_actions = ok_actions
self.insufficient_data_actions = insufficient_data_actions
self.unit = unit
class CloudWatchBackend(BaseBackend):
def __init__(self):
self.alarms = {}
def put_metric_alarm(self, name, comparison_operator, evaluation_periods,
period, threshold, statistic, description, dimensions,
alarm_actions, ok_actions, insufficient_data_actions, unit):
alarm = FakeAlarm(name, comparison_operator, evaluation_periods, period,
threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit)
self.alarms[name] = alarm
return alarm
def get_all_alarms(self):
return self.alarms.values()
def delete_alarms(self, alarm_names):
for alarm_name in alarm_names:
self.alarms.pop(alarm_name, None)
cloudwatch_backend = CloudWatchBackend()
| 35.54902 | 104 | 0.666851 |
1bdb747f5554aba5bb0d63b52d398e4de9694503 | 1,414 | py | Python | experiments/point_cloud/plot_data.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 262 | 2020-07-05T20:57:44.000Z | 2022-03-28T02:24:43.000Z | experiments/point_cloud/plot_data.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 17 | 2020-08-15T05:43:34.000Z | 2022-01-31T12:24:21.000Z | experiments/point_cloud/plot_data.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 35 | 2020-08-24T06:55:37.000Z | 2022-02-11T05:17:58.000Z | import torch
import argparse
# Plot
import matplotlib.pyplot as plt
# Data
from data import get_data, dataset_choices
###########
## Setup ##
###########
parser = argparse.ArgumentParser()
# Data params
parser.add_argument('--dataset', type=str, default='spatial_mnist', choices=dataset_choices)
# Plotting params
parser.add_argument('--rowcol', type=int, default=8)
parser.add_argument('--pixels', type=int, default=1000)
parser.add_argument('--dpi', type=int, default=96)
# Check the DPI of your monitor at: https://www.infobyip.com/detectmonitordpi.php
args = parser.parse_args()
args.batch_size = args.rowcol**2
torch.manual_seed(0)
##################
## Specify data ##
##################
train_loader, valid_loader, test_loader = get_data(args)
x = next(iter(test_loader)).numpy()
##############
## Sampling ##
##############
if args.dataset in {'spatial_mnist'}:
bounds = [[0, 28], [0, 28]]
else:
raise NotImplementedError()
fig, ax = plt.subplots(args.rowcol,args.rowcol, figsize=(args.pixels/args.dpi, args.pixels/args.dpi), dpi=args.dpi)
for i in range(args.rowcol):
for j in range(args.rowcol):
idx = i+args.rowcol*j
ax[i][j].scatter(x[idx,:,0], x[idx,:,1])
ax[i][j].set_xlim(bounds[0])
ax[i][j].set_ylim(bounds[1])
ax[i][j].axis('off')
plt.savefig('figures/{}.png'.format(args.dataset), bbox_inches = 'tight', pad_inches = 0)
plt.show()
| 25.25 | 115 | 0.647808 |
98501d245fd73160e50418925d438d1cfd3a1dcd | 305 | py | Python | 2017/06/diarrheal-deaths-20170615/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 14 | 2015-05-08T13:41:51.000Z | 2021-02-24T12:34:55.000Z | 2017/06/diarrheal-deaths-20170615/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | null | null | null | 2017/06/diarrheal-deaths-20170615/graphic_config.py | nprapps/graphics-archive | 97b0ef326b46a959df930f5522d325e537f7a655 | [
"FSFAP"
] | 7 | 2015-04-04T04:45:54.000Z | 2021-02-18T11:12:48.000Z | #!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1Oy7rPtd7qSWXhiUiTytI3OLjZsqO391tepFJXcU7Hzs'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| 21.785714 | 77 | 0.819672 |
29c1b4fa9862f494c36e34d77f0838a46c20f3ca | 614 | py | Python | dephell_pythons/_constants.py | jayvdb/dephell_pythons | 17afb28bd89652718410998dcd13bfdde9dfd865 | [
"MIT"
] | 2 | 2019-03-20T14:39:55.000Z | 2019-04-21T15:48:36.000Z | dephell_pythons/_constants.py | jayvdb/dephell_pythons | 17afb28bd89652718410998dcd13bfdde9dfd865 | [
"MIT"
] | 4 | 2019-07-24T13:51:09.000Z | 2020-05-28T14:28:14.000Z | dephell_pythons/_constants.py | jayvdb/dephell_pythons | 17afb28bd89652718410998dcd13bfdde9dfd865 | [
"MIT"
] | 3 | 2019-12-19T01:38:56.000Z | 2021-09-28T02:43:43.000Z | # built-in
import os
import platform
IS_WINDOWS = (os.name == 'nt' or platform.system() == 'Windows')
PYTHONS_DEPRECATED = ('2.6', '2.7', '3.0', '3.1', '3.2', '3.3', '3.4')
PYTHONS_POPULAR = ('3.5', '3.6', '3.7')
PYTHONS_UNRELEASED = ('3.8', '4.0')
PYTHONS = PYTHONS_POPULAR + tuple(reversed(PYTHONS_DEPRECATED)) + PYTHONS_UNRELEASED
PYTHON_IMPLEMENTATIONS = (
'python',
'ironpython',
'jython',
'pypy',
'anaconda',
'miniconda',
'stackless',
'activepython',
'micropython',
)
SUFFIX_PATTERNS = (
'?',
'?.?',
'?.?m',
'?-?.?',
'?-?.?.?',
'?.?-?.?.?',
)
| 18.606061 | 84 | 0.535831 |
ad7aa2d7a1f4d075711d96766d00f0705e0ab4d2 | 2,334 | py | Python | medet/users/migrations/0016_auto_20210803_1049.py | ayushri117/MEDET | 11941c89a44f9e49939710ad23ff85378b89c5a9 | [
"MIT"
] | null | null | null | medet/users/migrations/0016_auto_20210803_1049.py | ayushri117/MEDET | 11941c89a44f9e49939710ad23ff85378b89c5a9 | [
"MIT"
] | null | null | null | medet/users/migrations/0016_auto_20210803_1049.py | ayushri117/MEDET | 11941c89a44f9e49939710ad23ff85378b89c5a9 | [
"MIT"
] | 1 | 2021-08-07T15:57:40.000Z | 2021-08-07T15:57:40.000Z | # Generated by Django 3.0.6 on 2021-08-03 10:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0015_auto_20210803_0931'),
]
operations = [
migrations.RenameField(
model_name='medicine',
old_name='schedule',
new_name='sch',
),
migrations.RemoveField(
model_name='medicine',
name='disease',
),
migrations.AddField(
model_name='medicine',
name='disease_name',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='medicine',
name='image',
field=models.ImageField(blank=True, upload_to='medicine_pics'),
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Medicine')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Doctor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('doctor_name', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Medicine')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Disease',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('disease_name', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Medicine')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 38.9 | 118 | 0.598543 |
13f9ab762a4b0510561c7b30058c5b7154542047 | 11,072 | py | Python | pyzoo/zoo/examples/orca/learn/tf/ncf/ncf.py | ankitdobhal/analytics-zoo | b8374bcd6c73bba49fe0b0ab075528cdd94cf2af | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/examples/orca/learn/tf/ncf/ncf.py | ankitdobhal/analytics-zoo | b8374bcd6c73bba49fe0b0ab075528cdd94cf2af | [
"Apache-2.0"
] | 1 | 2021-01-20T15:41:01.000Z | 2021-01-20T15:41:01.000Z | pyzoo/zoo/examples/orca/learn/tf/ncf/ncf.py | ankitdobhal/analytics-zoo | b8374bcd6c73bba49fe0b0ab075528cdd94cf2af | [
"Apache-2.0"
] | 1 | 2020-12-21T11:48:49.000Z | 2020-12-21T11:48:49.000Z | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import zipfile
import argparse
import numpy as np
import tensorflow as tf
from bigdl.dataset import base
from sklearn.model_selection import train_test_split
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca.learn.tf.estimator import Estimator
from zoo.orca.data import SharedValue
import zoo.orca.data.pandas
SOURCE_URL = 'http://files.grouplens.org/datasets/movielens/'
COLUMN_NAMES = ['user', 'item', 'label']
def re_index(s):
""" for reindexing the item set. """
i = 0
s_map = {}
for key in s:
s_map[key] = i
i += 1
return s_map
def set_index(data, user_map, item_map):
def set_user_item(df, item_map, user_map):
user_list = []
item_list = []
item_map = item_map.value
user_map = user_map.value
for i in range(len(df)):
user_list.append(user_map[df['user'][i]])
item_list.append(item_map[df['item'][i]])
df['user'] = user_list
df['item'] = item_list
return df
user_map_shared_value = SharedValue(user_map)
item_map_shared_value = SharedValue(item_map)
return data.transform_shard(set_user_item, item_map_shared_value, user_map_shared_value)
def load_data(data_dir):
WHOLE_DATA = 'ml-1m.zip'
local_file = base.maybe_download(WHOLE_DATA, data_dir, SOURCE_URL + WHOLE_DATA)
zip_ref = zipfile.ZipFile(local_file, 'r')
extracted_to = os.path.join(data_dir, "ml-1m")
if not os.path.exists(extracted_to):
print("Extracting %s to %s" % (local_file, data_dir))
zip_ref.extractall(data_dir)
zip_ref.close()
rating_files = os.path.join(extracted_to, "ratings.dat")
# replace :: to : for spark 2.4 support
new_rating_files = os.path.join(extracted_to, "ratings_new.dat")
if not os.path.exists(new_rating_files):
fin = open(rating_files, "rt")
# output file to write the result to
fout = open(new_rating_files, "wt")
# for each line in the input file
for line in fin:
# read replace the string and write to output file
fout.write(line.replace('::', ':'))
# close input and output files
fin.close()
fout.close()
# read movive len csv to XShards of Pandas Dataframe
full_data = zoo.orca.data.pandas.read_csv(new_rating_files, sep=':', header=None,
names=COLUMN_NAMES, usecols=[0, 1, 2],
dtype={0: np.int32, 1: np.int32, 2: np.int32})
user_set = set(full_data['user'].unique())
item_set = set(full_data['item'].unique())
min_user_id = min(user_set)
max_user_id = max(user_set)
min_item_id = min(item_set)
max_item_id = max(item_set)
print(min_user_id, max_user_id, min_item_id, max_item_id)
# update label starting from 0
def update_label(df):
df['label'] = df['label'] - 1
return df
full_data = full_data.transform_shard(update_label)
# split to train/test dataset
def split_train_test(data):
# splitting the full set into train and test sets.
train, test = train_test_split(data, test_size=0.2, random_state=100)
return train, test
train_data, test_data = full_data.transform_shard(split_train_test).split()
def to_train_val_shard(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
train_data = train_data.transform_shard(to_train_val_shard)
test_data = test_data.transform_shard(to_train_val_shard)
return train_data, test_data, max_user_id, max_item_id
class NCF(object):
def __init__(self, embed_size, user_size, item_size):
self.user = tf.placeholder(dtype=tf.int32, shape=(None,))
self.item = tf.placeholder(dtype=tf.int32, shape=(None,))
self.label = tf.placeholder(dtype=tf.int32, shape=(None,))
with tf.name_scope("GMF"):
user_embed_GMF = tf.contrib.layers.embed_sequence(self.user,
vocab_size=user_size + 1,
embed_dim=embed_size,
unique=False
)
item_embed_GMF = tf.contrib.layers.embed_sequence(self.item,
vocab_size=item_size + 1,
embed_dim=embed_size,
unique=False
)
GMF = tf.multiply(user_embed_GMF, item_embed_GMF, name='GMF')
# MLP part starts
with tf.name_scope("MLP"):
user_embed_MLP = tf.contrib.layers.embed_sequence(self.user,
vocab_size=user_size + 1,
embed_dim=embed_size,
unique=False,
)
item_embed_MLP = tf.contrib.layers.embed_sequence(self.item,
vocab_size=item_size + 1,
embed_dim=embed_size,
unique=False
)
interaction = tf.concat([user_embed_MLP, item_embed_MLP],
axis=-1, name='interaction')
layer1_MLP = tf.layers.dense(inputs=interaction,
units=embed_size * 2,
name='layer1_MLP')
layer1_MLP = tf.layers.dropout(layer1_MLP, rate=0.2)
layer2_MLP = tf.layers.dense(inputs=layer1_MLP,
units=embed_size,
name='layer2_MLP')
layer2_MLP = tf.layers.dropout(layer2_MLP, rate=0.2)
layer3_MLP = tf.layers.dense(inputs=layer2_MLP,
units=embed_size // 2,
name='layer3_MLP')
layer3_MLP = tf.layers.dropout(layer3_MLP, rate=0.2)
# Concate the two parts together
with tf.name_scope("concatenation"):
concatenation = tf.concat([GMF, layer3_MLP], axis=-1,
name='concatenation')
self.logits = tf.layers.dense(inputs=concatenation,
units=5,
name='predict')
self.logits_softmax = tf.nn.softmax(self.logits)
self.class_number = tf.argmax(self.logits_softmax, 1)
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.label, logits=self.logits, name='loss'))
with tf.name_scope("optimzation"):
self.optim = tf.train.AdamOptimizer(1e-3, name='Adam')
self.optimizer = self.optim.minimize(self.loss)
def train(train_data, test_data, user_size, item_size):
model = NCF(opt.embedding_size, user_size, item_size)
estimator = Estimator.from_graph(
inputs=[model.user, model.item],
outputs=[model.class_number],
labels=[model.label],
loss=model.loss,
optimizer=model.optim,
model_dir=opt.model_dir,
metrics={"loss": model.loss})
estimator.fit(data=train_data,
batch_size=opt.batch_size,
epochs=opt.epochs,
validation_data=test_data
)
checkpoint_path = os.path.join(opt.model_dir, "NCF.ckpt")
estimator.save_tf_checkpoint(checkpoint_path)
estimator.sess.close()
def predict(predict_data, user_size, item_size):
def to_predict(data):
del data['y']
return data
predict_data = predict_data.transform_shard(to_predict)
tf.reset_default_graph()
with tf.Session() as sess:
model = NCF(opt.embedding_size, user_size, item_size)
saver = tf.train.Saver(tf.global_variables())
checkpoint_path = os.path.join(opt.model_dir, "NCF.ckpt")
saver.restore(sess, checkpoint_path)
estimator = Estimator.from_graph(
inputs=[model.user, model.item],
outputs=[model.class_number],
sess=sess,
model_dir=opt.model_dir
)
predict_result = estimator.predict(predict_data)
predictions = predict_result.collect()
assert 'prediction' in predictions[0]
print(predictions[0]['prediction'])
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='NCF example on movie len dataset.')
parser.add_argument('--cluster_mode', type=str, default="local",
help='The mode for the Spark cluster. local or yarn.')
parser.add_argument('--data_dir', type=str, default='/tmp',
help='the dir for downloaded data.')
parser.add_argument('--embedding_size', type=int, default=16,
help='the size for embedding user and item.')
parser.add_argument('--model_dir', type=str, default='./',
help='the dir for saving model.')
parser.add_argument('-b', '--batch_size', type=int, default=1280,
help='size of mini-batch')
parser.add_argument('-e', '--epochs', type=int, default=10,
help='The number of epochs to train the model.')
opt = parser.parse_args()
if opt.cluster_mode == "local":
init_orca_context(cluster_mode="local", cores=4)
elif opt.cluster_mode == "yarn":
init_orca_context(cluster_mode="yarn-client", num_nodes=2, cores=2, driver_memory="6g")
(train_data, test_data, max_user_id, max_item_id) = load_data(opt.data_dir)
train(train_data, test_data, max_user_id, max_item_id)
predict(test_data, max_user_id, max_item_id)
stop_orca_context()
| 39.262411 | 95 | 0.567287 |
3cbb6dbe7c91aa24c7a5d89d627ca321cf84d32a | 1,170 | py | Python | tests/test_authentication_base.py | artreven/fastapi-users | f4fa72328e476a4dbf74c4403572bc1247b1c319 | [
"MIT"
] | null | null | null | tests/test_authentication_base.py | artreven/fastapi-users | f4fa72328e476a4dbf74c4403572bc1247b1c319 | [
"MIT"
] | null | null | null | tests/test_authentication_base.py | artreven/fastapi-users | f4fa72328e476a4dbf74c4403572bc1247b1c319 | [
"MIT"
] | null | null | null | import pytest
from fastapi import Response
from fastapi_users.authentication import BaseAuthentication
@pytest.fixture
def base_authentication():
return BaseAuthentication()
@pytest.mark.authentication
class TestAuthenticate:
@pytest.mark.asyncio
async def test_not_implemented(self, base_authentication, user_manager):
with pytest.raises(NotImplementedError):
await base_authentication(None, user_manager)
@pytest.mark.authentication
@pytest.mark.asyncio
async def test_get_login_response(base_authentication, user, user_manager):
with pytest.raises(NotImplementedError):
await base_authentication.get_login_response(user, Response(), user_manager)
@pytest.mark.authentication
@pytest.mark.asyncio
async def test_get_logout_response(base_authentication, user, user_manager):
with pytest.raises(NotImplementedError):
await base_authentication.get_logout_response(user, Response(), user_manager)
@pytest.mark.authentication
@pytest.mark.asyncio
async def test_decode_jwt_response(base_authentication, user):
with pytest.raises(NotImplementedError):
await base_authentication.decode_jwt(user)
| 30 | 85 | 0.801709 |
fa3a852b41850f154380e24a79c5d071db18bc7e | 16 | py | Python | second.py | woorim960/coding-exam | 02eb16ad4d5d5a989dfa8e9a5c2e76b198788012 | [
"MIT"
] | null | null | null | second.py | woorim960/coding-exam | 02eb16ad4d5d5a989dfa8e9a5c2e76b198788012 | [
"MIT"
] | null | null | null | second.py | woorim960/coding-exam | 02eb16ad4d5d5a989dfa8e9a5c2e76b198788012 | [
"MIT"
] | null | null | null | print("작업한 코드")
| 8 | 15 | 0.625 |
9b516c0641d30ab0248d323e015a04b041b8da22 | 3,954 | py | Python | Questionnaire2/Questionnaire/Api/admin.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | 2 | 2020-11-09T06:20:45.000Z | 2021-05-10T07:03:35.000Z | Questionnaire2/Questionnaire/Api/admin.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | null | null | null | Questionnaire2/Questionnaire/Api/admin.py | riverstation/project-all | c56f1879e1303d561e95a3ff3a70f94fb5fa2191 | [
"Apache-2.0"
] | 1 | 2020-02-21T09:37:01.000Z | 2020-02-21T09:37:01.000Z | import math
from datetime import datetime
from Api.utils import *
from Api.resources import Rest
from Question.models import *
from Api.decorators import *
class AdminQuestionnaire(Rest):
@admin_required
def get(self,request,*args,**kwargs):
user = request.user
# customer = user.customer
data = request.GET
page = int(data.get('page', 1))
limit = int(data.get('limit', 10))
start_id = int(data.get('start_id', 1))
with_detail = data.get('with_detail', False)
questionnaire = Questionnaire.objects.filter(
id__gte=start_id, state=1)
count = questionnaire.count()
pages = math.ceil(count/limit) or 1
if page > pages:
page = pages
# 取出对应页面的数据
start = (page-1)*limit
end = page*limit
objs = questionnaire[start:end]
# 构建响应数据
result = dict()
result['objs'] = []
for obj in objs:
# 构建
questionnaire_dict = dict()
questionnaire_dict['id'] = obj.id
questionnaire_dict['title'] = obj.title
questionnaire_dict['create_date'] = datetime.strftime(
obj.create_date, '%Y-%m-%d')
questionnaire_dict['expire_date'] = datetime.strftime(
obj.expire_date, '%Y-%m-%d')
questionnaire_dict['quantity'] = obj.quantity
questionnaire_dict['left'] = obj.left
questionnaire_dict['state'] = obj.state
# 把客户信息取出来
questionnaire_dict['customer']={
"id":obj.customer.id,
"company_name":obj.customer.company_name
}
if with_detail == 'true':
# 构建问卷下问题列表
questionnaire_dict['questions'] = []
for question in obj.question_set.all():
question_dict = dict()
question_dict['id'] = question.id
question_dict['title'] = question.title
question_dict['is_checkbox'] = question.is_checkbox
# 构建问题下选项列表
question_dict['items'] = []
for item in question.item_set.all():
item_dict = dict()
item_dict['id'] = item.id
item_dict['content'] = item.content
question_dict['items'].append(item_dict)
questionnaire_dict['questions'].append(question_dict)
result['objs'].append(questionnaire_dict)
return json_response(result)
class QuestionnaireCommentResource(Rest):
@admin_required
def put(self,request,*args,**kwargs):
user=request.user
data=request.PUT
admin=user.admin
questionnaire_id=data.get('questionnaire_id',0)
is_agree=data.get('is_agree',False)
comment=data.get('comment','')
# 找出问卷
questionnaire_exist=Questionnaire.objects.filter(id=questionnaire_id,state=1)
if questionnaire_exist:
questionnaire=questionnaire_exist[0]
else:
return params_error({
"questionnaire_id":"问卷不存在,或者该问卷非待审核状态"
})
if is_agree:
# 如果同意,将问卷状态保存为审核通过
questionnaire.state=3
questionnaire.save()
else:
# 如果不同意,首先判断是否提交了批注信息,保存批注信息,将问卷状态修改为审核未通过
if comment:
comment_obj=QuestionnaireComment()
comment_obj.questionnaire=questionnaire
comment_obj.comment=comment
comment_obj.admin=admin
comment_obj.save()
else:
return params_error({
'comment':"必须提供批注信息"
})
questionnaire.state=2
questionnaire.save()
return json_response({
"msg":"提交成功"
})
| 33.794872 | 85 | 0.541224 |
17556efc18e7ff4143a7defe48d7a3cfe0caf292 | 2,691 | py | Python | openpyxl/pivot/tests/test_record.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
] | 1 | 2022-01-02T02:58:03.000Z | 2022-01-02T02:58:03.000Z | openpyxl/pivot/tests/test_record.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
] | 1 | 2022-01-01T15:08:26.000Z | 2022-01-01T15:08:36.000Z | openpyxl/pivot/tests/test_record.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
] | 1 | 2021-04-04T09:38:38.000Z | 2021-04-04T09:38:38.000Z | # Copyright (c) 2010-2021 openpyxl
import pytest
from io import BytesIO
from zipfile import ZipFile
from openpyxl.packaging.manifest import Manifest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
from .test_fields import (
Index,
Number,
Text,
)
@pytest.fixture
def Record():
from ..record import Record
return Record
class TestRecord:
def test_ctor(self, Record, Number, Text, Index):
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = n + s + x
field = Record(_fields=fields)
xml = tostring(field.to_tree())
expected = """
<r>
<n v="1"/>
<n v="25"/>
<s v="2014-03-24"/>
<x v="0"/>
<x v="0"/>
<x v="0"/>
</r>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Record, Number, Text, Index):
src = """
<r>
<n v="1"/>
<x v="0"/>
<s v="2014-03-24"/>
<x v="0"/>
<n v="25"/>
<x v="0"/>
</r>
"""
node = fromstring(src)
n = [Number(v=1), Number(v=25)]
s = [Text(v="2014-03-24")]
x = [Index(), Index(), Index()]
fields = [
Number(v=1),
Index(),
Text(v="2014-03-24"),
Index(),
Number(v=25),
Index(),
]
field = Record.from_tree(node)
assert field == Record(_fields=fields)
@pytest.fixture
def RecordList():
from ..record import RecordList
return RecordList
class TestRecordList:
def test_ctor(self, RecordList):
cache = RecordList()
xml = tostring(cache.to_tree())
expected = """
<pivotCacheRecords xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
count="0" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, RecordList):
src = """
<pivotCacheRecords count="0" />
"""
node = fromstring(src)
cache = RecordList.from_tree(node)
assert cache == RecordList()
def test_write(self, RecordList):
out = BytesIO()
archive = ZipFile(out, mode="w")
manifest = Manifest()
records = RecordList()
xml = tostring(records.to_tree())
records._write(archive, manifest)
manifest.append(records)
assert archive.namelist() == [records.path[1:]]
assert manifest.find(records.mime_type)
| 23.814159 | 92 | 0.528056 |
dbea75a4103c50d9deb6ceb61d81db4d1b4dbb20 | 202 | py | Python | components/mlserve/mlpm/app.py | xzyaoi/AID | 4eef0e261038ffa71552c5dcc55ee5d76b40d3d9 | [
"MIT"
] | null | null | null | components/mlserve/mlpm/app.py | xzyaoi/AID | 4eef0e261038ffa71552c5dcc55ee5d76b40d3d9 | [
"MIT"
] | null | null | null | components/mlserve/mlpm/app.py | xzyaoi/AID | 4eef0e261038ffa71552c5dcc55ee5d76b40d3d9 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Xiaozhe Yao & AICAMP.CO.,LTD
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
#coding:utf-8
from sanic import Sanic
aidserver = Sanic()
| 20.2 | 50 | 0.732673 |
d20d6db08c57c2d5409145c7a68f9fe3982dec6b | 6,597 | py | Python | tests/lookups/handlers/test_ssm.py | paul-duffy/runway | a0c22eb7ca7b55df5317bdda92c08c4bb39569d2 | [
"Apache-2.0"
] | 1 | 2020-02-25T21:08:00.000Z | 2020-02-25T21:08:00.000Z | tests/lookups/handlers/test_ssm.py | paul-duffy/runway | a0c22eb7ca7b55df5317bdda92c08c4bb39569d2 | [
"Apache-2.0"
] | 2 | 2020-01-07T15:00:55.000Z | 2020-01-07T15:03:25.000Z | tests/lookups/handlers/test_ssm.py | voodooGQ/runway | 8a744f33b39f1342022f1b57db996bb843e4556c | [
"Apache-2.0"
] | null | null | null | """Test runway.lookups.handlers.ssm."""
# pylint: disable=no-self-use,unused-import
import json
from datetime import datetime
import pytest
import yaml
from runway.cfngin.exceptions import FailedVariableLookup
from runway.variables import Variable
def get_parameter_response(name, value, value_type='String', label=None,
version=1):
"""Generate a mock ssm.get_parameter response."""
selector = '{}/{}'.format(name, label or version)
return {
'Parameter': {
'Name': name,
'Type': value_type,
'Value': value,
'Version': 1,
'Selector': selector,
'SourceResult': '',
'LastModifiedDate': datetime.now(),
'ARN': ''
}
}
def get_parameter_request(name, decrypt=True):
"""Generate the expected request paramters for ssm.get_parameter."""
return {
'Name': name,
'WithDecryption': decrypt
}
class TestSsmLookup(object):
"""Test runway.lookups.handlers.ssm.SsmLookup."""
def test_basic(self, cfngin_context, runway_context):
"""Test resolution of a basic lookup."""
name = '/test/param'
value = 'test value'
cfngin_stubber = cfngin_context.add_stubber('ssm')
runway_stubber = runway_context.add_stubber('ssm')
cfngin_var = Variable('test_var', '${ssm %s}' % name,
variable_type='cfngin')
runway_var = Variable('test_var', '${ssm %s}' % name,
variable_type='runway')
for stubber in [cfngin_stubber, runway_stubber]:
stubber.add_response('get_parameter',
get_parameter_response(name, value),
get_parameter_request(name))
with cfngin_stubber as cfn_stub, runway_stubber as rw_stub:
cfngin_var.resolve(context=cfngin_context)
assert cfngin_var.value == value
runway_var.resolve(context=runway_context)
assert runway_var.value == value
cfn_stub.assert_no_pending_responses()
rw_stub.assert_no_pending_responses()
def test_default(self, runway_context):
"""Test resolution of a default value."""
name = '/test/param'
value = 'test value'
stubber = runway_context.add_stubber('ssm')
var = Variable(
'test_var',
'${ssm /test/invalid::load=json, default=${ssm %s}}' % name,
variable_type='runway'
)
stubber.add_response('get_parameter',
get_parameter_response(name, value),
get_parameter_request(name))
stubber.add_client_error('get_parameter',
'ParameterNotFound',
expected_params=get_parameter_request(
'/test/invalid'
))
with stubber as stub:
var.resolve(context=runway_context)
assert var.value == value
stub.assert_no_pending_responses()
def test_different_region(self, runway_context):
"""Test Lookup in region other than that set in Context."""
name = '/test/param'
value = 'test value'
stubber = runway_context.add_stubber('ssm', region='us-west-2')
var = Variable('test_var', '${ssm %s::region=us-west-2}' % name,
variable_type='runway')
stubber.add_response('get_parameter',
get_parameter_response(name, value),
get_parameter_request(name))
with stubber as stub:
var.resolve(context=runway_context)
assert var.value == value
stub.assert_no_pending_responses()
def test_loaded_value(self, runway_context):
"""Test resolution of a JSON value."""
name = '/test/param'
raw_value = {
'nested': {
'bool': True,
'nest_key': 'nested_val'
},
'test_key': 'test_val'
}
stubber = runway_context.add_stubber('ssm')
parsers = ['json', 'yaml']
tests = [
{
'lookup': '${{ssm {name}::load={parser}}}',
'expected': raw_value
},
{
'lookup': '${{ssm {name}::load={parser},transform=str,indent=2}}',
'expected': json.dumps(json.dumps(raw_value, indent=2))
},
{
'lookup': '${{ssm {name}::load={parser},get=nested}}',
'expected': raw_value['nested']
},
{
'lookup': '${{ssm {name}::load={parser},get=nested.bool,transform=str}}',
'expected': json.dumps('True')
}
]
for parser in parsers:
for test in tests:
var = Variable('test_var.{}'.format(parser),
test['lookup'].format(name=name, parser=parser),
variable_type='runway')
if parser == 'json':
dumped_value = json.dumps(raw_value)
elif parser == 'yaml':
dumped_value = yaml.dump(raw_value)
else:
raise ValueError
stubber.add_response('get_parameter',
get_parameter_response(name,
dumped_value),
get_parameter_request(name))
with stubber as stub:
var.resolve(context=runway_context)
assert var.value == test['expected']
stub.assert_no_pending_responses()
def test_not_found(self, runway_context):
"""Test raises ParameterNotFound."""
name = '/test/param'
stubber = runway_context.add_stubber('ssm')
var = Variable('test_var', '${ssm %s}' % name, variable_type='runway')
stubber.add_client_error('get_parameter',
'ParameterNotFound',
expected_params=get_parameter_request(
name
))
with stubber as stub, pytest.raises(FailedVariableLookup) as err:
var.resolve(context=runway_context)
assert 'ParameterNotFound' in str(err.value)
stub.assert_no_pending_responses()
| 36.65 | 89 | 0.524329 |
4c0618166fff07b4c6d0bed125977a8b6588ef00 | 9,320 | py | Python | tests/aioneo4j/test_driver.py | bbc/rd-cloudfit-python-aiocypher | eb6ce85ee1045ed715bbc4f2b5e033688f7fb5f2 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-11-09T20:48:18.000Z | 2021-11-12T07:45:39.000Z | tests/aioneo4j/test_driver.py | bbc/rd-cloudfit-python-aiocypher | eb6ce85ee1045ed715bbc4f2b5e033688f7fb5f2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/aioneo4j/test_driver.py | bbc/rd-cloudfit-python-aiocypher | eb6ce85ee1045ed715bbc4f2b5e033688f7fb5f2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | #
#
# Copyright 2020-21 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from .. import HAS_NEO4J
from .fixtures import clean_session
@unittest.skipUnless(HAS_NEO4J, "Don't test aioneo4j unless neo4j is installed")
class TestDriver(unittest.IsolatedAsyncioTestCase):
    """Integration tests for the async neo4j session/transaction wrapper.

    Each test creates fixture data inside one transaction and reads it
    back inside a second one; the ``clean_session`` decorator supplies a
    session against an empty database for every test.
    """
    @clean_session
    async def test_basic_query(self, session):
        """A created node can be matched and its property read back."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example'})")
        async with session.begin_transaction() as tx:
            result = await tx.run("MATCH(n:TestNode) RETURN n.name")
            record = result.single()
            self.assertEqual("example", record.value())
    @clean_session
    async def test_single(self, session):
        """``single()`` can be awaited directly on the ``run()`` chain."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example'})")
        async with session.begin_transaction() as tx:
            record = await tx.run("MATCH(n:TestNode) RETURN n.name").single()
            self.assertEqual("example", record.value())
    @clean_session
    async def test_single_value(self, session):
        """``single().value()`` yields the bare column value."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example'})")
        async with session.begin_transaction() as tx:
            name = await tx.run("MATCH(n:TestNode) RETURN n.name").single().value()
            self.assertEqual("example", name)
    @clean_session
    async def test_single_data(self, session):
        """``single().data()`` returns the record as a plain dict."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
        async with session.begin_transaction() as tx:
            n = await tx.run("MATCH(n:TestNode) RETURN n").single().data()
            self.assertEqual(n, {
                'n': {
                    'name': 'example',
                    'potato': 'desirée'
                }
            })
    @clean_session
    async def test_result_data__one_record(self, session):
        """``data()`` on a one-row result returns a one-element list."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
        async with session.begin_transaction() as tx:
            n = await tx.run("MATCH(n:TestNode) RETURN n").data()
            self.assertEqual(n, [{
                'n': {
                    'name': 'example',
                    'potato': 'desirée'
                }
            }])
    @clean_session
    async def test_result_data__two_records(self, session):
        """``data()`` returns one dict per matched node, order-independent."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
            await tx.run("CREATE(n:TestNode{name:'模範', potato:'じゃが芋'})")
        async with session.begin_transaction() as tx:
            n = await tx.run("MATCH(n:TestNode) RETURN n").data()
            self.assertCountEqual(n, [
                {
                    'n': {
                        'name': 'example',
                        'potato': 'desirée'
                    }
                },
                {
                    'n': {
                        'name': '模範',
                        'potato': 'じゃが芋'
                    }
                }
            ])
    @clean_session
    async def test_result_data__relationship(self, session):
        """Relationships serialize as (start-props, type, end-props) tuples."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
            await tx.run("CREATE(n:TestNode{name:'模範', potato:'じゃが芋'})")
            await tx.run("""\
                MATCH(a:TestNode{name:'example'})
                MATCH(b:TestNode{name:'模範'})
                CREATE (a)-[:Translation {lang:'ja'}]->(b)""")
        async with session.begin_transaction() as tx:
            data = await tx.run("MATCH (a:TestNode)-[r:Translation]-(b:TestNode) RETURN a,r,b").data()
            # It's a weird thing that this doesn't return you the properties of the relationship, but
            # that's down to the underlying synchronous library's behaviour
            self.assertCountEqual(data, [
                {
                    'a': {'name': 'example', 'potato': 'desirée'},
                    'r': ({'name': 'example', 'potato': 'desirée'}, 'Translation', {'name': '模範', 'potato': 'じゃが芋'}),
                    'b': {'name': '模範', 'potato': 'じゃが芋'}
                },
                {
                    'b': {'name': 'example', 'potato': 'desirée'},
                    'r': ({'name': 'example', 'potato': 'desirée'}, 'Translation', {'name': '模範', 'potato': 'じゃが芋'}),
                    'a': {'name': '模範', 'potato': 'じゃが芋'}
                }
            ])
    @clean_session
    async def test_result_data__two_values(self, session):
        """Aliased return values become dict keys in ``data()``."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
            await tx.run("CREATE(n:TestNode{name:'模範', potato:'じゃが芋'})")
        async with session.begin_transaction() as tx:
            n = await tx.run("MATCH(n:TestNode) RETURN n.name AS name, n.potato AS potato").data()
            self.assertCountEqual(n, [
                {
                    'name': 'example',
                    'potato': 'desirée'
                },
                {
                    'name': '模範',
                    'potato': 'じゃが芋'
                }
            ])
    @clean_session
    async def test_graph_nodes(self, session):
        """``graph().nodes`` exposes both matched nodes with their labels."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
            await tx.run("CREATE(n:TestNode{name:'模範', potato:'じゃが芋'})")
            await tx.run("""\
                MATCH(a:TestNode{name:'example'})
                MATCH(b:TestNode{name:'模範'})
                CREATE (a)-[:Translation {lang:'ja'}]->(b)""")
        async with session.begin_transaction() as tx:
            nodes = await tx.run("MATCH (a:TestNode)-[r:Translation]-(b:TestNode) RETURN a,r,b").graph().nodes
            self.assertEqual(len(nodes), 2)
            self.assertCountEqual(
                [n.labels for n in nodes],
                [{'TestNode'}, {'TestNode'}]
            )
            self.assertCountEqual(
                [n['name'] for n in nodes],
                ['example', '模範']
            )
            self.assertCountEqual(
                [n['potato'] for n in nodes],
                ['desirée', 'じゃが芋']
            )
    @clean_session
    async def test_graph_relationships(self, session):
        """``graph().relationships`` carries type, props, and endpoints."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
            await tx.run("CREATE(n:TestNode{name:'模範', potato:'じゃが芋'})")
            await tx.run("""\
                MATCH(a:TestNode{name:'example'})
                MATCH(b:TestNode{name:'模範'})
                CREATE (a)-[:Translation {lang:'ja'}]->(b)""")
        async with session.begin_transaction() as tx:
            rels = await tx.run("MATCH (a:TestNode)-[r:Translation]-(b:TestNode) RETURN a,r,b").graph().relationships
            self.assertEqual(len(rels), 1)
            rel = next(iter(rels))
            self.assertEqual(rel.type, 'Translation')
            self.assertEqual(rel['lang'], 'ja')
            self.assertEqual(rel.start_node.labels, {'TestNode'})
            self.assertEqual(rel.start_node['name'], 'example')
            self.assertEqual(rel.start_node['potato'], 'desirée')
            self.assertEqual(rel.end_node.labels, {'TestNode'})
            self.assertEqual(rel.end_node['name'], '模範')
            self.assertEqual(rel.end_node['potato'], 'じゃが芋')
    @clean_session
    async def test_graph_nodes_and_relationships(self, session):
        """Nodes and relationships can be awaited off one graph object."""
        async with session.begin_transaction() as tx:
            await tx.run("CREATE(n:TestNode{name:'example', potato:'desirée'})")
            await tx.run("CREATE(n:TestNode{name:'模範', potato:'じゃが芋'})")
            await tx.run("""\
                MATCH(a:TestNode{name:'example'})
                MATCH(b:TestNode{name:'模範'})
                CREATE (a)-[:Translation {lang:'ja'}]->(b)""")
        async with session.begin_transaction() as tx:
            # NOTE(review): tx.run(...).graph() is not awaited here; the
            # awaits happen on .nodes/.relationships below — presumably the
            # wrapper chain is lazy. Confirm against the aioneo4j API.
            graph = tx.run("MATCH (a:TestNode)-[r:Translation]-(b:TestNode) RETURN a,r,b").graph()
            nodes = await graph.nodes
            rels = await graph.relationships
            self.assertEqual(len(nodes), 2)
            self.assertCountEqual(
                [n.labels for n in nodes],
                [{'TestNode'}, {'TestNode'}]
            )
            self.assertCountEqual(
                [n['name'] for n in nodes],
                ['example', '模範']
            )
            self.assertCountEqual(
                [n['potato'] for n in nodes],
                ['desirée', 'じゃが芋']
            )
            self.assertEqual(len(rels), 1)
            rel = next(iter(rels))
            self.assertEqual(rel.type, 'Translation')
            self.assertEqual(rel['lang'], 'ja')
            self.assertEqual(rel.start_node.labels, {'TestNode'})
            self.assertEqual(rel.start_node['name'], 'example')
            self.assertEqual(rel.start_node['potato'], 'desirée')
            self.assertEqual(rel.end_node.labels, {'TestNode'})
            self.assertEqual(rel.end_node['name'], '模範')
            self.assertEqual(rel.end_node['potato'], 'じゃが芋')
| 37.429719 | 117 | 0.578004 |
5ac3ef800afcdf2182194670858b4bd20e86238e | 33,230 | py | Python | sendgrid/helpers/mail/mail.py | itismichael/sendgrid-python | 09f787662e247f102494ce43091503daecc4f86a | [
"MIT"
] | null | null | null | sendgrid/helpers/mail/mail.py | itismichael/sendgrid-python | 09f787662e247f102494ce43091503daecc4f86a | [
"MIT"
] | 5 | 2020-06-19T05:13:55.000Z | 2022-03-12T00:36:17.000Z | virtual/lib/python3.6/site-packages/sendgrid/helpers/mail/mail.py | rosekairu/KairuBlog | 3a5f737f64b8146ff64aad6e516a1147aecb0363 | [
"MIT"
] | 2 | 2020-06-18T15:41:36.000Z | 2020-06-18T18:29:09.000Z | """Twilio SendGrid v3/mail/send response body builder"""
from .bcc_email import Bcc
from .cc_email import Cc
from .content import Content
from .custom_arg import CustomArg
from .dynamic_template_data import DynamicTemplateData
from .email import Email
from .from_email import From
from .header import Header
from .mime_type import MimeType
from .personalization import Personalization
from .reply_to import ReplyTo
from .send_at import SendAt
from .subject import Subject
from .substitution import Substitution
from .template_id import TemplateId
from .to_email import To
class Mail(object):
"""Creates the response body for v3/mail/send"""
    def __init__(
            self,
            from_email=None,
            to_emails=None,
            subject=None,
            plain_text_content=None,
            html_content=None,
            global_substitutions=None,
            is_multiple=False):
        """
        Creates the response body for a v3/mail/send API call

        :param from_email: The email address of the sender
        :type from_email: From, tuple, optional
        :param to_emails: The email address of the recipient
        :type to_emails: To, tuple, optional
        :param subject: The subject of the email
        :type subject: Subject, optional
        :param plain_text_content: The plain text body of the email
        :type plain_text_content: string, optional
        :param html_content: The html body of the email
        :type html_content: string, optional
        :param global_substitutions: Substitutions applied for all recipients
        :type global_substitutions: dict, optional
        :param is_multiple: When True, a separate Personalization (i.e. a
            separate email) is created per recipient; when False all
            recipients share one
        :type is_multiple: bool, optional
        """
        # Optional fields; populated later through their setters/add_* helpers.
        self._attachments = None
        self._categories = None
        self._contents = None
        self._custom_args = None
        self._headers = None
        self._personalizations = []
        self._sections = None
        self._asm = None
        self._batch_id = None
        self._from_email = None
        self._ip_pool_name = None
        self._mail_settings = None
        self._reply_to = None
        self._send_at = None
        self._subject = None
        self._template_id = None
        self._tracking_settings = None
        # Minimum required data to send a single email
        if from_email is not None:
            self.from_email = from_email
        if to_emails is not None:
            self.add_to(to_emails, global_substitutions, is_multiple)
        if subject is not None:
            self.subject = subject
        if plain_text_content is not None:
            # add_content keeps text/plain ahead of other mime types
            self.add_content(plain_text_content, MimeType.text)
        if html_content is not None:
            self.add_content(html_content, MimeType.html)
def __str__(self):
"""A JSON-ready string representation of this Mail object.
:returns: A JSON-ready string representation of this Mail object.
:rtype: string
"""
return str(self.get())
def _ensure_append(self, new_items, append_to, index=0):
"""Ensure an item is appended to a list or create a new empty list
:param new_items: the item(s) to append
:type new_items: list(obj)
:param append_to: the list on which to append the items
:type append_to: list()
:param index: index of the list on which to append the items
:type index: int
"""
append_to = append_to or []
append_to.insert(index, new_items)
return append_to
def _ensure_insert(self, new_items, insert_to):
"""Ensure an item is inserted to a list or create a new empty list
:param new_items: the item(s) to insert
:type new_items: list(obj)
:param insert_to: the list on which to insert the items at index 0
:type insert_to: list()
"""
insert_to = insert_to or []
insert_to.insert(0, new_items)
return insert_to
def _flatten_dicts(self, dicts):
"""Flatten a dict
:param dicts: Flatten a dict
:type dicts: list(dict)
"""
d = dict()
list_of_dicts = [d.get() for d in dicts or []]
return {k: v for d in list_of_dicts for k, v in d.items()}
def _get_or_none(self, from_obj):
"""Get the JSON representation of the object, else return None
:param from_obj: Get the JSON representation of the object,
else return None
:type from_obj: obj
"""
return from_obj.get() if from_obj is not None else None
def _set_emails(
self, emails, global_substitutions=None, is_multiple=False, p=0):
"""Adds emails to the Personalization object
:param emails: An Email or list of Email objects
:type emails: Email, list(Email)
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
# Send multiple emails to multiple recipients
if is_multiple is True:
if isinstance(emails, list):
for email in emails:
personalization = Personalization()
personalization.add_email(email)
self.add_personalization(personalization)
else:
personalization = Personalization()
personalization.add_email(emails)
self.add_personalization(personalization)
if global_substitutions is not None:
if isinstance(global_substitutions, list):
for substitution in global_substitutions:
for p in self.personalizations:
p.add_substitution(substitution)
else:
for p in self.personalizations:
p.add_substitution(global_substitutions)
else:
try:
personalization = self._personalizations[p]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
if isinstance(emails, list):
for email in emails:
personalization.add_email(email)
else:
personalization.add_email(emails)
if global_substitutions is not None:
if isinstance(global_substitutions, list):
for substitution in global_substitutions:
personalization.add_substitution(substitution)
else:
personalization.add_substitution(global_substitutions)
if not has_internal_personalization:
self.add_personalization(personalization, index=p)
@property
def personalizations(self):
"""A list of one or more Personaliztion objects
:rtype: list(Personalization)
"""
return self._personalizations
def add_personalization(self, personalization, index=0):
"""Add a Personaliztion object
:param personalizations: Add a Personalization object
:type personalizations: Personalization
:param index: The index where to add the Personalization
:type index: int
"""
self._personalizations = self._ensure_append(
personalization, self._personalizations, index)
@property
def to(self):
pass
@to.setter
def to(self, to_emails, global_substitutions=None, is_multiple=False, p=0):
"""Adds To objects to the Personalization object
:param to_emails: An To or list of To objects
:type to_emails: To, list(To), str, tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(to_emails, list):
for email in to_emails:
if isinstance(email, str):
email = To(email, None)
if isinstance(email, tuple):
email = To(email[0], email[1])
self.add_to(email, global_substitutions, is_multiple, p)
else:
if isinstance(to_emails, str):
to_emails = To(to_emails, None)
if isinstance(to_emails, tuple):
to_emails = To(to_emails[0], to_emails[1])
self.add_to(to_emails, global_substitutions, is_multiple, p)
def add_to(
self, to_email, global_substitutions=None, is_multiple=False, p=0):
"""Adds a To object to the Personalization object
:param to_email: A To object
:type to_email: To, str, tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(to_email, list):
for email in to_email:
if isinstance(email, str):
email = To(email, None)
if isinstance(email, tuple):
email = To(email[0], email[1])
self._set_emails(email, global_substitutions, is_multiple, p)
else:
if isinstance(to_email, str):
to_email = To(to_email, None)
if isinstance(to_email, tuple):
to_email = To(to_email[0], to_email[1])
if isinstance(to_email, Email):
p = to_email.personalization
self._set_emails(to_email, global_substitutions, is_multiple, p)
@property
def cc(self):
pass
@cc.setter
def cc(self, cc_emails, global_substitutions=None, is_multiple=False, p=0):
"""Adds Cc objects to the Personalization object
:param cc_emails: An Cc or list of Cc objects
:type cc_emails: Cc, list(Cc), tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(cc_emails, list):
for email in cc_emails:
if isinstance(email, str):
email = Cc(email, None)
if isinstance(email, tuple):
email = Cc(email[0], email[1])
self.add_cc(email, global_substitutions, is_multiple, p)
else:
if isinstance(cc_emails, str):
cc_emails = Cc(cc_emails, None)
if isinstance(cc_emails, tuple):
cc_emails = To(cc_emails[0], cc_emails[1])
self.add_cc(cc_emails, global_substitutions, is_multiple, p)
def add_cc(
self, cc_email, global_substitutions=None, is_multiple=False, p=0):
"""Adds a Cc object to the Personalization object
:param to_emails: An Cc object
:type to_emails: Cc
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(cc_email, str):
cc_email = Cc(cc_email, None)
if isinstance(cc_email, tuple):
cc_email = Cc(cc_email[0], cc_email[1])
if isinstance(cc_email, Email):
p = cc_email.personalization
self._set_emails(
cc_email, global_substitutions, is_multiple=is_multiple, p=p)
@property
def bcc(self):
pass
@bcc.setter
def bcc(
self,
bcc_emails,
global_substitutions=None,
is_multiple=False,
p=0):
"""Adds Bcc objects to the Personalization object
:param bcc_emails: An Bcc or list of Bcc objects
:type bcc_emails: Bcc, list(Bcc), tuple
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(bcc_emails, list):
for email in bcc_emails:
if isinstance(email, str):
email = Bcc(email, None)
if isinstance(email, tuple):
email = Bcc(email[0], email[1])
self.add_bcc(email, global_substitutions, is_multiple, p)
else:
if isinstance(bcc_emails, str):
bcc_emails = Bcc(bcc_emails, None)
if isinstance(bcc_emails, tuple):
bcc_emails = Bcc(bcc_emails[0], bcc_emails[1])
self.add_bcc(bcc_emails, global_substitutions, is_multiple, p)
def add_bcc(
self,
bcc_email,
global_substitutions=None,
is_multiple=False,
p=0):
"""Adds a Bcc object to the Personalization object
:param to_emails: An Bcc object
:type to_emails: Bcc
:param global_substitutions: A dict of substitutions for all recipients
:type global_substitutions: dict
:param is_multiple: Create a new personalization for each recipient
:type is_multiple: bool
:param p: p is the Personalization object or Personalization object
index
:type p: Personalization, integer, optional
"""
if isinstance(bcc_email, str):
bcc_email = Bcc(bcc_email, None)
if isinstance(bcc_email, tuple):
bcc_email = Bcc(bcc_email[0], bcc_email[1])
if isinstance(bcc_email, Email):
p = bcc_email.personalization
self._set_emails(
bcc_email,
global_substitutions,
is_multiple=is_multiple,
p=p)
@property
def subject(self):
"""The global Subject object
:rtype: Subject
"""
return self._subject
@subject.setter
def subject(self, value):
"""The subject of the email(s)
:param value: The subject of the email(s)
:type value: Subject, string
"""
if isinstance(value, Subject):
if value.personalization is not None:
try:
personalization = \
self._personalizations[value.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
personalization.subject = value.subject
if not has_internal_personalization:
self.add_personalization(
personalization,
index=value.personalization)
else:
self._subject = value
else:
self._subject = Subject(value)
@property
def headers(self):
"""A list of global Header objects
:rtype: list(Header)
"""
return self._headers
@property
def header(self):
pass
@header.setter
def header(self, headers):
"""Add headers to the email
:param value: A list of Header objects or a dict of header key/values
:type value: Header, list(Header), dict
"""
if isinstance(headers, list):
for h in headers:
self.add_header(h)
else:
self.add_header(headers)
def add_header(self, header):
"""Add headers to the email globaly or to a specific Personalization
:param value: A Header object or a dict of header key/values
:type value: Header, dict
"""
if header.personalization is not None:
try:
personalization = \
self._personalizations[header.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
if isinstance(header, dict):
(k, v) = list(header.items())[0]
personalization.add_header(Header(k, v))
else:
personalization.add_header(header)
if not has_internal_personalization:
self.add_personalization(
personalization,
index=header.personalization)
else:
if isinstance(header, dict):
(k, v) = list(header.items())[0]
self._headers = self._ensure_append(
Header(k, v), self._headers)
else:
self._headers = self._ensure_append(header, self._headers)
@property
def substitution(self):
pass
@substitution.setter
def substitution(self, substitution):
"""Add substitutions to the email
:param value: Add substitutions to the email
:type value: Substitution, list(Substitution)
"""
if isinstance(substitution, list):
for s in substitution:
self.add_substitution(s)
else:
self.add_substitution(substitution)
def add_substitution(self, substitution):
"""Add a substitution to the email
:param value: Add a substitution to the email
:type value: Substitution
"""
if substitution.personalization:
try:
personalization = \
self._personalizations[substitution.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
personalization.add_substitution(substitution)
if not has_internal_personalization:
self.add_personalization(
personalization, index=substitution.personalization)
else:
if isinstance(substitution, list):
for s in substitution:
for p in self.personalizations:
p.add_substitution(s)
else:
for p in self.personalizations:
p.add_substitution(substitution)
@property
def custom_args(self):
"""A list of global CustomArg objects
:rtype: list(CustomArg)
"""
return self._custom_args
@property
def custom_arg(self):
return self._custom_args
@custom_arg.setter
def custom_arg(self, custom_arg):
"""Add custom args to the email
:param value: A list of CustomArg objects or a dict of custom arg
key/values
:type value: CustomArg, list(CustomArg), dict
"""
if isinstance(custom_arg, list):
for c in custom_arg:
self.add_custom_arg(c)
else:
self.add_custom_arg(custom_arg)
def add_custom_arg(self, custom_arg):
"""Add custom args to the email globaly or to a specific Personalization
:param value: A CustomArg object or a dict of custom arg key/values
:type value: CustomArg, dict
"""
if custom_arg.personalization is not None:
try:
personalization = \
self._personalizations[custom_arg.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
if isinstance(custom_arg, dict):
(k, v) = list(custom_arg.items())[0]
personalization.add_custom_arg(CustomArg(k, v))
else:
personalization.add_custom_arg(custom_arg)
if not has_internal_personalization:
self.add_personalization(
personalization, index=custom_arg.personalization)
else:
if isinstance(custom_arg, dict):
(k, v) = list(custom_arg.items())[0]
self._custom_args = self._ensure_append(
CustomArg(k, v), self._custom_args)
else:
self._custom_args = self._ensure_append(
custom_arg, self._custom_args)
@property
def send_at(self):
"""The global SendAt object
:rtype: SendAt
"""
return self._send_at
@send_at.setter
def send_at(self, value):
"""A unix timestamp specifying when your email should
be delivered.
:param value: A unix timestamp specifying when your email should
be delivered.
:type value: SendAt, int
"""
if isinstance(value, SendAt):
if value.personalization is not None:
try:
personalization = \
self._personalizations[value.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
personalization.send_at = value.send_at
if not has_internal_personalization:
self.add_personalization(
personalization, index=value.personalization)
else:
self._send_at = value
else:
self._send_at = SendAt(value)
@property
def dynamic_template_data(self):
pass
@dynamic_template_data.setter
def dynamic_template_data(self, value):
"""Data for a transactional template
:param value: Data for a transactional template
:type value: DynamicTemplateData, a JSON-serializeable structure
"""
if not isinstance(value, DynamicTemplateData):
value = DynamicTemplateData(value)
try:
personalization = self._personalizations[value.personalization]
has_internal_personalization = True
except IndexError:
personalization = Personalization()
has_internal_personalization = False
personalization.dynamic_template_data = value.dynamic_template_data
if not has_internal_personalization:
self.add_personalization(
personalization, index=value.personalization)
@property
def from_email(self):
"""The email address of the sender
:rtype: From
"""
return self._from_email
@from_email.setter
def from_email(self, value):
"""The email address of the sender
:param value: The email address of the sender
:type value: From, str, tuple
"""
if isinstance(value, str):
value = From(value, None)
if isinstance(value, tuple):
value = From(value[0], value[1])
self._from_email = value
@property
def reply_to(self):
"""The reply to email address
:rtype: ReplyTo
"""
return self._reply_to
@reply_to.setter
def reply_to(self, value):
"""The reply to email address
:param value: The reply to email address
:type value: ReplyTo, str, tuple
"""
if isinstance(value, str):
value = ReplyTo(value, None)
if isinstance(value, tuple):
value = ReplyTo(value[0], value[1])
self._reply_to = value
@property
def contents(self):
"""The contents of the email
:rtype: list(Content)
"""
return self._contents
@property
def content(self):
pass
@content.setter
def content(self, contents):
"""The content(s) of the email
:param contents: The content(s) of the email
:type contents: Content, list(Content)
"""
if isinstance(contents, list):
for c in contents:
self.add_content(c)
else:
self.add_content(contents)
def add_content(self, content, mime_type=None):
"""Add content to the email
:param contents: Content to be added to the email
:type contents: Content
:param mime_type: Override the mime type
:type mime_type: MimeType, str
"""
if isinstance(content, str):
content = Content(mime_type, content)
# Content of mime type text/plain must always come first
if content.mime_type == "text/plain":
self._contents = self._ensure_insert(content, self._contents)
else:
if self._contents:
index = len(self._contents)
else:
index = 0
self._contents = self._ensure_append(
content, self._contents, index=index)
@property
def attachments(self):
"""The attachments to this email
:rtype: list(Attachment)
"""
return self._attachments
@property
def attachment(self):
pass
@attachment.setter
def attachment(self, attachment):
"""Add attachment(s) to this email
:param attachment: Add attachment(s) to this email
:type attachment: Attachment, list(Attachment)
"""
if isinstance(attachment, list):
for a in attachment:
self.add_attachment(a)
else:
self.add_attachment(attachment)
def add_attachment(self, attachment):
"""Add an attachment to this email
:param attachment: Add an attachment to this email
:type attachment: Attachment
"""
self._attachments = self._ensure_append(attachment, self._attachments)
@property
def template_id(self):
"""The transactional template id for this email
:rtype: TemplateId
"""
return self._template_id
@template_id.setter
def template_id(self, value):
"""The transactional template id for this email
:param value: The transactional template id for this email
:type value: TemplateId
"""
if isinstance(value, TemplateId):
self._template_id = value
else:
self._template_id = TemplateId(value)
@property
def sections(self):
"""The block sections of code to be used as substitutions
:rtype: Section
"""
return self._sections
@property
def section(self):
pass
@section.setter
def section(self, section):
"""The block sections of code to be used as substitutions
:rtype: Section, list(Section)
"""
if isinstance(section, list):
for h in section:
self.add_section(h)
else:
self.add_section(section)
def add_section(self, section):
"""A block section of code to be used as substitutions
:param section: A block section of code to be used as substitutions
:type section: Section
"""
self._sections = self._ensure_append(section, self._sections)
@property
def categories(self):
"""The categories assigned to this message
:rtype: list(Category)
"""
return self._categories
@property
def category(self):
pass
@category.setter
def category(self, categories):
"""Add categories assigned to this message
:rtype: list(Category)
"""
if isinstance(categories, list):
for c in categories:
self.add_category(c)
else:
self.add_category(categories)
def add_category(self, category):
"""Add a category assigned to this message
:rtype: Category
"""
self._categories = self._ensure_append(category, self._categories)
@property
def batch_id(self):
"""The batch id for this email
:rtype: BatchId
"""
return self._batch_id
@batch_id.setter
def batch_id(self, value):
"""The batch id for this email
:param value: The batch id for this email
:type value: BatchId
"""
self._batch_id = value
@property
def asm(self):
"""An object specifying unsubscribe behavior.
:rtype: Asm
"""
return self._asm
@asm.setter
def asm(self, value):
"""An object specifying unsubscribe behavior.
:param value: An object specifying unsubscribe behavior.
:type value: Asm
"""
self._asm = value
@property
def ip_pool_name(self):
"""The IP Pool that you would like to send this email from
:rtype: IpPoolName
"""
return self._ip_pool_name
@ip_pool_name.setter
def ip_pool_name(self, value):
"""The IP Pool that you would like to send this email from
:paran value: The IP Pool that you would like to send this email from
:type value: IpPoolName
"""
self._ip_pool_name = value
@property
def mail_settings(self):
"""The mail settings for this email
:rtype: MailSettings
"""
return self._mail_settings
@mail_settings.setter
def mail_settings(self, value):
"""The mail settings for this email
:param value: The mail settings for this email
:type value: MailSettings
"""
self._mail_settings = value
@property
def tracking_settings(self):
"""The tracking settings for this email
:rtype: TrackingSettings
"""
return self._tracking_settings
@tracking_settings.setter
def tracking_settings(self, value):
"""The tracking settings for this email
:param value: The tracking settings for this email
:type value: TrackingSettings
"""
self._tracking_settings = value
def get(self):
"""
Get a JSON-ready representation of this Mail object.
:returns: This Mail object, ready for use in a request body.
:rtype: dict
"""
mail = {
'from': self._get_or_none(self.from_email),
'subject': self._get_or_none(self.subject),
'personalizations': [p.get() for p in self.personalizations or []],
'content': [c.get() for c in self.contents or []],
'attachments': [a.get() for a in self.attachments or []],
'template_id': self._get_or_none(self.template_id),
'sections': self._flatten_dicts(self.sections),
'headers': self._flatten_dicts(self.headers),
'categories': [c.get() for c in self.categories or []],
'custom_args': self._flatten_dicts(self.custom_args),
'send_at': self._get_or_none(self.send_at),
'batch_id': self._get_or_none(self.batch_id),
'asm': self._get_or_none(self.asm),
'ip_pool_name': self._get_or_none(self.ip_pool_name),
'mail_settings': self._get_or_none(self.mail_settings),
'tracking_settings': self._get_or_none(self.tracking_settings),
'reply_to': self._get_or_none(self.reply_to),
}
return {key: value for key, value in mail.items()
if value is not None and value != [] and value != {}}
    @classmethod
    def from_EmailMessage(cls, message):
        """Create a Mail object from an instance of
        email.message.EmailMessage.

        The ``From``, ``To`` and ``Subject`` headers and the message body
        are copied over, and every header on the message is mirrored onto
        the Mail object.

        :type message: email.message.EmailMessage
        :rtype: Mail
        """
        mail = cls(
            from_email=Email(message.get('From')),
            subject=message.get('Subject'),
            to_emails=Email(message.get('To')),
        )
        try:
            # Python 3's EmailMessage exposes the decoded body here.
            body = message.get_content()
        except AttributeError:
            # Python2
            body = message.get_payload()
        mail.add_content(Content(
            message.get_content_type(),
            body.strip()
        ))
        for k, v in message.items():
            mail.add_header(Header(k, v))
        return mail
| 33.565657 | 80 | 0.591333 |
b8cb3b8a78780be2ae66c680c345ee12e526f127 | 2,076 | py | Python | klifs_utils/local/kinases.py | volkamerlab/klifs_utils | 3cca66b449ea9999c2687f9b1321faca9b5aa63e | [
"MIT"
] | 6 | 2020-06-23T14:44:25.000Z | 2022-02-25T05:18:38.000Z | klifs_utils/local/kinases.py | hlzfoxcn/klifs_utils | d7fe549e8b66674444c19513af26d661ae157a47 | [
"MIT"
] | 11 | 2020-03-12T13:57:11.000Z | 2021-09-17T18:20:33.000Z | klifs_utils/local/kinases.py | dominiquesydow/klifs_utils | 3cca66b449ea9999c2687f9b1321faca9b5aa63e | [
"MIT"
] | 3 | 2020-04-22T15:45:10.000Z | 2021-01-04T03:30:48.000Z | """
klifs_utils
Utility functions to work with KLIFS data (local)
Kinase details.
"""
def kinase_groups(klifs_metadata):
    """
    Get all kinase groups.

    Parameters
    ----------
    klifs_metadata : pandas.DataFrame
        KLIFS metadata with a ``group`` column.

    Returns
    -------
    list of str
        Kinase group names (first-occurrence order, no duplicates).
    """
    return klifs_metadata["group"].unique().tolist()
def kinase_families(klifs_metadata, kinase_group=None):
    """
    Get all kinase families, optionally restricted to one kinase group.

    Parameters
    ----------
    klifs_metadata : pandas.DataFrame
        KLIFS metadata with ``group`` and ``family`` columns.
    kinase_group : None or str
        Kinase group name (default is None, i.e. all kinase groups are selected).

    Returns
    -------
    list of str
        Kinase family names (first-occurrence order, no duplicates).
    """
    # Truthiness check intentionally matches the historical behavior:
    # an empty-string group selects all groups, just like None.
    subset = klifs_metadata[klifs_metadata["group"] == kinase_group] if kinase_group else klifs_metadata
    return subset["family"].unique().tolist()
def kinase_names(klifs_metadata):
    """
    Get all unique (kinase, species) pairs from the metadata.

    Parameters
    ----------
    klifs_metadata : pandas.DataFrame
        KLIFS metadata with ``kinase`` and ``species`` columns.

    Returns
    -------
    pandas.DataFrame
        Kinase names with species, one row per unique pair.
    """
    deduplicated = klifs_metadata.drop_duplicates(subset=["kinase", "species"])
    return deduplicated[["kinase", "species"]]
def kinases_from_kinase_names(klifs_metadata, kinase_names, species=None):
    """
    Get all kinases (+details) by kinase name(s).

    Fix: this was an unimplemented stub that always returned None even
    though the docstring promised a DataFrame; it now filters the metadata.

    Parameters
    ----------
    klifs_metadata : pandas.DataFrame
        KLIFS metadata with ``kinase`` and ``species`` columns.
    kinase_names : str or list of str
        Kinase name(s).
    species : None or str
        Species name (default is None, i.e. all species are selected).

    Returns
    -------
    pandas.DataFrame
        Kinase(s) details.
    """
    # Accept a single name for convenience, mirroring the docstring.
    if isinstance(kinase_names, str):
        kinase_names = [kinase_names]
    kinases = klifs_metadata[klifs_metadata.kinase.isin(kinase_names)]
    # Truthiness check: None (or empty string) means "all species".
    if species:
        kinases = kinases[kinases.species == species]
    return kinases
def kinases_from_kinase_ids(klifs_metadata, kinase_ids):
    """
    Get all kinases (+details) by KLIFS kinase ID(s).
    Parameters
    ----------
    kinase_ids : int or list of int
        KLIFS kinase ID(s).
    Returns
    -------
    pandas.DataFrame
        Kinase(s) details.
    """
    # NOTE(review): unimplemented stub — always returns None despite the
    # docstring promising a DataFrame. TODO: filter klifs_metadata by its
    # kinase-ID column (column name not visible in this module) and return
    # the matching rows, analogous to kinases_from_kinase_names.
    return
| 20.969697 | 118 | 0.63921 |
d4125a6ab01578825f522f40572a7895defc219c | 20,104 | py | Python | hubspot/crm/extensions/accounting/models/accounting_app_urls.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | null | null | null | hubspot/crm/extensions/accounting/models/accounting_app_urls.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | null | null | null | hubspot/crm/extensions/accounting/models/accounting_app_urls.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Accounting Extension
These APIs allow you to interact with HubSpot's Accounting Extension. It allows you to: * Specify the URLs that HubSpot will use when making webhook requests to your external accounting system. * Respond to webhook calls made to your external accounting system by HubSpot # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.accounting.configuration import Configuration
class AccountingAppUrls(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> OpenAPI type; every field of this model is a URL string.
    openapi_types = {
        'get_invoice_url': 'str',
        'search_customer_url': 'str',
        'get_invoice_pdf_url': 'str',
        'customer_url_template': 'str',
        'product_url_template': 'str',
        'invoice_url_template': 'str',
        'create_invoice_url': 'str',
        'search_invoice_url': 'str',
        'search_product_url': 'str',
        'get_terms_url': 'str',
        'create_customer_url': 'str',
        'search_tax_url': 'str',
        'exchange_rate_url': 'str',
        'search_url': 'str',
        'search_count_url': 'str'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'get_invoice_url': 'getInvoiceUrl',
        'search_customer_url': 'searchCustomerUrl',
        'get_invoice_pdf_url': 'getInvoicePdfUrl',
        'customer_url_template': 'customerUrlTemplate',
        'product_url_template': 'productUrlTemplate',
        'invoice_url_template': 'invoiceUrlTemplate',
        'create_invoice_url': 'createInvoiceUrl',
        'search_invoice_url': 'searchInvoiceUrl',
        'search_product_url': 'searchProductUrl',
        'get_terms_url': 'getTermsUrl',
        'create_customer_url': 'createCustomerUrl',
        'search_tax_url': 'searchTaxUrl',
        'exchange_rate_url': 'exchangeRateUrl',
        'search_url': 'searchUrl',
        'search_count_url': 'searchCountUrl'
    }
    def __init__(self, get_invoice_url=None, search_customer_url=None, get_invoice_pdf_url=None, customer_url_template=None, product_url_template=None, invoice_url_template=None, create_invoice_url=None, search_invoice_url=None, search_product_url=None, get_terms_url=None, create_customer_url=None, search_tax_url=None, exchange_rate_url=None, search_url=None, search_count_url=None, local_vars_configuration=None): # noqa: E501
        """AccountingAppUrls - a model defined in OpenAPI""" # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._get_invoice_url = None
        self._search_customer_url = None
        self._get_invoice_pdf_url = None
        self._customer_url_template = None
        self._product_url_template = None
        self._invoice_url_template = None
        self._create_invoice_url = None
        self._search_invoice_url = None
        self._search_product_url = None
        self._get_terms_url = None
        self._create_customer_url = None
        self._search_tax_url = None
        self._exchange_rate_url = None
        self._search_url = None
        self._search_count_url = None
        self.discriminator = None
        # The first six URLs are required: their property setters raise
        # ValueError on None when client-side validation is enabled.
        self.get_invoice_url = get_invoice_url
        self.search_customer_url = search_customer_url
        self.get_invoice_pdf_url = get_invoice_pdf_url
        self.customer_url_template = customer_url_template
        self.product_url_template = product_url_template
        self.invoice_url_template = invoice_url_template
        # The remaining URLs are optional and only assigned when provided.
        if create_invoice_url is not None:
            self.create_invoice_url = create_invoice_url
        if search_invoice_url is not None:
            self.search_invoice_url = search_invoice_url
        if search_product_url is not None:
            self.search_product_url = search_product_url
        if get_terms_url is not None:
            self.get_terms_url = get_terms_url
        if create_customer_url is not None:
            self.create_customer_url = create_customer_url
        if search_tax_url is not None:
            self.search_tax_url = search_tax_url
        if exchange_rate_url is not None:
            self.exchange_rate_url = exchange_rate_url
        if search_url is not None:
            self.search_url = search_url
        if search_count_url is not None:
            self.search_count_url = search_count_url
    @property
    def get_invoice_url(self):
        """Gets the get_invoice_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where invoices can be retrieved. # noqa: E501
        :return: The get_invoice_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._get_invoice_url
    @get_invoice_url.setter
    def get_invoice_url(self, get_invoice_url):
        """Sets the get_invoice_url of this AccountingAppUrls.
        A URL that specifies the endpoint where invoices can be retrieved. # noqa: E501
        :param get_invoice_url: The get_invoice_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and get_invoice_url is None: # noqa: E501
            raise ValueError("Invalid value for `get_invoice_url`, must not be `None`") # noqa: E501
        self._get_invoice_url = get_invoice_url
    @property
    def search_customer_url(self):
        """Gets the search_customer_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where a customer search can be performed. # noqa: E501
        :return: The search_customer_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._search_customer_url
    @search_customer_url.setter
    def search_customer_url(self, search_customer_url):
        """Sets the search_customer_url of this AccountingAppUrls.
        A URL that specifies the endpoint where a customer search can be performed. # noqa: E501
        :param search_customer_url: The search_customer_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and search_customer_url is None: # noqa: E501
            raise ValueError("Invalid value for `search_customer_url`, must not be `None`") # noqa: E501
        self._search_customer_url = search_customer_url
    @property
    def get_invoice_pdf_url(self):
        """Gets the get_invoice_pdf_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where an invoice PDF can be retrieved. # noqa: E501
        :return: The get_invoice_pdf_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._get_invoice_pdf_url
    @get_invoice_pdf_url.setter
    def get_invoice_pdf_url(self, get_invoice_pdf_url):
        """Sets the get_invoice_pdf_url of this AccountingAppUrls.
        A URL that specifies the endpoint where an invoice PDF can be retrieved. # noqa: E501
        :param get_invoice_pdf_url: The get_invoice_pdf_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and get_invoice_pdf_url is None: # noqa: E501
            raise ValueError("Invalid value for `get_invoice_pdf_url`, must not be `None`") # noqa: E501
        self._get_invoice_pdf_url = get_invoice_pdf_url
    @property
    def customer_url_template(self):
        """Gets the customer_url_template of this AccountingAppUrls. # noqa: E501
        A template URL that indicates the endpoint where a customer can be fetched by ID. Note that ${CUSTOMER_ID} in this URL will be replaced by the actual customer ID. For example: https://myapp.com/api/customers/${CUSTOMER_ID} # noqa: E501
        :return: The customer_url_template of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._customer_url_template
    @customer_url_template.setter
    def customer_url_template(self, customer_url_template):
        """Sets the customer_url_template of this AccountingAppUrls.
        A template URL that indicates the endpoint where a customer can be fetched by ID. Note that ${CUSTOMER_ID} in this URL will be replaced by the actual customer ID. For example: https://myapp.com/api/customers/${CUSTOMER_ID} # noqa: E501
        :param customer_url_template: The customer_url_template of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and customer_url_template is None: # noqa: E501
            raise ValueError("Invalid value for `customer_url_template`, must not be `None`") # noqa: E501
        self._customer_url_template = customer_url_template
    @property
    def product_url_template(self):
        """Gets the product_url_template of this AccountingAppUrls. # noqa: E501
        A template URL that indicates the endpoint where a product can be fetched by ID. Note that ${PRODUCT_ID} in this URL will be replaced by the actual product ID. For example: https://myapp.com/api/products/${PRODUCT_ID} # noqa: E501
        :return: The product_url_template of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._product_url_template
    @product_url_template.setter
    def product_url_template(self, product_url_template):
        """Sets the product_url_template of this AccountingAppUrls.
        A template URL that indicates the endpoint where a product can be fetched by ID. Note that ${PRODUCT_ID} in this URL will be replaced by the actual product ID. For example: https://myapp.com/api/products/${PRODUCT_ID} # noqa: E501
        :param product_url_template: The product_url_template of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and product_url_template is None: # noqa: E501
            raise ValueError("Invalid value for `product_url_template`, must not be `None`") # noqa: E501
        self._product_url_template = product_url_template
    @property
    def invoice_url_template(self):
        """Gets the invoice_url_template of this AccountingAppUrls. # noqa: E501
        A template URL that indicates the endpoint where an invoice can be fetched by ID. Note that ${INVOICE_ID} in this URL will be replaced by the actual invoice ID. For example: https://myapp.com/api/invoices/${INVOICE_ID} # noqa: E501
        :return: The invoice_url_template of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._invoice_url_template
    @invoice_url_template.setter
    def invoice_url_template(self, invoice_url_template):
        """Sets the invoice_url_template of this AccountingAppUrls.
        A template URL that indicates the endpoint where an invoice can be fetched by ID. Note that ${INVOICE_ID} in this URL will be replaced by the actual invoice ID. For example: https://myapp.com/api/invoices/${INVOICE_ID} # noqa: E501
        :param invoice_url_template: The invoice_url_template of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and invoice_url_template is None: # noqa: E501
            raise ValueError("Invalid value for `invoice_url_template`, must not be `None`") # noqa: E501
        self._invoice_url_template = invoice_url_template
    @property
    def create_invoice_url(self):
        """Gets the create_invoice_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where an invoices can be created. # noqa: E501
        :return: The create_invoice_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._create_invoice_url
    @create_invoice_url.setter
    def create_invoice_url(self, create_invoice_url):
        """Sets the create_invoice_url of this AccountingAppUrls.
        A URL that specifies the endpoint where an invoices can be created. # noqa: E501
        :param create_invoice_url: The create_invoice_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._create_invoice_url = create_invoice_url
    @property
    def search_invoice_url(self):
        """Gets the search_invoice_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where an invoice search can be performed. # noqa: E501
        :return: The search_invoice_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._search_invoice_url
    @search_invoice_url.setter
    def search_invoice_url(self, search_invoice_url):
        """Sets the search_invoice_url of this AccountingAppUrls.
        A URL that specifies the endpoint where an invoice search can be performed. # noqa: E501
        :param search_invoice_url: The search_invoice_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._search_invoice_url = search_invoice_url
    @property
    def search_product_url(self):
        """Gets the search_product_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where a product search can be performed. # noqa: E501
        :return: The search_product_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._search_product_url
    @search_product_url.setter
    def search_product_url(self, search_product_url):
        """Sets the search_product_url of this AccountingAppUrls.
        A URL that specifies the endpoint where a product search can be performed. # noqa: E501
        :param search_product_url: The search_product_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._search_product_url = search_product_url
    @property
    def get_terms_url(self):
        """Gets the get_terms_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where payment terms can be retrieved. # noqa: E501
        :return: The get_terms_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._get_terms_url
    @get_terms_url.setter
    def get_terms_url(self, get_terms_url):
        """Sets the get_terms_url of this AccountingAppUrls.
        A URL that specifies the endpoint where payment terms can be retrieved. # noqa: E501
        :param get_terms_url: The get_terms_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._get_terms_url = get_terms_url
    @property
    def create_customer_url(self):
        """Gets the create_customer_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where a new customer can be created. # noqa: E501
        :return: The create_customer_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._create_customer_url
    @create_customer_url.setter
    def create_customer_url(self, create_customer_url):
        """Sets the create_customer_url of this AccountingAppUrls.
        A URL that specifies the endpoint where a new customer can be created. # noqa: E501
        :param create_customer_url: The create_customer_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._create_customer_url = create_customer_url
    @property
    def search_tax_url(self):
        """Gets the search_tax_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where a tax search can be performed. # noqa: E501
        :return: The search_tax_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._search_tax_url
    @search_tax_url.setter
    def search_tax_url(self, search_tax_url):
        """Sets the search_tax_url of this AccountingAppUrls.
        A URL that specifies the endpoint where a tax search can be performed. # noqa: E501
        :param search_tax_url: The search_tax_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._search_tax_url = search_tax_url
    @property
    def exchange_rate_url(self):
        """Gets the exchange_rate_url of this AccountingAppUrls. # noqa: E501
        A URL that specifies the endpoint where exchange rates can be queried. # noqa: E501
        :return: The exchange_rate_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._exchange_rate_url
    @exchange_rate_url.setter
    def exchange_rate_url(self, exchange_rate_url):
        """Sets the exchange_rate_url of this AccountingAppUrls.
        A URL that specifies the endpoint where exchange rates can be queried. # noqa: E501
        :param exchange_rate_url: The exchange_rate_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._exchange_rate_url = exchange_rate_url
    @property
    def search_url(self):
        """Gets the search_url of this AccountingAppUrls. # noqa: E501
        :return: The search_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._search_url
    @search_url.setter
    def search_url(self, search_url):
        """Sets the search_url of this AccountingAppUrls.
        :param search_url: The search_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._search_url = search_url
    @property
    def search_count_url(self):
        """Gets the search_count_url of this AccountingAppUrls. # noqa: E501
        :return: The search_count_url of this AccountingAppUrls. # noqa: E501
        :rtype: str
        """
        return self._search_count_url
    @search_count_url.setter
    def search_count_url(self, search_count_url):
        """Sets the search_count_url of this AccountingAppUrls.
        :param search_count_url: The search_count_url of this AccountingAppUrls. # noqa: E501
        :type: str
        """
        self._search_count_url = search_count_url
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models: anything exposing to_dict()
        # (including elements of lists and values of dicts) is expanded.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AccountingAppUrls):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, AccountingAppUrls):
            return True
        return self.to_dict() != other.to_dict()
| 38.88588 | 430 | 0.67499 |
ed8cb385ed41bae75d6d1c20ec3610c20fce9d37 | 2,574 | py | Python | topomc/symbol.py | ryan-mooore/topomc | 049bc0642c6c75ffe615ab86638d8848bf1a79d8 | [
"MIT"
] | 7 | 2020-05-21T02:50:22.000Z | 2022-02-06T13:39:41.000Z | topomc/symbol.py | ryan-mooore/topomc | 049bc0642c6c75ffe615ab86638d8848bf1a79d8 | [
"MIT"
] | 6 | 2020-04-07T09:48:11.000Z | 2022-02-20T22:59:58.000Z | topomc/symbol.py | ryan-mooore/topomc | 049bc0642c6c75ffe615ab86638d8848bf1a79d8 | [
"MIT"
] | 3 | 2021-01-18T04:28:26.000Z | 2021-11-26T11:19:47.000Z | import logging
from enum import Enum
from topomc.common.coordinates import Coordinates
from matplotlib import pyplot as plt
from svgpath2mpl import parse_path
from xml.dom import minidom
from os import path
from topomc.common.logger import Logger
class Symbol:
    """Base class for map symbols.

    Subclasses override set_properties()/plot() (and optionally render()/
    debug()) to define how a symbol is styled and drawn.
    """

    # Shared default; subclasses may override with their own settings list.
    settings = []

    def __init__(self, processes, klass):
        """Bind this symbol to the first process of the given type.

        Bug fix: the original *returned* the matching process from
        __init__, which makes every ``Symbol(...)`` (and subclass)
        instantiation raise ``TypeError: __init__() should return None``.
        The match is now stored on ``self.process`` instead.

        Raises StopIteration if no process of type ``klass`` is present,
        matching the original lookup's behavior.
        """
        self.process = next(proc for proc in processes if isinstance(proc, klass))

    def render(self, settings):
        """Render the symbol; subclasses must override."""
        raise NotImplementedError

    def debug(self, settings):
        """Log that debugging is unsupported for this symbol type."""
        Logger.log(
            logging.critical,
            f"debugging is not supported for {self.__class__.__name__}",
        )
        raise NotImplementedError

    def set_properties(self):
        raise NotImplementedError("Cannot set properties of unspecified symbol type")

    def plot(self):
        raise NotImplementedError("Cannot plot unspecified symbol type")
class AreaSymbol(Symbol):
    """Symbol rendered as a filled polygon with an outline."""

    def set_properties(self, fillcolor, bordercolor, borderwidth):
        """Store fill/border styling; border width is scaled down by 3."""
        self.borderwidth = borderwidth / 3
        self.bordercolor = bordercolor
        self.fillcolor = fillcolor

    def plot(self, area):
        """Draw the area as a filled, outlined polygon via matplotlib."""
        boundary = Coordinates.to_list(area)
        plt.fill(
            *boundary,
            facecolor=self.fillcolor,
            edgecolor=self.bordercolor,
            linewidth=self.borderwidth,
        )
class LinearSymbol(Symbol):
    """Symbol rendered as a polyline."""

    def set_properties(self, color, linewidth):
        """Store line styling; line width is scaled down by 3."""
        self.linewidth = linewidth / 3
        self.color = color

    def plot(self, line):
        """Draw the line with the stored color and width."""
        coords = Coordinates.to_list(line)
        plt.plot(*coords, color=self.color, linewidth=self.linewidth)
class PointSymbol(Symbol):
    """Symbol rendered as a point marker, using an SVG icon by default."""
    def set_properties(self, color, pointsize=1, icon=None):
        """Store point styling.

        If no icon is supplied, loads ``assets/symbols/<ClassName>.svg``
        next to this module and converts its first <path> into a
        matplotlib marker path.
        """
        self.color = color
        if icon:
            self.icon = icon
        else:
            doc = minidom.parse(
                path.join(
                    path.dirname(__file__),
                    "assets",
                    "symbols",
                    f"{self.__class__.__name__}.svg",
                )
            )
            # Only the first <path> element of the SVG is used as the marker.
            icon = parse_path(
                [p.getAttribute("d") for p in doc.getElementsByTagName("path")][0]
            )
            doc.unlink()
            # Flip vertically: SVG's y axis points down, matplotlib's up.
            for vertice in icon.vertices:
                vertice[1] = -vertice[1]
            # Center the icon on its centroid so it plots around the point.
            icon.vertices -= icon.vertices.mean(axis=0)
            self.icon = icon
        # Doubled so the default pointsize=1 yields a visible marker.
        self.pointsize = pointsize * 2
    def plot(self, point):
        """Draw the marker at the point's coordinates (no connecting line)."""
        plt.plot(
            point.x,
            point.y,
            color=self.color,
            marker=self.icon,
            markersize=self.pointsize,
            linewidth=0,
        )
| 27.382979 | 88 | 0.590132 |
b7c7a74d0ee7a309f09fdf026a6bf1aa27bd2529 | 2,745 | py | Python | dm_pix/_src/patch_test.py | LaudateCorpus1/dm_pix | a75741220b8c3ead32ff3e9d7d38eb315d5f0ed9 | [
"Apache-2.0"
] | 219 | 2021-07-15T12:00:08.000Z | 2022-03-28T01:48:22.000Z | dm_pix/_src/patch_test.py | tkhan3/dm_pix | a75741220b8c3ead32ff3e9d7d38eb315d5f0ed9 | [
"Apache-2.0"
] | 4 | 2021-08-06T17:59:30.000Z | 2022-01-03T11:20:22.000Z | dm_pix/_src/patch_test.py | tkhan3/dm_pix | a75741220b8c3ead32ff3e9d7d38eb315d5f0ed9 | [
"Apache-2.0"
] | 12 | 2021-07-16T08:32:07.000Z | 2021-12-19T08:36:45.000Z | # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dm_pix._src.patch."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
from dm_pix._src import patch
import jax.test_util as jtu
import numpy as np
import tensorflow as tf
def _create_test_images(shape):
images = np.arange(np.prod(np.array(shape)), dtype=np.float32)
return np.reshape(images, shape)
class PatchTest(chex.TestCase, jtu.JaxTestCase, parameterized.TestCase):
  """Checks dm_pix patch extraction against tf.image.extract_patches."""
  @chex.all_variants
  @parameterized.named_parameters(
      ('padding_valid', 'VALID'),
      ('padding_same', 'SAME'),
  )
  def test_extract_patches(self, padding):
    # NHWC batch with distinct, predictable pixel values (0..N-1).
    image_shape = (2, 5, 7, 3)
    images = _create_test_images(image_shape)
    sizes = (1, 2, 3, 1)
    strides = (1, 1, 2, 1)
    rates = (1, 2, 1, 1)
    # sizes/strides/rates must be static under jit, hence static_argnums.
    extract_patches = self.variant(
        functools.partial(patch.extract_patches, padding=padding),
        static_argnums=(1, 2, 3))
    jax_patches = extract_patches(
        images,
        sizes,
        strides,
        rates,
    )
    # TensorFlow's implementation is the reference behavior.
    tf_patches = tf.image.extract_patches(
        images,
        sizes=sizes,
        strides=strides,
        rates=rates,
        padding=padding,
    )
    self.assertArraysEqual(jax_patches, tf_patches.numpy())
  @chex.all_variants
  @parameterized.product(
      ({
          'sizes': (1, 2, 3),
          'strides': (1, 1, 2, 1),
          'rates': (1, 2, 1, 1),
      }, {
          'sizes': (1, 2, 3, 1),
          'strides': (1, 1, 2),
          'rates': (1, 2, 1, 1),
      }, {
          'sizes': (1, 2, 3, 1),
          'strides': (1, 1, 2, 1),
          'rates': (1, 2, 1),
      }),
      padding=('VALID', 'SAME'),
  )
  def test_extract_patches_raises(self, sizes, strides, rates, padding):
    # Each parameter set above makes exactly one argument length-3;
    # extract_patches requires length-4 (N, H, W, C) tuples, so it must raise.
    image_shape = (2, 5, 7, 3)
    images = _create_test_images(image_shape)
    extract_patches = self.variant(
        functools.partial(patch.extract_patches, padding=padding),
        static_argnums=(1, 2, 3))
    with self.assertRaises(ValueError):
      extract_patches(
          images,
          sizes,
          strides,
          rates,
      )
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| 27.45 | 74 | 0.630601 |
cfb529398a718d79324ec7f0ff98db0e09ca84a8 | 2,281 | py | Python | frontend/location.py | nezihsunman/CS350ProjectCreatingALinuxCommand | 7be4139adffc3446d0b518f383cd566e3436dc68 | [
"Apache-2.0"
] | null | null | null | frontend/location.py | nezihsunman/CS350ProjectCreatingALinuxCommand | 7be4139adffc3446d0b518f383cd566e3436dc68 | [
"Apache-2.0"
] | null | null | null | frontend/location.py | nezihsunman/CS350ProjectCreatingALinuxCommand | 7be4139adffc3446d0b518f383cd566e3436dc68 | [
"Apache-2.0"
] | 1 | 2021-03-06T22:08:23.000Z | 2021-03-06T22:08:23.000Z | #!/usr/bin/env python2
"""
location.py - Library to get source location info from nodes.
This makes syntax errors nicer.
TODO: Move some of osh/word_ here.
"""
from __future__ import print_function
from _devbuild.gen.syntax_asdl import (
command_e, command_t, command__Pipeline, command__AndOr,
command__DoGroup, command__BraceGroup, command__Subshell,
command__WhileUntil, command__If, command__Case, command__TimeBlock,
arith_expr_e, arith_expr_t, compound_word, Token,
)
from asdl import runtime
from core.util import log
from mycpp.mylib import tagswitch
from osh import word_
from typing import cast
def SpanForCommand(node):
  # type: (command_t) -> int
  """
  Return the span id of a command node's leftmost keyword/operator token,
  like word_.LeftMostSpanForWord. Falls back to runtime.NO_SPID for node
  types that don't record a span.
  """
  # The UP_node alias plus per-branch cast() keeps the untyped and
  # downcasted views separate — required for mycpp translation to C++.
  UP_node = node # type: command_t
  tag = node.tag_()
  if tag == command_e.Pipeline:
    node = cast(command__Pipeline, UP_node)
    return node.spids[0] # first |
  if tag == command_e.AndOr:
    node = cast(command__AndOr, UP_node)
    return node.spids[0] # first && or ||
  if tag == command_e.DoGroup:
    node = cast(command__DoGroup, UP_node)
    return node.spids[0] # do spid
  if tag == command_e.BraceGroup:
    node = cast(command__BraceGroup, UP_node)
    return node.spids[0] # { spid
  if tag == command_e.Subshell:
    node = cast(command__Subshell, UP_node)
    return node.spids[0] # ( spid
  if tag == command_e.WhileUntil:
    node = cast(command__WhileUntil, UP_node)
    return node.spids[0] # while spid
  if tag == command_e.If:
    node = cast(command__If, UP_node)
    return node.arms[0].spids[0] # if spid is in FIRST arm.
  if tag == command_e.Case:
    node = cast(command__Case, UP_node)
    return node.spids[0] # case keyword spid
  if tag == command_e.TimeBlock:
    node = cast(command__TimeBlock, UP_node)
    return node.spids[0] # time keyword spid
  # We never have this case?
  #if node.tag == command_e.CommandList:
  #  pass
  return runtime.NO_SPID
def SpanForArithExpr(node):
  # type: (arith_expr_t) -> int
  # Return the span id for an arithmetic expression node, or
  # runtime.NO_SPID for node types that carry no location info.
  UP_node = node
  with tagswitch(node) as case:
    if case(arith_expr_e.VarRef):
      # A variable reference is a single token.
      token = cast(Token, UP_node)
      return token.span_id
    elif case(arith_expr_e.Word):
      w = cast(compound_word, UP_node)
      return word_.LeftMostSpanForWord(w)
  return runtime.NO_SPID
| 28.5125 | 72 | 0.704954 |
de868fb3162a30c7e73d796386c7a6fb9272187c | 1,222 | py | Python | koku/api/report/azure/openshift/view.py | rubik-ai/koku | 3255d1c217b7b6685cb2e130bf4e025946e76fac | [
"Apache-2.0"
] | 157 | 2018-04-30T16:27:53.000Z | 2022-03-31T08:17:21.000Z | koku/api/report/azure/openshift/view.py | rubik-ai/koku | 3255d1c217b7b6685cb2e130bf4e025946e76fac | [
"Apache-2.0"
] | 3,250 | 2018-04-26T14:14:25.000Z | 2022-03-31T23:49:15.000Z | koku/api/report/azure/openshift/view.py | rubik-ai/koku | 3255d1c217b7b6685cb2e130bf4e025946e76fac | [
"Apache-2.0"
] | 65 | 2018-05-10T14:11:50.000Z | 2022-03-18T19:22:58.000Z | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""View for OpenShift on Azure Usage Reports."""
from api.common.permissions.azure_access import AzureAccessPermission
from api.common.permissions.openshift_access import OpenShiftAccessPermission
from api.models import Provider
from api.report.azure.openshift.query_handler import OCPAzureReportQueryHandler
from api.report.azure.openshift.serializers import OCPAzureQueryParamSerializer
from api.report.view import ReportView
from reporting.models import OCPAzureTagsSummary
class OCPAzureView(ReportView):
"""OCP+Azure Base View."""
permission_classes = [AzureAccessPermission, OpenShiftAccessPermission]
provider = Provider.OCP_AZURE
serializer = OCPAzureQueryParamSerializer
query_handler = OCPAzureReportQueryHandler
tag_handler = [OCPAzureTagsSummary]
class OCPAzureCostView(OCPAzureView):
"""Get OpenShift on Azure cost usage data."""
report = "costs"
class OCPAzureInstanceTypeView(OCPAzureView):
"""Get OpenShift on Azure instance usage data."""
report = "instance_type"
class OCPAzureStorageView(OCPAzureView):
"""Get OpenShift on Azure storage usage data."""
report = "storage"
| 29.804878 | 79 | 0.788052 |
98edd2c7e9ca34cfa9069a91739725d9530d400f | 1,285 | py | Python | handlers/base.py | WXSD-Sales/ZoomToWebex | 16cc663620e2ef2904b0e2857d709aee96b78eb7 | [
"MIT"
] | null | null | null | handlers/base.py | WXSD-Sales/ZoomToWebex | 16cc663620e2ef2904b0e2857d709aee96b78eb7 | [
"MIT"
] | null | null | null | handlers/base.py | WXSD-Sales/ZoomToWebex | 16cc663620e2ef2904b0e2857d709aee96b78eb7 | [
"MIT"
] | null | null | null | import json
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
    """Shared request handler: secure-cookie auth helpers and page rendering.

    Fix: removed a leftover debug ``print(person)`` that dumped the decoded
    user cookie (personal data) to stdout on every page load.
    """

    def get_current_user(self):
        """Return the signed 'ZoomToWebex-User' cookie payload, or None."""
        cookie = self.get_secure_cookie("ZoomToWebex-User", max_age_days=1, min_version=2)
        return cookie

    def get_fedramp_user(self):
        """Return the signed 'MoveToFedRamp-User' cookie payload, or None."""
        cookie = self.get_secure_cookie("MoveToFedRamp-User", max_age_days=1, min_version=2)
        return cookie

    def load_page(self, page="main", msft_token=True, zoom_token=True, meetings_count=None):
        """Render ``<page>.html``, redirecting to OAuth when not signed in.

        :param page: template name; also passed along as OAuth ``state``.
        :param msft_token: include the user's Microsoft token status.
        :param zoom_token: include the user's Zoom token status.
        :param meetings_count: optional count forwarded to the template.
        """
        redirect_to = ""
        if page != "main":
            redirect_to += "?state={0}".format(page)
        # The fedramp page authenticates against its own cookie.
        if page == "fedramp":
            person = self.get_fedramp_user()
        else:
            person = self.get_current_user()
        if not person:
            self.redirect('/webex-oauth{0}'.format(redirect_to))
        else:
            person = json.loads(person)
            tokens = {}
            if msft_token:
                tokens.update({"msft_token": self.application.settings['db'].is_user(person['id'], "msft")})
            if zoom_token:
                tokens.update({"zoom_token": self.application.settings['db'].is_user(person['id'], "zoom")})
            self.render("{0}.html".format(page), person=person, tokens=tokens, meetings_count=meetings_count)
| 40.15625 | 109 | 0.610895 |
fc80de1012eedf4e196a956cdc0ce7c719d2c5f4 | 142 | py | Python | tests/context.py | ToddBenson/sample | 81108f8966c9193b2c11790f6055b4543bc0efac | [
"BSD-2-Clause"
] | null | null | null | tests/context.py | ToddBenson/sample | 81108f8966c9193b2c11790f6055b4543bc0efac | [
"BSD-2-Clause"
] | null | null | null | tests/context.py | ToddBenson/sample | 81108f8966c9193b2c11790f6055b4543bc0efac | [
"BSD-2-Clause"
] | null | null | null | import sys
import os
import sys

# Make the repository root importable *before* importing the package under
# test.  The original inserted the path only after the import, which worked
# solely when ``kata`` happened to be installed already.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from kata.exercise import pick_peaks  # noqa: E402  (import after sys.path setup)
| 23.666667 | 82 | 0.753521 |
a10b2ebd4e1a4148e83b74394f2fbc9c9c404af0 | 2,273 | py | Python | src/chia_log/handlers/condition_checkers/time_since_last_farm_event.py | IgorVoiT/chiadog | f45b811828d5ee9e0afca114efb842d1541439a4 | [
"MIT"
] | 1 | 2021-08-18T13:04:16.000Z | 2021-08-18T13:04:16.000Z | src/chia_log/handlers/condition_checkers/time_since_last_farm_event.py | IgorVoiT/chiadog | f45b811828d5ee9e0afca114efb842d1541439a4 | [
"MIT"
] | null | null | null | src/chia_log/handlers/condition_checkers/time_since_last_farm_event.py | IgorVoiT/chiadog | f45b811828d5ee9e0afca114efb842d1541439a4 | [
"MIT"
] | null | null | null | # std
import logging
from typing import Optional
# project
from src.notifier import Event, EventService, EventType, EventPriority
from . import HarvesterConditionChecker
from ...parsers.harvester_activity_parser import HarvesterActivityMessage
from UI_Manager import UIManager, InfoType
class TimeSinceLastFarmEvent(HarvesterConditionChecker):
    """Watch the gap between consecutive eligible farming events.

    Under normal operation the harvester participates in a challenge every
    ~9-10 seconds; a large gap usually indicates an unstable connection.
    Triggering here means the farmer already recovered, so only a
    NORMAL-priority event is raised — a complete stall is caught by the
    separate keep-alive check, which raises a high-priority event.
    """

    def __init__(self):
        UIManager().updateTextView("Enabled check for farming events.", InfoType.INFO)
        self._info_threshold = 30     # seconds: log only
        self._warning_threshold = 90  # seconds: log a warning and raise an event
        self._last_timestamp = None   # timestamp of the previously seen event

    def check(self, obj: HarvesterActivityMessage) -> Optional[Event]:
        """Compare this event's timestamp to the previous one; maybe emit an Event."""
        if self._last_timestamp is None:
            # First observation: nothing to compare against yet.
            self._last_timestamp = obj.timestamp
            return None
        event = None
        seconds_since_last = (obj.timestamp - self._last_timestamp).seconds
        if seconds_since_last > self._warning_threshold:
            message = (
                f"Experiencing networking issues? Harvester did not participate in any challenge "
                f"for {seconds_since_last} seconds. It's now working again."
            )
            UIManager().updateErrorTextView(message, InfoType.WARNING)
            event = Event(
                type=EventType.USER, priority=EventPriority.NORMAL, service=EventService.HARVESTER, message=message
            )
        elif seconds_since_last > self._info_threshold:
            # This threshold is surpassed multiple times per day on the
            # current network, so it only produces an INFO message.
            UIManager().updateTextView(f"Last farming event was {seconds_since_last} seconds ago. Usually every 9-10 seconds. No reason to worry if it happens up to 20 times daily.", InfoType.INFO)
            # TODO send event
        self._last_timestamp = obj.timestamp
        return event
| 43.711538 | 197 | 0.704355 |
bec30b7f181142c9a887aac4daf161f1b645830b | 692 | py | Python | swaps/model/market/pricedepth_bbo_event.py | DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur | 1120ebfb487ce4987fe70e6645b36e0d7ce041ec | [
"Apache-2.0"
] | 1 | 2021-09-06T00:09:11.000Z | 2021-09-06T00:09:11.000Z | swaps/model/market/pricedepth_bbo_event.py | DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur | 1120ebfb487ce4987fe70e6645b36e0d7ce041ec | [
"Apache-2.0"
] | null | null | null | swaps/model/market/pricedepth_bbo_event.py | DunnCreativeSS/cash_carry_leveraged_futures_arbitrageur | 1120ebfb487ce4987fe70e6645b36e0d7ce041ec | [
"Apache-2.0"
] | null | null | null | from swaps.model.market import PriceDepthBbo
class PriceDepthBboEvent:
    """
    Best-bid/best-offer price-depth update received via subscription.

    :member
        ch: The channel of the symbol you subscribed.
        ts: The UNIX formatted timestamp generated by server in UTC.
        tick: The price depth payload.
    """

    def __init__(self):
        self.ts = 0
        self.ch = ""
        self.tick = PriceDepthBbo()

    def print_object(self, format_data=""):
        # Imported lazily to mirror the rest of the SDK's print helpers.
        from swaps.utils.print_mix_object import PrintBasic
        PrintBasic.print_basic(self.ts, format_data + "Time")
        PrintBasic.print_basic(self.ch, format_data + "Channel")
        self.tick.print_object(format_data)
2bc8a8e97c9bf0fef8c811b58a840a824bf67514 | 3,277 | py | Python | contrib/zmq/zmq_sub3.4.py | xlkulu/SHTcoin | e8e50991583a3530211025cd48191bf274798555 | [
"MIT"
] | 1 | 2019-08-25T13:07:02.000Z | 2019-08-25T13:07:02.000Z | contrib/zmq/zmq_sub3.4.py | xlkulu/SHTcoin | e8e50991583a3530211025cd48191bf274798555 | [
"MIT"
] | 1 | 2019-08-25T13:11:54.000Z | 2019-08-25T13:11:54.000Z | contrib/zmq/zmq_sub3.4.py | xlkulu/SHTcoin | e8e50991583a3530211025cd48191bf274798555 | [
"MIT"
] | 1 | 2019-08-25T13:07:25.000Z | 2019-08-25T13:07:25.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Shtcoin should be started with the command line arguments:
shtcoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
# Require Python 3.4+.  The original ``major >= 3 and minor >= 4`` test would
# wrongly reject a hypothetical Python 4.0-4.3; comparing ``sys.version_info``
# against a tuple handles future major versions correctly.
if sys.version_info < (3, 4):
    print("This example only works with Python 3.4 and greater")
    sys.exit(1)

# Port the shtcoind ZMQ publishers listen on (see module docstring).
port = 28332
class ZMQHandler():
    """Subscribes to all four shtcoind ZMQ topics and prints each message."""

    def __init__(self):
        self.loop = zmq.asyncio.install()
        self.zmqContext = zmq.asyncio.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        # Subscribe to every notification topic the daemon publishes.
        for topic in ("hashblock", "hashtx", "rawblock", "rawtx"):
            self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, topic)
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)

    @asyncio.coroutine
    def handle(self):
        msg = yield from self.zmqSubSocket.recv_multipart()
        topic, body = msg[0], msg[1]
        sequence = "Unknown"
        if len(msg[-1]) == 4:
            # The trailing frame is a little-endian uint32 sequence number.
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            sequence = str(msgSequence)
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"hashtx":
            print('- HASH TX ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"rawblock":
            # Only the 80-byte block header of the raw block is printed.
            print('- RAW BLOCK HEADER ('+sequence+') -')
            print(binascii.hexlify(body[:80]))
        elif topic == b"rawtx":
            print('- RAW TX ('+sequence+') -')
            print(binascii.hexlify(body))
        # Re-arm: schedule ourselves to receive the next message.
        asyncio.ensure_future(self.handle())

    def start(self):
        # Ctrl-C tears the loop down cleanly via stop().
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

    def stop(self):
        self.loop.stop()
        self.zmqContext.destroy()
# Run the subscriber until interrupted (SIGINT triggers ZMQHandler.stop via the
# handler installed in start()).  The __main__ guard keeps the event loop from
# starting as a side effect if this example is ever imported as a module.
if __name__ == "__main__":
    daemon = ZMQHandler()
    daemon.start()
| 36.411111 | 107 | 0.648764 |
fa879c75d7e018e6dea0adbf4d03f0709ff50d26 | 3,344 | py | Python | insights/parsers/tests/test_ls_lib_firmware.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/parsers/tests/test_ls_lib_firmware.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/parsers/tests/test_ls_lib_firmware.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z | import doctest
from insights.parsers import ls_lib_firmware
from insights.parsers.ls_lib_firmware import LsLibFW
from insights.tests import context_wrap
LS_LIB_FW = """
/lib/firmware:
total 37592
drwxr-xr-x. 83 0 0 8192 Aug 14 02:43 .
dr-xr-xr-x. 26 0 0 4096 Aug 14 02:22 ..
drwxr-xr-x. 2 0 0 40 Aug 14 02:42 3com
lrwxrwxrwx. 1 0 0 16 Aug 14 02:42 a300_pfp.fw -> qcom/a300_pfp.fw
lrwxrwxrwx. 1 0 0 16 Aug 14 02:42 a300_pm4.fw -> qcom/a300_pm4.fw
drwxr-xr-x. 2 0 0 34 Aug 14 02:42 acenic
drwxr-xr-x. 2 0 0 50 Aug 14 02:42 adaptec
drwxr-xr-x. 2 0 0 73 Aug 14 02:42 advansys
/lib/firmware/3com:
total 84
drwxr-xr-x. 2 0 0 40 Aug 14 02:42 .
drwxr-xr-x. 83 0 0 8192 Aug 14 02:43 ..
-rw-r--r--. 1 0 0 24880 Jun 6 10:14 3C359.bin
-rw-r--r--. 1 0 0 44548 Jun 6 10:14 typhoon.bin
/lib/firmware/acenic:
total 160
drwxr-xr-x. 2 0 0 34 Aug 14 02:42 .
drwxr-xr-x. 83 0 0 8192 Aug 14 02:43 ..
-rw-r--r--. 1 0 0 73116 Jun 6 10:14 tg1.bin
-rw-r--r--. 1 0 0 77452 Jun 6 10:14 tg2.bin
/lib/firmware/adaptec:
total 20
drwxr-xr-x. 2 0 0 50 Aug 14 02:42 .
drwxr-xr-x. 83 0 0 8192 Aug 14 02:43 ..
-rw-r--r--. 1 0 0 832 Jun 6 10:14 starfire_rx.bin
-rw-r--r--. 1 0 0 832 Jun 6 10:14 starfire_tx.bin
/lib/firmware/advansys:
total 40
drwxr-xr-x. 2 0 0 73 Aug 14 02:42 .
drwxr-xr-x. 83 0 0 8192 Aug 14 02:43 ..
-rw-r--r--. 1 0 0 5026 Jun 6 10:14 3550.bin
-rw-r--r--. 1 0 0 5340 Jun 6 10:14 38C0800.bin
-rw-r--r--. 1 0 0 6334 Jun 6 10:14 38C1600.bin
-rw-r--r--. 1 0 0 2308 Jun 6 10:14 mcode.bin
/lib/firmware/bnx2:
total 1448
drwxr-xr-x. 2 0 0 4096 Aug 14 02:43 .
drwxr-xr-x. 83 0 0 8192 Aug 14 02:43 ..
-rw-r--r--. 1 0 0 92628 Jun 6 10:14 bnx2-mips-06-4.6.16.fw
-rw-r--r--. 1 0 0 93172 Jun 6 10:14 bnx2-mips-06-5.0.0.j3.fw
-rw-r--r--. 1 0 0 94252 Jun 6 10:14 bnx2-mips-06-5.0.0.j6.fw
-rw-r--r--. 1 0 0 92824 Jun 6 10:14 bnx2-mips-06-6.2.3.fw
-rw-r--r--. 1 0 0 92760 Jun 6 10:14 bnx2-mips-09-4.6.17.fw
-rw-r--r--. 1 0 0 96996 Jun 6 10:14 bnx2-mips-09-5.0.0.j15.fw
/lib/firmware/bnx2x:
total 8772
drwxr-xr-x. 2 0 0 4096 Aug 14 02:43 .
drwxr-xr-x. 83 0 0 8192 Aug 14 02:43 ..
-rw-r--r--. 1 0 0 151568 Jun 6 10:14 bnx2x-e1-6.0.34.0.fw
-rw-r--r--. 1 0 0 151680 Jun 6 10:14 bnx2x-e1-6.2.5.0.fw
-rw-r--r--. 1 0 0 151688 Jun 6 10:14 bnx2x-e1-6.2.9.0.fw
-rw-r--r--. 1 0 0 161144 Jun 6 10:14 bnx2x-e1-7.0.20.0.fw
-rw-r--r--. 1 0 0 161248 Jun 6 10:14 bnx2x-e1-7.0.23.0.fw
""".strip()
def test_ls_lib_firmware():
    """Spot-check membership and listing queries against the fixture."""
    lslib = LsLibFW(context_wrap(LS_LIB_FW))
    # Entries are keyed by full path, not by bare directory name.
    assert "bnx2x" not in lslib
    assert "/lib/firmware/bnx2" in lslib
    assert "/lib/firmware/bnx2x" in lslib
    assert lslib.dirs_of("/lib/firmware") == ['.', '..', '3com', 'acenic', 'adaptec', 'advansys']
    expected_bnx2x = ['bnx2x-e1-6.0.34.0.fw', 'bnx2x-e1-6.2.5.0.fw', 'bnx2x-e1-6.2.9.0.fw',
                      'bnx2x-e1-7.0.20.0.fw', 'bnx2x-e1-7.0.23.0.fw']
    assert lslib.files_of("/lib/firmware/bnx2x") == expected_bnx2x
    assert "bnx2x-e1-6.0.34.0.fw" in lslib.files_of("/lib/firmware/bnx2x")
    assert lslib.dir_contains("/lib/firmware/bnx2x", "bnx2x-e1-6.0.34.0.fw") is True
    assert lslib.total_of("/lib/firmware") == 37592
def test_ls_lib_firmware_doc_examples():
    """Execute the module's docstring examples as doctests."""
    globs = {
        'lslibfw': LsLibFW(context_wrap(LS_LIB_FW)),
    }
    failures, _total = doctest.testmod(ls_lib_firmware, globs=globs)
    assert failures == 0
| 37.155556 | 170 | 0.630682 |
5906be3aeb87cd34ff470d2125255bd74e00bcd6 | 1,261 | py | Python | examples/quickstart.py | yopknopixx/dffml | 7f295bb01b235a915c9f8015564b97a708cd5325 | [
"MIT"
] | null | null | null | examples/quickstart.py | yopknopixx/dffml | 7f295bb01b235a915c9f8015564b97a708cd5325 | [
"MIT"
] | null | null | null | examples/quickstart.py | yopknopixx/dffml | 7f295bb01b235a915c9f8015564b97a708cd5325 | [
"MIT"
] | null | null | null | from dffml import Features, Feature
from dffml.noasync import train, accuracy, predict
from dffml_model_scikit import LinearRegressionModel
from dffml.accuracy import MeanSquaredErrorAccuracy
# Declare the model: a scikit-learn linear regression over three features,
# predicting "Salary", with its state stored under ./tempdir.
model = LinearRegressionModel(
    features=Features(
        Feature("Years", int, 1),
        Feature("Expertise", int, 1),
        Feature("Trust", float, 1),
    ),
    predict=Feature("Salary", int, 1),
    location="tempdir",
)

# Train the model
train(
    model,
    {"Years": 0, "Expertise": 1, "Trust": 0.1, "Salary": 10},
    {"Years": 1, "Expertise": 3, "Trust": 0.2, "Salary": 20},
    {"Years": 2, "Expertise": 5, "Trust": 0.3, "Salary": 30},
    {"Years": 3, "Expertise": 7, "Trust": 0.4, "Salary": 40},
)

# Assess accuracy
scorer = MeanSquaredErrorAccuracy()
print(
    "Accuracy:",
    accuracy(
        model,
        scorer,
        Feature("Salary", int, 1),
        {"Years": 4, "Expertise": 9, "Trust": 0.5, "Salary": 50},
        {"Years": 5, "Expertise": 11, "Trust": 0.6, "Salary": 60},
    ),
)

# Make prediction
for _, features, prediction in predict(
    model,
    {"Years": 6, "Expertise": 13, "Trust": 0.7},
    {"Years": 7, "Expertise": 15, "Trust": 0.8},
):
    # Fold the predicted value back into the record before printing.
    features["Salary"] = prediction["Salary"]["value"]
    print(features)
| 27.413043 | 66 | 0.596352 |
c372d5ac2bb8f9d05dd271109c5f1989b39700c5 | 80,226 | py | Python | flink-python/pyflink/datastream/data_stream.py | FrommyMind/flink | 589a566784451ef3943ee024d46fdd8d33d30a66 | [
"Apache-2.0"
] | 1 | 2022-02-16T07:59:13.000Z | 2022-02-16T07:59:13.000Z | flink-python/pyflink/datastream/data_stream.py | FrommyMind/flink | 589a566784451ef3943ee024d46fdd8d33d30a66 | [
"Apache-2.0"
] | 2 | 2022-03-09T12:58:57.000Z | 2022-03-29T12:23:39.000Z | flink-python/pyflink/datastream/data_stream.py | FrommyMind/flink | 589a566784451ef3943ee024d46fdd8d33d30a66 | [
"Apache-2.0"
] | 1 | 2022-03-09T08:50:37.000Z | 2022-03-09T08:50:37.000Z | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import typing
import uuid
from typing import Callable, Union, List, cast
from pyflink.common import typeinfo, ExecutionConfig, Row
from pyflink.datastream.slot_sharing_group import SlotSharingGroup
from pyflink.datastream.window import (TimeWindowSerializer, CountWindowSerializer, WindowAssigner,
Trigger, WindowOperationDescriptor)
from pyflink.common.typeinfo import RowTypeInfo, Types, TypeInformation, _from_java_type
from pyflink.common.watermark_strategy import WatermarkStrategy, TimestampAssigner
from pyflink.datastream.connectors import Sink
from pyflink.datastream.functions import (_get_python_env, FlatMapFunction, MapFunction, Function,
FunctionWrapper, SinkFunction, FilterFunction,
KeySelector, ReduceFunction, CoMapFunction,
CoFlatMapFunction, Partitioner, RuntimeContext,
ProcessFunction, KeyedProcessFunction,
KeyedCoProcessFunction, WindowFunction,
ProcessWindowFunction, InternalWindowFunction,
InternalIterableWindowFunction,
InternalIterableProcessWindowFunction, CoProcessFunction)
from pyflink.datastream.state import ValueStateDescriptor, ValueState, ListStateDescriptor
from pyflink.datastream.utils import convert_to_python_obj
from pyflink.java_gateway import get_gateway
__all__ = ['CloseableIterator', 'DataStream', 'KeyedStream', 'ConnectedStreams', 'WindowedStream',
'DataStreamSink', 'CloseableIterator']
class DataStream(object):
"""
A DataStream represents a stream of elements of the same type. A DataStream can be transformed
into another DataStream by applying a transformation as for example:
::
>>> DataStream.map(MapFunctionImpl())
>>> DataStream.filter(FilterFunctionImpl())
"""
def __init__(self, j_data_stream):
    # Keep a handle to the wrapped Java DataStream object; every operation
    # below delegates to it through the Py4J gateway.
    self._j_data_stream = j_data_stream

def get_name(self) -> str:
    """
    Returns the name of this data stream, as used by the web UI and
    logging at runtime.

    :return: Name of the stream.
    """
    return self._j_data_stream.getName()

def name(self, name: str) -> 'DataStream':
    """
    Assigns a name to this data stream, used by the web UI and logging at
    runtime.

    :param name: Name of the stream.
    :return: The named operator.
    """
    self._j_data_stream.name(name)
    return self
def uid(self, uid: str) -> 'DataStream':
    """
    Assigns a stable, user-specified ID to this operator so the same
    operator ID is used across job submissions (for example when starting
    a job from a savepoint).

    Important: the ID must be unique per transformation and job, otherwise
    job submission fails.

    :param uid: The unique user-specified ID of this transformation.
    :return: The operator with the specified ID.
    """
    self._j_data_stream.uid(uid)
    return self

def set_uid_hash(self, uid_hash: str) -> 'DataStream':
    """
    Supplies a user-provided hash that is used AS IS to create the
    JobVertexID, as an alternative to the automatically generated hash.
    This is considered when identifying an operator through the default
    hash mechanics fails (e.g. because of changes between Flink versions),
    such as when re-establishing a lost mapping from states to their
    target operator using hashes obtained from old logs.

    Important: use this only as a workaround or for trouble shooting.  The
    provided hash must be unique per transformation and job, and cannot be
    assigned to intermediate nodes of an operator chain (trying to do so
    fails the job).

    :param uid_hash: The user provided hash for this operator. This will become the jobVertexID,
                     which is shown in the logs and web ui.
    :return: The operator with the user provided hash.
    """
    self._j_data_stream.setUidHash(uid_hash)
    return self
def set_parallelism(self, parallelism: int) -> 'DataStream':
    """
    Sets how many parallel instances this operator runs with.

    :param parallelism: The parallelism for this operator.
    :return: The operator with set parallelism.
    """
    self._j_data_stream.setParallelism(parallelism)
    return self

def set_max_parallelism(self, max_parallelism: int) -> 'DataStream':
    """
    Sets the upper bound for dynamic scaling of this operator.  The
    maximum parallelism also defines the number of key groups used for
    partitioned state.

    :param max_parallelism: Maximum parallelism.
    :return: The operator with set maximum parallelism.
    """
    self._j_data_stream.setMaxParallelism(max_parallelism)
    return self

def get_type(self) -> TypeInformation:
    """
    Returns the type information of the elements in this stream.

    :return: The type of the DataStream.
    """
    return typeinfo._from_java_type(self._j_data_stream.getType())
def get_execution_environment(self):
    """
    Returns the StreamExecutionEnvironment this DataStream was created
    from.

    :return: The Execution Environment.
    """
    # Imported lazily to avoid a circular import with pyflink.datastream.
    from pyflink.datastream import StreamExecutionEnvironment
    j_env = self._j_data_stream.getExecutionEnvironment()
    return StreamExecutionEnvironment(j_stream_execution_environment=j_env)

def get_execution_config(self) -> ExecutionConfig:
    # Expose the wrapped stream's execution configuration.
    return ExecutionConfig(j_execution_config=self._j_data_stream.getExecutionConfig())

def force_non_parallel(self) -> 'DataStream':
    """
    Pins both the parallelism and the maximum parallelism of this operator
    to one, marking it as unable to run with a non-1 degree of
    parallelism.

    :return: The operator with only one parallelism.
    """
    self._j_data_stream.forceNonParallel()
    return self
def set_buffer_timeout(self, timeout_millis: int) -> 'DataStream':
    """
    Sets how long (in milliseconds) records produced by this operation may
    linger in a partially full network buffer before being flushed.  Lower
    timeouts lead to lower tail latencies but may affect throughput;
    timeouts of 1 ms still sustain high throughput, even for jobs with
    high parallelism.

    A value of -1 means the default buffer timeout is used; 0 disables
    buffering entirely and ships every record/event immediately.

    :param timeout_millis: The maximum time between two output flushes.
    :return: The operator with buffer timeout set.
    """
    self._j_data_stream.setBufferTimeout(timeout_millis)
    return self

def start_new_chain(self) -> 'DataStream':
    """
    Starts a new task chain beginning at this operator, so it will not be
    chained (thread co-located for increased performance) to preceding
    tasks.

    :return: The operator with chaining set.
    """
    self._j_data_stream.startNewChain()
    return self

def disable_chaining(self) -> 'DataStream':
    """
    Opts this operator out of chaining, so thread co-location will not be
    used as an optimization.  Chaining can also be turned off for the
    whole job via StreamExecutionEnvironment.disableOperatorChaining(),
    though that is not advised for performance reasons.

    :return: The operator with chaining disabled.
    """
    self._j_data_stream.disableChaining()
    return self
def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) -> 'DataStream':
    """
    Puts this operation into a slot sharing group.  Parallel instances of
    operations in the same group are co-located in the same TaskManager
    slot when possible.

    Operations inherit the slot sharing group of their input operations if
    all inputs are in the same group and no group was set explicitly.  The
    initial group is 'default', which can also be selected explicitly.

    :param slot_sharing_group: The slot sharing group name or which contains name and its
                               resource spec.
    :return: This operator.
    """
    if isinstance(slot_sharing_group, SlotSharingGroup):
        j_group = slot_sharing_group.get_java_slot_sharing_group()
    else:
        j_group = slot_sharing_group
    self._j_data_stream.slotSharingGroup(j_group)
    return self

def set_description(self, description: str) -> 'DataStream':
    """
    Sets the description for this operator, shown in the json plan and the
    web UI (but not in logging and metrics, where only the name is
    available).  A description may provide detailed information, while the
    name should stay short and summary-like.

    :param description: The description for this operator.
    :return: The operator with new description.

    .. versionadded:: 1.15.0
    """
    self._j_data_stream.setDescription(description)
    return self
def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
        -> 'DataStream':
    """
    Applies a Map transformation on this DataStream: ``func`` is invoked
    once per element and must return exactly one element.

    Note that if no output type is specified, the output data will be
    serialized as a pickle primitive byte array.

    :param func: The MapFunction that is called for each element of the DataStream.
    :param output_type: The type information of the MapFunction output data.
    :return: The transformed DataStream.
    """
    if not isinstance(func, MapFunction) and not callable(func):
        raise TypeError("The input must be a MapFunction or a callable function")

    class MapProcessFunctionAdapter(ProcessFunction):
        # Bridges a MapFunction (or plain callable) onto the
        # ProcessFunction interface used by the runtime.

        def __init__(self, map_func):
            if isinstance(map_func, MapFunction):
                self._open_func = map_func.open
                self._close_func = map_func.close
                self._map_func = map_func.map
            else:
                # Plain callables have no open/close lifecycle.
                self._open_func = None
                self._close_func = None
                self._map_func = map_func

        def open(self, runtime_context: RuntimeContext):
            if self._open_func is not None:
                self._open_func(runtime_context)

        def close(self):
            if self._close_func is not None:
                self._close_func()

        def process_element(self, value, ctx: 'ProcessFunction.Context'):
            yield self._map_func(value)

    return self.process(MapProcessFunctionAdapter(func), output_type).name("Map")
def flat_map(self,
             func: Union[Callable, FlatMapFunction],
             output_type: TypeInformation = None) -> 'DataStream':
    """
    Applies a FlatMap transformation on this DataStream: ``func`` is
    invoked once per element and may emit any number of elements,
    including none.

    :param func: The FlatMapFunction that is called for each element of the DataStream.
    :param output_type: The type information of output data.
    :return: The transformed DataStream.
    """
    if not isinstance(func, FlatMapFunction) and not callable(func):
        raise TypeError("The input must be a FlatMapFunction or a callable function")

    class FlatMapProcessFunctionAdapter(ProcessFunction):
        # Bridges a FlatMapFunction (or plain callable) onto the
        # ProcessFunction interface used by the runtime.

        def __init__(self, flat_map_func):
            if isinstance(flat_map_func, FlatMapFunction):
                self._open_func = flat_map_func.open
                self._close_func = flat_map_func.close
                self._flat_map_func = flat_map_func.flat_map
            else:
                # Plain callables have no open/close lifecycle.
                self._open_func = None
                self._close_func = None
                self._flat_map_func = flat_map_func

        def open(self, runtime_context: RuntimeContext):
            if self._open_func is not None:
                self._open_func(runtime_context)

        def close(self):
            if self._close_func is not None:
                self._close_func()

        def process_element(self, value, ctx: 'ProcessFunction.Context'):
            yield from self._flat_map_func(value)

    return self.process(FlatMapProcessFunctionAdapter(func), output_type).name("FlatMap")
def key_by(self,
           key_selector: Union[Callable, KeySelector],
           key_type: TypeInformation = None) -> 'KeyedStream':
    """
    Creates a new KeyedStream that uses the provided key for partitioning
    its operator states.

    :param key_selector: The KeySelector to be used for extracting the key for partitioning.
    :param key_type: The type information describing the key type.
    :return: The DataStream with partitioned state(i.e. KeyedStream).
    """
    if not isinstance(key_selector, KeySelector) and not callable(key_selector):
        raise TypeError("Parameter key_selector should be type of KeySelector or a callable "
                        "function.")

    class AddKey(ProcessFunction):
        # Emits Row(key, value) so the Java-side KeyByKeySelector can key
        # on the first field while the original value rides along.

        def __init__(self, inner_selector):
            if isinstance(inner_selector, KeySelector):
                self._key_selector_open_func = inner_selector.open
                self._key_selector_close_func = inner_selector.close
                self._get_key_func = inner_selector.get_key
            else:
                # Plain callables have no open/close lifecycle.
                self._key_selector_open_func = None
                self._key_selector_close_func = None
                self._get_key_func = inner_selector

        def open(self, runtime_context: RuntimeContext):
            if self._key_selector_open_func is not None:
                self._key_selector_open_func(runtime_context)

        def close(self):
            if self._key_selector_close_func is not None:
                self._key_selector_close_func()

        def process_element(self, value, ctx: 'ProcessFunction.Context'):
            yield Row(self._get_key_func(value), value)

    output_type_info = typeinfo._from_java_type(
        self._j_data_stream.getTransformation().getOutputType())
    if key_type is None:
        # Without explicit key type information, keys travel pickled.
        key_type = Types.PICKLED_BYTE_ARRAY()

    gateway = get_gateway()
    stream_with_key_info = self.process(
        AddKey(key_selector),
        output_type=Types.ROW([key_type, output_type_info]))
    stream_with_key_info.name(
        gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
        .STREAM_KEY_BY_MAP_OPERATOR_NAME)

    JKeyByKeySelector = gateway.jvm.KeyByKeySelector
    return KeyedStream(
        stream_with_key_info._j_data_stream.keyBy(
            JKeyByKeySelector(),
            Types.ROW([key_type]).get_java_type_info()),
        output_type_info,
        self)
def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
    """
    Applies a Filter transformation on this DataStream: elements for which
    ``func`` returns True are retained; elements for which it returns
    False are dropped.

    :param func: The FilterFunction that is called for each element of the DataStream.
    :return: The filtered DataStream.
    """
    if not isinstance(func, FilterFunction) and not callable(func):
        raise TypeError("The input must be a FilterFunction or a callable function")

    class FilterProcessFunctionAdapter(ProcessFunction):
        # Bridges a FilterFunction (or plain callable predicate) onto the
        # ProcessFunction interface used by the runtime.

        def __init__(self, filter_func):
            if isinstance(filter_func, FilterFunction):
                self._open_func = filter_func.open
                self._close_func = filter_func.close
                self._filter_func = filter_func.filter
            else:
                # Plain callables have no open/close lifecycle.
                self._open_func = None
                self._close_func = None
                self._filter_func = filter_func

        def open(self, runtime_context: RuntimeContext):
            if self._open_func is not None:
                self._open_func(runtime_context)

        def close(self):
            if self._close_func is not None:
                self._close_func()

        def process_element(self, value, ctx: 'ProcessFunction.Context'):
            if self._filter_func(value):
                yield value

    # Filtering never changes the element type of the stream.
    output_type = typeinfo._from_java_type(
        self._j_data_stream.getTransformation().getOutputType())
    return self.process(FilterProcessFunctionAdapter(func), output_type=output_type) \
        .name("Filter")
def union(self, *streams: 'DataStream') -> 'DataStream':
    """
    Creates a new DataStream by merging DataStream outputs of the same
    type with each other; the merged streams are transformed
    simultaneously.

    :param streams: The DataStream to union outputwith.
    :return: The DataStream.
    """
    # KeyedStreams are unioned on their underlying value streams.
    j_streams = [
        (s._values()._j_data_stream if isinstance(s, KeyedStream) else s._j_data_stream)
        for s in streams
    ]
    gateway = get_gateway()
    JDataStream = gateway.jvm.org.apache.flink.streaming.api.datastream.DataStream
    j_stream_arr = gateway.new_array(JDataStream, len(j_streams))
    for idx, j_stream in enumerate(j_streams):
        j_stream_arr[idx] = j_stream
    return DataStream(j_data_stream=self._j_data_stream.union(j_stream_arr))

def connect(self, ds: 'DataStream') -> 'ConnectedStreams':
    """
    Creates a new 'ConnectedStreams' by connecting 'DataStream' outputs of
    (possibly) different types with each other; the result can be used
    with CoFunctions to apply joint transformations.

    :param ds: The DataStream with which this stream will be connected.
    :return: The `ConnectedStreams`.
    """
    return ConnectedStreams(self, ds)
def shuffle(self) -> 'DataStream':
    """
    Partitions this stream so that output elements are shuffled uniformly
    randomly to the next operation.

    :return: The DataStream with shuffle partitioning set.
    """
    return DataStream(self._j_data_stream.shuffle())

def project(self, *field_indexes: int) -> 'DataStream':
    """
    Initiates a Project transformation on a Tuple DataStream; the order of
    fields in the output tuple corresponds to the order of the given field
    indexes.  Note that only Tuple DataStreams can be projected.

    :param field_indexes: The field indexes of the input tuples that are retained.
    :return: The projected DataStream.
    """
    if not isinstance(self.get_type(), typeinfo.TupleTypeInfo):
        raise Exception('Only Tuple DataStreams can be projected.')

    gateway = get_gateway()
    j_index_arr = gateway.new_array(gateway.jvm.int, len(field_indexes))
    for pos, field_index in enumerate(field_indexes):
        j_index_arr[pos] = field_index
    return DataStream(self._j_data_stream.project(j_index_arr))
def rescale(self) -> 'DataStream':
    """
    Distributes output elements evenly, round-robin, to a *subset* of the
    instances of the next operation.  Which downstream subset an upstream
    task feeds depends on the parallelism of both sides: e.g. upstream
    parallelism 2 with downstream parallelism 4 means each upstream task
    distributes to two downstream tasks; with the ratio reversed, two
    upstream tasks share one downstream task.  When the parallelisms are
    not multiples of each other, downstream operations may have a
    differing number of inputs from upstream operations.

    :return: The DataStream with rescale partitioning set.
    """
    return DataStream(self._j_data_stream.rescale())

def rebalance(self) -> 'DataStream':
    """
    Distributes output elements evenly, round-robin, across all instances
    of the next operation.

    :return: The DataStream with rebalance partition set.
    """
    return DataStream(self._j_data_stream.rebalance())

def forward(self) -> 'DataStream':
    """
    Forwards output elements to the local sub-task of the next operation.

    :return: The DataStream with forward partitioning set.
    """
    return DataStream(self._j_data_stream.forward())

def broadcast(self) -> 'DataStream':
    """
    Broadcasts output elements to every parallel instance of the next
    operation.

    :return: The DataStream with broadcast partitioning set.
    """
    return DataStream(self._j_data_stream.broadcast())
def process(self, func: ProcessFunction, output_type: TypeInformation = None) -> 'DataStream':
"""
Applies the given ProcessFunction on the input stream, thereby creating a transformed output
stream.
The function will be called for every element in the input streams and can produce zero or
more output elements.
:param func: The ProcessFunction that is called for each element in the stream.
:param output_type: TypeInformation for the result type of the function.
:return: The transformed DataStream.
"""
from pyflink.fn_execution import flink_fn_execution_pb2
j_python_data_stream_function_operator, j_output_type_info = \
_get_one_input_stream_operator(
self,
func,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.PROCESS, # type: ignore
output_type)
return DataStream(self._j_data_stream.transform(
"PROCESS",
j_output_type_info,
j_python_data_stream_function_operator))
    def assign_timestamps_and_watermarks(self, watermark_strategy: WatermarkStrategy) -> \
            'DataStream':
        """
        Assigns timestamps to the elements in the data stream and generates watermarks to signal
        event time progress. The given WatermarkStrategy is used to create a TimestampAssigner and
        WatermarkGenerator.

        :param watermark_strategy: The strategy to generate watermarks based on event timestamps.
        :return: The stream after the transformation, with assigned timestamps and watermarks.
        """
        if watermark_strategy._timestamp_assigner is not None:
            # in case users have specified custom TimestampAssigner, we need to extract and
            # generate watermark according to the specified TimestampAssigner.

            # Adapter that pairs every record with its Python-extracted timestamp so the Java
            # side can read the timestamp from the second tuple field.
            class TimestampAssignerProcessFunctionAdapter(ProcessFunction):

                def __init__(self, timestamp_assigner: TimestampAssigner):
                    self._extract_timestamp_func = timestamp_assigner.extract_timestamp

                def process_element(self, value, ctx: 'ProcessFunction.Context'):
                    # ctx.timestamp() is the record's current (possibly absent) timestamp,
                    # handed to the user assigner as the "previous" timestamp.
                    yield value, self._extract_timestamp_func(value, ctx.timestamp())

            # step 1: extract the timestamp according to the specified TimestampAssigner
            timestamped_data_stream = self.process(
                TimestampAssignerProcessFunctionAdapter(watermark_strategy._timestamp_assigner),
                Types.TUPLE([self.get_type(), Types.LONG()]))
            timestamped_data_stream.name("Extract-Timestamp")

            # step 2: assign timestamp and watermark
            # CustomTimestampAssigner (Java) reads the timestamp produced in step 1 from the
            # second field of the tuple.
            gateway = get_gateway()
            JCustomTimestampAssigner = gateway.jvm.org.apache.flink.streaming.api.functions.python \
                .eventtime.CustomTimestampAssigner
            j_watermarked_data_stream = (
                timestamped_data_stream._j_data_stream.assignTimestampsAndWatermarks(
                    watermark_strategy._j_watermark_strategy.withTimestampAssigner(
                        JCustomTimestampAssigner())))

            # step 3: remove the timestamp field which is added in step 1, restoring the
            # original element type of this stream.
            JRemoveTimestampMapFunction = gateway.jvm.org.apache.flink.streaming.api.functions \
                .python.eventtime.RemoveTimestampMapFunction
            result = DataStream(j_watermarked_data_stream.map(
                JRemoveTimestampMapFunction(), self._j_data_stream.getType()))
            result.name("Remove-Timestamp")
            return result
        else:
            # if user not specify a TimestampAssigner, then return directly assign the Java
            # watermark strategy.
            return DataStream(self._j_data_stream.assignTimestampsAndWatermarks(
                watermark_strategy._j_watermark_strategy))
    def partition_custom(self, partitioner: Union[Callable, Partitioner],
                         key_selector: Union[Callable, KeySelector]) -> 'DataStream':
        """
        Partitions a DataStream on the key returned by the selector, using a custom partitioner.
        This method takes the key selector to get the key to partition on, and a partitioner that
        accepts the key type.

        Note that this method works only on single field keys, i.e. the selector cannot return
        tuples of fields.

        :param partitioner: The partitioner to assign partitions to keys.
        :param key_selector: The KeySelector with which the DataStream is partitioned.
        :return: The partitioned DataStream.
        """
        if not isinstance(partitioner, Partitioner) and not callable(partitioner):
            raise TypeError("Parameter partitioner should be type of Partitioner or a callable "
                            "function.")

        if not isinstance(key_selector, KeySelector) and not callable(key_selector):
            raise TypeError("Parameter key_selector should be type of KeySelector or a callable "
                            "function.")

        gateway = get_gateway()

        class CustomPartitioner(ProcessFunction):
            """
            A wrapper class for partition_custom map function. It indicates that it is a partition
            custom operation that we need to apply PythonPartitionCustomOperator
            to run the map function.
            """

            def __init__(self, partitioner, key_selector):
                # Both partitioner and key_selector may be either rich objects (with
                # open/close lifecycle) or plain callables; normalize to attribute slots so
                # open()/close() below can test for None instead of re-checking types.
                if isinstance(partitioner, Partitioner):
                    self._partitioner_open_func = partitioner.open
                    self._partitioner_close_func = partitioner.close
                    self._partition_func = partitioner.partition
                else:
                    self._partitioner_open_func = None
                    self._partitioner_close_func = None
                    self._partition_func = partitioner

                if isinstance(key_selector, KeySelector):
                    self._key_selector_open_func = key_selector.open
                    self._key_selector_close_func = key_selector.close
                    self._get_key_func = key_selector.get_key
                else:
                    self._key_selector_open_func = None
                    self._key_selector_close_func = None
                    self._get_key_func = key_selector

            def open(self, runtime_context: RuntimeContext):
                if self._partitioner_open_func:
                    self._partitioner_open_func(runtime_context)
                if self._key_selector_open_func:
                    self._key_selector_open_func(runtime_context)

                # The downstream parallelism is injected as a job parameter by the Java
                # operator; -1 is the "not set" sentinel and is rejected below.
                self.num_partitions = int(runtime_context.get_job_parameter(
                    "NUM_PARTITIONS", "-1"))
                if self.num_partitions <= 0:
                    raise ValueError(
                        "The partition number should be a positive value, got %s"
                        % self.num_partitions)

            def close(self):
                if self._partitioner_close_func:
                    self._partitioner_close_func()
                if self._key_selector_close_func:
                    self._key_selector_close_func()

            def process_element(self, value, ctx: 'ProcessFunction.Context'):
                # Emit Row(partition_index, original_value); the Java side routes on the
                # first field and the trailing map() below strips it off again.
                partition = self._partition_func(self._get_key_func(value), self.num_partitions)
                yield Row(partition, value)

        original_type_info = self.get_type()

        # Step 1: compute the target partition in Python and attach it to each record.
        stream_with_partition_info = self.process(
            CustomPartitioner(partitioner, key_selector),
            output_type=Types.ROW([Types.INT(), original_type_info]))

        stream_with_partition_info._j_data_stream.getTransformation().getOperatorFactory() \
            .getOperator().setContainsPartitionCustom(True)

        stream_with_partition_info.name(
            gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
            .STREAM_PARTITION_CUSTOM_MAP_OPERATOR_NAME)

        # Step 2: let the Java IdPartitioner route each record by the partition index that
        # was pre-computed in step 1.
        JPartitionCustomKeySelector = gateway.jvm.PartitionCustomKeySelector
        JIdParitioner = gateway.jvm.org.apache.flink.api.java.functions.IdPartitioner
        partitioned_stream_with_partition_info = DataStream(
            stream_with_partition_info._j_data_stream.partitionCustom(
                JIdParitioner(), JPartitionCustomKeySelector()))

        # Step 3: drop the partition-index field, restoring the original element type.
        partitioned_stream = partitioned_stream_with_partition_info.map(
            lambda x: x[1], original_type_info)
        partitioned_stream.name(gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
                                .KEYED_STREAM_VALUE_OPERATOR_NAME)
        return DataStream(partitioned_stream._j_data_stream)
def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
"""
Adds the given sink to this DataStream. Only streams with sinks added will be executed once
the StreamExecutionEnvironment.execute() method is called.
:param sink_func: The SinkFunction object.
:return: The closed DataStream.
"""
return DataStreamSink(self._j_data_stream.addSink(sink_func.get_java_function()))
def sink_to(self, sink: Sink) -> 'DataStreamSink':
"""
Adds the given sink to this DataStream. Only streams with sinks added will be
executed once the
:func:`~pyflink.datastream.stream_execution_environment.StreamExecutionEnvironment.execute`
method is called.
:param sink: The user defined sink.
:return: The closed DataStream.
"""
return DataStreamSink(self._j_data_stream.sinkTo(sink.get_java_function()))
def execute_and_collect(self, job_execution_name: str = None, limit: int = None) \
-> Union['CloseableIterator', list]:
"""
Triggers the distributed execution of the streaming dataflow and returns an iterator over
the elements of the given DataStream.
The DataStream application is executed in the regular distributed manner on the target
environment, and the events from the stream are polled back to this application process and
thread through Flink's REST API.
The returned iterator must be closed to free all cluster resources.
:param job_execution_name: The name of the job execution.
:param limit: The limit for the collected elements.
"""
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(self._j_data_stream.getExecutionEnvironment())
self._apply_chaining_optimization()
if job_execution_name is None and limit is None:
return CloseableIterator(self._j_data_stream.executeAndCollect(), self.get_type())
elif job_execution_name is not None and limit is None:
return CloseableIterator(self._j_data_stream.executeAndCollect(job_execution_name),
self.get_type())
if job_execution_name is None and limit is not None:
return list(map(lambda data: convert_to_python_obj(data, self.get_type()),
self._j_data_stream.executeAndCollect(limit)))
else:
return list(map(lambda data: convert_to_python_obj(data, self.get_type()),
self._j_data_stream.executeAndCollect(job_execution_name, limit)))
def print(self, sink_identifier: str = None) -> 'DataStreamSink':
"""
Writes a DataStream to the standard output stream (stdout).
For each element of the DataStream the object string is written.
NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
worker, and is not fault tolerant.
:param sink_identifier: The string to prefix the output with.
:return: The closed DataStream.
"""
if sink_identifier is not None:
j_data_stream_sink = self._align_output_type()._j_data_stream.print(sink_identifier)
else:
j_data_stream_sink = self._align_output_type()._j_data_stream.print()
return DataStreamSink(j_data_stream_sink)
def _apply_chaining_optimization(self):
"""
Chain the Python operators if possible.
"""
gateway = get_gateway()
JPythonOperatorChainingOptimizer = gateway.jvm.org.apache.flink.python.chain. \
PythonOperatorChainingOptimizer
j_transformation = JPythonOperatorChainingOptimizer.apply(
self._j_data_stream.getExecutionEnvironment(),
self._j_data_stream.getTransformation())
self._j_data_stream = gateway.jvm.org.apache.flink.streaming.api.datastream.DataStream(
self._j_data_stream.getExecutionEnvironment(), j_transformation)
def _align_output_type(self) -> 'DataStream':
"""
Transform the pickled python object into String if the output type is PickledByteArrayInfo.
"""
from py4j.java_gateway import get_java_class
gateway = get_gateway()
ExternalTypeInfo_CLASS = get_java_class(
gateway.jvm.org.apache.flink.table.runtime.typeutils.ExternalTypeInfo)
RowTypeInfo_CLASS = get_java_class(
gateway.jvm.org.apache.flink.api.java.typeutils.RowTypeInfo)
output_type_info_class = self._j_data_stream.getTransformation().getOutputType().getClass()
if output_type_info_class.isAssignableFrom(
Types.PICKLED_BYTE_ARRAY().get_java_type_info()
.getClass()):
def python_obj_to_str_map_func(value):
if not isinstance(value, (str, bytes)):
value = str(value)
return value
transformed_data_stream = DataStream(
self.map(python_obj_to_str_map_func,
output_type=Types.STRING())._j_data_stream)
return transformed_data_stream
elif (output_type_info_class.isAssignableFrom(ExternalTypeInfo_CLASS) or
output_type_info_class.isAssignableFrom(RowTypeInfo_CLASS)):
def python_obj_to_str_map_func(value):
assert isinstance(value, Row)
return '{}[{}]'.format(value.get_row_kind(),
','.join([str(item) for item in value._values]))
transformed_data_stream = DataStream(
self.map(python_obj_to_str_map_func,
output_type=Types.STRING())._j_data_stream)
return transformed_data_stream
else:
return self
class DataStreamSink(object):
    """
    A Stream Sink. This is used for emitting elements from a streaming topology.
    """

    def __init__(self, j_data_stream_sink):
        """
        The constructor of DataStreamSink.

        :param j_data_stream_sink: A DataStreamSink java object.
        """
        # All methods below are thin fluent wrappers that delegate to this Java object.
        self._j_data_stream_sink = j_data_stream_sink

    def name(self, name: str) -> 'DataStreamSink':
        """
        Sets the name of this sink. This name is used by the visualization and logging during
        runtime.

        :param name: The name of this sink.
        :return: The named sink.
        """
        self._j_data_stream_sink.name(name)
        return self

    def uid(self, uid: str) -> 'DataStreamSink':
        """
        Sets an ID for this operator. The specified ID is used to assign the same operator ID
        across job submissions (for example when starting a job from a savepoint).

        Important: this ID needs to be unique per transformation and job. Otherwise, job
        submission will fail.

        :param uid: The unique user-specified ID of this transformation.
        :return: The operator with the specified ID.
        """
        self._j_data_stream_sink.uid(uid)
        return self

    def set_uid_hash(self, uid_hash: str) -> 'DataStreamSink':
        """
        Sets an user provided hash for this operator. This will be used AS IS to create the
        JobVertexID. The user provided hash is an alternative to the generated hashes, that is
        considered when identifying an operator through the default hash mechanics fails (e.g.
        because of changes between Flink versions).

        Important: this should be used as a workaround or for trouble shooting. The provided hash
        needs to be unique per transformation and job. Otherwise, job submission will fail.
        Furthermore, you cannot assign user-specified hash to intermediate nodes in an operator
        chain and trying so will let your job fail.

        A use case for this is in migration between Flink versions or changing the jobs in a way
        that changes the automatically generated hashes. In this case, providing the previous
        hashes directly through this method (e.g. obtained from old logs) can help to reestablish
        a lost mapping from states to their target operator.

        :param uid_hash: The user provided hash for this operator. This will become the
                         jobVertexID, which is shown in the logs and web ui.
        :return: The operator with the user provided hash.
        """
        self._j_data_stream_sink.setUidHash(uid_hash)
        return self

    def set_parallelism(self, parallelism: int) -> 'DataStreamSink':
        """
        Sets the parallelism for this operator.

        :param parallelism: The parallelism for this operator.
        :return: The operator with set parallelism.
        """
        self._j_data_stream_sink.setParallelism(parallelism)
        return self

    def set_description(self, description: str) -> 'DataStreamSink':
        """
        Sets the description for this sink.

        Description is used in json plan and web ui, but not in logging and metrics where only
        name is available. Description is expected to provide detailed information about the sink,
        while name is expected to be more simple, providing summary information only, so that we
        can have more user-friendly logging messages and metric tags without losing useful
        messages for debugging.

        :param description: The description for this sink.
        :return: The sink with new description.

        .. versionadded:: 1.15.0
        """
        self._j_data_stream_sink.setDescription(description)
        return self

    def disable_chaining(self) -> 'DataStreamSink':
        """
        Turns off chaining for this operator so thread co-location will not be used as an
        optimization.

        Chaining can be turned off for the whole job by
        StreamExecutionEnvironment.disableOperatorChaining() however it is not advised for
        performance consideration.

        :return: The operator with chaining disabled.
        """
        self._j_data_stream_sink.disableChaining()
        return self

    def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) \
            -> 'DataStreamSink':
        """
        Sets the slot sharing group of this operation. Parallel instances of operations that are
        in the same slot sharing group will be co-located in the same TaskManager slot, if
        possible.

        Operations inherit the slot sharing group of input operations if all input operations are
        in the same slot sharing group and no slot sharing group was explicitly specified.

        Initially an operation is in the default slot sharing group. An operation can be put into
        the default group explicitly by setting the slot sharing group to 'default'.

        :param slot_sharing_group: The slot sharing group name or which contains name and its
                                   resource spec.
        :return: This operator.
        """
        # A SlotSharingGroup object carries both name and resource spec; a plain string is
        # just the group name.
        if isinstance(slot_sharing_group, SlotSharingGroup):
            self._j_data_stream_sink.slotSharingGroup(
                slot_sharing_group.get_java_slot_sharing_group())
        else:
            self._j_data_stream_sink.slotSharingGroup(slot_sharing_group)
        return self
class KeyedStream(DataStream):
    """
    A KeyedStream represents a DataStream on which operator state is partitioned by key using a
    provided KeySelector. Typical operations supported by a DataStream are also possible on a
    KeyedStream, with the exception of partitioning methods such as shuffle, forward and keyBy.

    Reduce-style operations, such as reduce and sum work on elements that have the same key.
    """

    def __init__(self, j_keyed_stream, original_data_type_info, origin_stream: DataStream):
        """
        Constructor of KeyedStream.

        :param j_keyed_stream: A java KeyedStream object.
        :param original_data_type_info: Original data typeinfo.
        :param origin_stream: The DataStream before key by.
        """
        super(KeyedStream, self).__init__(j_data_stream=j_keyed_stream)
        self._original_data_type_info = original_data_type_info
        self._origin_stream = origin_stream

    def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
            -> 'DataStream':
        """
        Applies a Map transformation on a KeyedStream. The transformation calls a MapFunction for
        each element of the DataStream. Each MapFunction call returns exactly one element.

        Note that If user does not specify the output data type, the output data will be
        serialized as pickle primitive byte array.

        :param func: The MapFunction that is called for each element of the DataStream.
        :param output_type: The type information of the MapFunction output data.
        :return: The transformed DataStream.
        """
        if not isinstance(func, MapFunction) and not callable(func):
            raise TypeError("The input must be a MapFunction or a callable function")

        # Wrap the map function as a KeyedProcessFunction so it runs in the keyed operator,
        # forwarding the open/close lifecycle when a rich MapFunction is supplied.
        class MapKeyedProcessFunctionAdapter(KeyedProcessFunction):

            def __init__(self, map_func):
                if isinstance(map_func, MapFunction):
                    self._open_func = map_func.open
                    self._close_func = map_func.close
                    self._map_func = map_func.map
                else:
                    self._open_func = None
                    self._close_func = None
                    self._map_func = map_func

            def open(self, runtime_context: RuntimeContext):
                if self._open_func:
                    self._open_func(runtime_context)

            def close(self):
                if self._close_func:
                    self._close_func()

            def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
                yield self._map_func(value)

        return self.process(MapKeyedProcessFunctionAdapter(func), output_type) \
            .name("Map")  # type: ignore

    def flat_map(self,
                 func: Union[Callable, FlatMapFunction],
                 output_type: TypeInformation = None) -> 'DataStream':
        """
        Applies a FlatMap transformation on a KeyedStream. The transformation calls a
        FlatMapFunction for each element of the DataStream. Each FlatMapFunction call can return
        any number of elements including none.

        :param func: The FlatMapFunction that is called for each element of the DataStream.
        :param output_type: The type information of output data.
        :return: The transformed DataStream.
        """
        if not isinstance(func, FlatMapFunction) and not callable(func):
            raise TypeError("The input must be a FlatMapFunction or a callable function")

        # Same adapter pattern as map(), but each call may yield any number of elements.
        class FlatMapKeyedProcessFunctionAdapter(KeyedProcessFunction):

            def __init__(self, flat_map_func):
                if isinstance(flat_map_func, FlatMapFunction):
                    self._open_func = flat_map_func.open
                    self._close_func = flat_map_func.close
                    self._flat_map_func = flat_map_func.flat_map
                else:
                    self._open_func = None
                    self._close_func = None
                    self._flat_map_func = flat_map_func

            def open(self, runtime_context: RuntimeContext):
                if self._open_func:
                    self._open_func(runtime_context)

            def close(self):
                if self._close_func:
                    self._close_func()

            def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
                yield from self._flat_map_func(value)

        return self.process(FlatMapKeyedProcessFunctionAdapter(func), output_type) \
            .name("FlatMap")

    def reduce(self, func: Union[Callable, ReduceFunction]) -> 'DataStream':
        """
        Applies a reduce transformation on the grouped data stream grouped on by the given
        key position. The `ReduceFunction` will receive input values based on the key value.
        Only input values with the same key will go to the same reducer.

        Example:
        ::

            >>> ds = env.from_collection([(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')])
            >>> ds.key_by(lambda x: x[1]).reduce(lambda a, b: (a[0] + b[0], b[1]))

        :param func: The ReduceFunction that is called for each element of the DataStream.
        :return: The transformed DataStream.
        """
        if not isinstance(func, ReduceFunction) and not callable(func):
            raise TypeError("The input must be a ReduceFunction or a callable function")

        # The reduced value has the same type as the input elements.
        output_type = _from_java_type(self._original_data_type_info.get_java_type_info())

        class ReduceProcessKeyedProcessFunctionAdapter(KeyedProcessFunction):

            def __init__(self, reduce_function):
                if isinstance(reduce_function, ReduceFunction):
                    self._open_func = reduce_function.open
                    self._close_func = reduce_function.close
                    self._reduce_function = reduce_function.reduce
                else:
                    self._open_func = None
                    self._close_func = None
                    self._reduce_function = reduce_function
                # Holds the running reduce result per key.
                self._reduce_value_state = None  # type: ValueState

            def open(self, runtime_context: RuntimeContext):
                if self._open_func:
                    self._open_func(runtime_context)

                # Unique state name so multiple reduce() calls on the same stream don't clash.
                self._reduce_value_state = runtime_context.get_state(
                    ValueStateDescriptor("_reduce_state" + str(uuid.uuid4()), output_type))
                from pyflink.fn_execution.datastream.runtime_context import StreamingRuntimeContext
                self._in_batch_execution_mode = \
                    cast(StreamingRuntimeContext, runtime_context)._in_batch_execution_mode

            def close(self):
                if self._close_func:
                    self._close_func()

            def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
                reduce_value = self._reduce_value_state.value()
                if reduce_value is not None:
                    reduce_value = self._reduce_function(reduce_value, value)
                else:
                    # register a timer for emitting the result at the end when this is the
                    # first input for this key
                    if self._in_batch_execution_mode:
                        ctx.timer_service().register_event_time_timer(0x7fffffffffffffff)
                    reduce_value = value
                self._reduce_value_state.update(reduce_value)
                if not self._in_batch_execution_mode:
                    # in streaming mode an updated result is emitted for every input; in
                    # batch mode the final result is only emitted via the timer once all
                    # the data for a key has been received
                    yield reduce_value

            def on_timer(self, timestamp: int, ctx: 'KeyedProcessFunction.OnTimerContext'):
                current_value = self._reduce_value_state.value()
                if current_value is not None:
                    yield current_value

        return self.process(ReduceProcessKeyedProcessFunctionAdapter(func), output_type) \
            .name("Reduce")

    def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
        """
        Applies a Filter transformation on a KeyedStream, retaining only the elements for which
        the given predicate returns true.

        :param func: The FilterFunction that is called for each element of the DataStream.
        :return: The filtered DataStream.
        """
        if not isinstance(func, FilterFunction) and not callable(func):
            raise TypeError("The input must be a FilterFunction or a callable function")

        class FilterKeyedProcessFunctionAdapter(KeyedProcessFunction):

            def __init__(self, filter_func):
                if isinstance(filter_func, FilterFunction):
                    self._open_func = filter_func.open
                    self._close_func = filter_func.close
                    self._filter_func = filter_func.filter
                else:
                    self._open_func = None
                    self._close_func = None
                    self._filter_func = filter_func

            def open(self, runtime_context: RuntimeContext):
                if self._open_func:
                    self._open_func(runtime_context)

            def close(self):
                if self._close_func:
                    self._close_func()

            def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
                if self._filter_func(value):
                    yield value

        # Filtering does not change the element type.
        return self.process(FilterKeyedProcessFunctionAdapter(func), self._original_data_type_info)\
            .name("Filter")

    def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
        """
        Adds the given sink to the values of this KeyedStream.

        :param sink_func: The SinkFunction object.
        :return: The closed DataStream.
        """
        return self._values().add_sink(sink_func)

    def key_by(self, key_selector: Union[Callable, KeySelector],
               key_type: TypeInformation = None) -> 'KeyedStream':
        """
        Re-keys the stream: keying is applied to the original (pre key-by) stream.

        :param key_selector: The KeySelector to be used for extracting the new key.
        :param key_type: The type information of the key.
        :return: The re-keyed KeyedStream.
        """
        return self._origin_stream.key_by(key_selector, key_type)

    def process(self, func: KeyedProcessFunction,  # type: ignore
                output_type: TypeInformation = None) -> 'DataStream':
        """
        Applies the given ProcessFunction on the input stream, thereby creating a transformed
        output stream.

        The function will be called for every element in the input streams and can produce zero
        or more output elements.

        :param func: The KeyedProcessFunction that is called for each element in the stream.
        :param output_type: TypeInformation for the result type of the function.
        :return: The transformed DataStream.
        """
        if not isinstance(func, KeyedProcessFunction):
            raise TypeError("KeyedProcessFunction is required for KeyedStream.")

        from pyflink.fn_execution import flink_fn_execution_pb2
        j_python_data_stream_function_operator, j_output_type_info = \
            _get_one_input_stream_operator(
                self,
                func,
                flink_fn_execution_pb2.UserDefinedDataStreamFunction.KEYED_PROCESS,  # type: ignore
                output_type)
        return DataStream(self._j_data_stream.transform(
            "KEYED PROCESS",
            j_output_type_info,
            j_python_data_stream_function_operator))

    def window(self, window_assigner: WindowAssigner) -> 'WindowedStream':
        """
        Windows this data stream to a WindowedStream, which evaluates windows over a key
        grouped stream. Elements are put into windows by a WindowAssigner. The grouping of
        elements is done both by key and by window.

        A Trigger can be defined to specify when windows are evaluated. However, WindowAssigners
        have a default Trigger that is used if a Trigger is not specified.

        :param window_assigner: The WindowAssigner that assigns elements to windows.
        :return: The trigger windows data stream.
        """
        return WindowedStream(self, window_assigner)

    def union(self, *streams) -> 'DataStream':
        """
        Unions the values of this KeyedStream with the given streams.
        """
        return self._values().union(*streams)

    def shuffle(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')

    def project(self, *field_indexes) -> 'DataStream':
        """
        Projects the values of this KeyedStream (see DataStream.project).
        """
        return self._values().project(*field_indexes)

    def rescale(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')

    def rebalance(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')

    def forward(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')

    def broadcast(self) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')

    def partition_custom(self, partitioner: Union[Callable, Partitioner],
                         key_selector: Union[Callable, KeySelector]) -> 'DataStream':
        raise Exception('Cannot override partitioning for KeyedStream.')

    def print(self, sink_identifier=None):
        """
        Writes the values of this KeyedStream to the standard output stream (stdout).

        :param sink_identifier: The string to prefix the output with.
        :return: The closed DataStream.
        """
        # Bug fix: the sink_identifier was previously accepted but silently dropped;
        # forward it so the output prefix actually takes effect (DataStream.print
        # handles the None default).
        return self._values().print(sink_identifier)

    def _values(self) -> 'DataStream':
        """
        Since python KeyedStream is in the format of Row(key_value, original_data), it is used for
        getting the original_data.
        """
        transformed_stream = self.map(lambda x: x, output_type=self._original_data_type_info)
        transformed_stream.name(get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
                                .KEYED_STREAM_VALUE_OPERATOR_NAME)
        return DataStream(transformed_stream._j_data_stream)

    def set_parallelism(self, parallelism: int):
        raise Exception("Set parallelism for KeyedStream is not supported.")

    def name(self, name: str):
        raise Exception("Set name for KeyedStream is not supported.")

    def get_name(self) -> str:
        raise Exception("Get name of KeyedStream is not supported.")

    def uid(self, uid: str):
        raise Exception("Set uid for KeyedStream is not supported.")

    def set_uid_hash(self, uid_hash: str):
        raise Exception("Set uid hash for KeyedStream is not supported.")

    def set_max_parallelism(self, max_parallelism: int):
        raise Exception("Set max parallelism for KeyedStream is not supported.")

    def force_non_parallel(self):
        raise Exception("Set force non-parallel for KeyedStream is not supported.")

    def set_buffer_timeout(self, timeout_millis: int):
        raise Exception("Set buffer timeout for KeyedStream is not supported.")

    def start_new_chain(self) -> 'DataStream':
        raise Exception("Start new chain for KeyedStream is not supported.")

    def disable_chaining(self) -> 'DataStream':
        raise Exception("Disable chaining for KeyedStream is not supported.")

    def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) -> 'DataStream':
        raise Exception("Setting slot sharing group for KeyedStream is not supported.")
class WindowedStream(object):
"""
A WindowedStream represents a data stream where elements are grouped by key, and for each
key, the stream of elements is split into windows based on a WindowAssigner. Window emission
is triggered based on a Trigger.
The windows are conceptually evaluated for each key individually, meaning windows can trigger
at different points for each key.
Note that the WindowedStream is purely an API construct, during runtime the WindowedStream will
be collapsed together with the KeyedStream and the operation over the window into one single
operation.
"""
def __init__(self, keyed_stream: KeyedStream, window_assigner: WindowAssigner):
self._keyed_stream = keyed_stream
self._window_assigner = window_assigner
self._allowed_lateness = 0
self._window_trigger = None # type: Trigger
def get_execution_environment(self):
return self._keyed_stream.get_execution_environment()
def get_input_type(self):
return _from_java_type(self._keyed_stream._original_data_type_info.get_java_type_info())
def trigger(self, trigger: Trigger):
"""
Sets the Trigger that should be used to trigger window emission.
"""
self._window_trigger = trigger
return self
def allowed_lateness(self, time_ms: int):
"""
Sets the time by which elements are allowed to be late. Elements that arrive behind the
watermark by more than the specified time will be dropped. By default, the allowed lateness
is 0.
Setting an allowed lateness is only valid for event-time windows.
"""
self._allowed_lateness = time_ms
return self
def apply(self,
window_function: WindowFunction, result_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
evaluation of the window for each key individually. The output of the window function is
interpreted as a regular non-windowed stream.
Note that this function requires that all data in the windows is buffered until the window
is evaluated, as the function provides no means of incremental aggregation.
:param window_function: The window function.
:param result_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
"""
internal_window_function = InternalIterableWindowFunction(
window_function) # type: InternalWindowFunction
return self._get_result_data_stream(internal_window_function, result_type)
def process(self,
process_window_function: ProcessWindowFunction,
result_type: TypeInformation = None):
"""
Applies the given window function to each window. The window function is called for each
evaluation of the window for each key individually. The output of the window function is
interpreted as a regular non-windowed stream.
Note that this function requires that all data in the windows is buffered until the window
is evaluated, as the function provides no means of incremental aggregation.
:param process_window_function: The window function.
:param result_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
"""
internal_window_function = InternalIterableProcessWindowFunction(
process_window_function) # type: InternalWindowFunction
return self._get_result_data_stream(internal_window_function, result_type)
    def _get_result_data_stream(
            self, internal_window_function: InternalWindowFunction, result_type):
        """
        Build the Java WINDOW operator for this windowed stream and wrap it
        into a new DataStream.

        :param internal_window_function: the wrapped user window function.
        :param result_type: type information of the output, or None.
        """
        # Fall back to the assigner's default trigger when the user did not
        # configure one explicitly.
        if self._window_trigger is None:
            self._window_trigger = self._window_assigner.get_default_trigger(
                self.get_execution_environment())
        window_serializer = self._window_assigner.get_window_serializer()
        # Window contents are buffered in list state until the window fires.
        window_state_descriptor = ListStateDescriptor(
            "window-contents", self.get_input_type())
        # Bundle everything the operator needs into a single descriptor.
        window_operation_descriptor = WindowOperationDescriptor(
            self._window_assigner,
            self._window_trigger,
            self._allowed_lateness,
            window_state_descriptor,
            window_serializer,
            internal_window_function)
        # Local import: the generated protobuf module defines the
        # function-type enum used by the operator factory.
        from pyflink.fn_execution import flink_fn_execution_pb2
        j_python_data_stream_function_operator, j_output_type_info = \
            _get_one_input_stream_operator(
                self._keyed_stream,
                window_operation_descriptor,
                flink_fn_execution_pb2.UserDefinedDataStreamFunction.WINDOW,  # type: ignore
                result_type)
        # Apply the transformation on the underlying (keyed) Java stream.
        return DataStream(self._keyed_stream._j_data_stream.transform(
            "WINDOW",
            j_output_type_info,
            j_python_data_stream_function_operator))
class ConnectedStreams(object):
    """
    ConnectedStreams represent two connected streams of (possibly) different data types.
    Connected streams are useful for cases where operations on one stream directly
    affect the operations on the other stream, usually via shared state between the streams.

    An example for the use of connected streams would be to apply rules that change over time
    onto another stream. One of the connected streams has the rules, the other stream the
    elements to apply the rules to. The operation on the connected stream maintains the
    current set of rules in the state. It may receive either a rule update and update the state
    or a data element and apply the rules in the state to the element.

    The connected stream can be conceptually viewed as a union stream of an Either type, that
    holds either the first stream's type or the second stream's type.
    """

    def __init__(self, stream1: DataStream, stream2: DataStream):
        self.stream1 = stream1
        self.stream2 = stream2

    def key_by(self, key_selector1: Union[Callable, KeySelector],
               key_selector2: Union[Callable, KeySelector],
               key_type: TypeInformation = None) -> 'ConnectedStreams':
        """
        KeyBy operation for connected data stream. Assigns keys to the elements of
        input1 and input2 using keySelector1 and keySelector2 with explicit type information
        for the common key type.

        :param key_selector1: The `KeySelector` used for grouping the first input.
        :param key_selector2: The `KeySelector` used for grouping the second input.
        :param key_type: The type information of the common key type
        :return: The partitioned `ConnectedStreams`
        """
        ds1 = self.stream1
        ds2 = self.stream2
        # If an input is already keyed, key its original (pre-key_by) stream
        # again instead of keying a KeyedStream a second time.
        if isinstance(self.stream1, KeyedStream):
            ds1 = self.stream1._origin_stream
        if isinstance(self.stream2, KeyedStream):
            ds2 = self.stream2._origin_stream
        return ConnectedStreams(
            ds1.key_by(key_selector1, key_type),
            ds2.key_by(key_selector2, key_type))

    def map(self, func: CoMapFunction, output_type: TypeInformation = None) -> 'DataStream':
        """
        Applies a CoMap transformation on a `ConnectedStreams` and maps the output to a common
        type. The transformation calls a `CoMapFunction.map1` for each element of the first
        input and `CoMapFunction.map2` for each element of the second input. Each CoMapFunction
        call returns exactly one element.

        :param func: The CoMapFunction used to jointly transform the two input DataStreams
        :param output_type: `TypeInformation` for the result type of the function.
        :return: The transformed `DataStream`
        """
        if not isinstance(func, CoMapFunction):
            raise TypeError("The input function must be a CoMapFunction!")

        # CoMap is implemented on top of the (keyed) co-process operator by
        # wrapping map1/map2 into process_element1/process_element2.
        if self._is_keyed_stream():
            class CoMapKeyedCoProcessFunctionAdapter(KeyedCoProcessFunction):
                """Adapter exposing a CoMapFunction as a KeyedCoProcessFunction."""

                def __init__(self, co_map_func: CoMapFunction):
                    self._open_func = co_map_func.open
                    self._close_func = co_map_func.close
                    self._map1_func = co_map_func.map1
                    self._map2_func = co_map_func.map2

                def open(self, runtime_context: RuntimeContext):
                    self._open_func(runtime_context)

                def close(self):
                    self._close_func()

                def process_element1(self, value, ctx: 'KeyedCoProcessFunction.Context'):
                    # map1 returns exactly one element; None results are dropped.
                    result = self._map1_func(value)
                    if result is not None:
                        yield result

                def process_element2(self, value, ctx: 'KeyedCoProcessFunction.Context'):
                    result = self._map2_func(value)
                    if result is not None:
                        yield result

            return self.process(CoMapKeyedCoProcessFunctionAdapter(func), output_type) \
                .name("Co-Map")
        else:
            class CoMapCoProcessFunctionAdapter(CoProcessFunction):
                """Adapter exposing a CoMapFunction as a CoProcessFunction."""

                def __init__(self, co_map_func: CoMapFunction):
                    self._open_func = co_map_func.open
                    self._close_func = co_map_func.close
                    self._map1_func = co_map_func.map1
                    self._map2_func = co_map_func.map2

                def open(self, runtime_context: RuntimeContext):
                    self._open_func(runtime_context)

                def close(self):
                    self._close_func()

                def process_element1(self, value, ctx: 'CoProcessFunction.Context'):
                    result = self._map1_func(value)
                    if result is not None:
                        yield result

                def process_element2(self, value, ctx: 'CoProcessFunction.Context'):
                    result = self._map2_func(value)
                    if result is not None:
                        yield result

            return self.process(CoMapCoProcessFunctionAdapter(func), output_type) \
                .name("Co-Map")

    def flat_map(self, func: CoFlatMapFunction, output_type: TypeInformation = None) \
            -> 'DataStream':
        """
        Applies a CoFlatMap transformation on a `ConnectedStreams` and maps the output to a
        common type. The transformation calls a `CoFlatMapFunction.flatMap1` for each element
        of the first input and `CoFlatMapFunction.flatMap2` for each element of the second
        input. Each CoFlatMapFunction call returns any number of elements including none.

        :param func: The CoFlatMapFunction used to jointly transform the two input DataStreams
        :param output_type: `TypeInformation` for the result type of the function.
        :return: The transformed `DataStream`
        """
        if not isinstance(func, CoFlatMapFunction):
            raise TypeError("The input must be a CoFlatMapFunction!")

        # Same strategy as map(): wrap the flat_map functions into the
        # matching (keyed) co-process function.
        if self._is_keyed_stream():
            class FlatMapKeyedCoProcessFunctionAdapter(KeyedCoProcessFunction):
                """Adapter exposing a CoFlatMapFunction as a KeyedCoProcessFunction."""

                def __init__(self, co_flat_map_func: CoFlatMapFunction):
                    self._open_func = co_flat_map_func.open
                    self._close_func = co_flat_map_func.close
                    self._flat_map1_func = co_flat_map_func.flat_map1
                    self._flat_map2_func = co_flat_map_func.flat_map2

                def open(self, runtime_context: RuntimeContext):
                    self._open_func(runtime_context)

                def close(self):
                    self._close_func()

                def process_element1(self, value, ctx: 'KeyedCoProcessFunction.Context'):
                    # flat_map may return any iterable of results (or None/empty).
                    result = self._flat_map1_func(value)
                    if result:
                        yield from result

                def process_element2(self, value, ctx: 'KeyedCoProcessFunction.Context'):
                    result = self._flat_map2_func(value)
                    if result:
                        yield from result

            return self.process(FlatMapKeyedCoProcessFunctionAdapter(func), output_type) \
                .name("Co-Flat Map")
        else:
            class FlatMapCoProcessFunctionAdapter(CoProcessFunction):
                """Adapter exposing a CoFlatMapFunction as a CoProcessFunction."""

                def __init__(self, co_flat_map_func: CoFlatMapFunction):
                    self._open_func = co_flat_map_func.open
                    self._close_func = co_flat_map_func.close
                    self._flat_map1_func = co_flat_map_func.flat_map1
                    self._flat_map2_func = co_flat_map_func.flat_map2

                def open(self, runtime_context: RuntimeContext):
                    self._open_func(runtime_context)

                def close(self):
                    self._close_func()

                def process_element1(self, value, ctx: 'CoProcessFunction.Context'):
                    result = self._flat_map1_func(value)
                    if result:
                        yield from result

                def process_element2(self, value, ctx: 'CoProcessFunction.Context'):
                    result = self._flat_map2_func(value)
                    if result:
                        yield from result

            return self.process(FlatMapCoProcessFunctionAdapter(func), output_type) \
                .name("Co-Flat Map")

    def process(self,
                func: Union[CoProcessFunction, KeyedCoProcessFunction],
                output_type: TypeInformation = None) -> 'DataStream':
        """
        Apply the given (keyed) co-process function to both inputs and return
        the resulting `DataStream`.

        :param func: a CoProcessFunction (or KeyedCoProcessFunction when both
            inputs are keyed).
        :param output_type: `TypeInformation` for the result type of the function.
        :return: The transformed `DataStream`
        """
        if not isinstance(func, CoProcessFunction) and not isinstance(func, KeyedCoProcessFunction):
            raise TypeError("The input must be a CoProcessFunction or KeyedCoProcessFunction!")

        # Local import: the generated protobuf module defines the
        # function-type enum.
        from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
        if self._is_keyed_stream():
            func_type = UserDefinedDataStreamFunction.KEYED_CO_PROCESS  # type: ignore
            func_name = "Keyed Co-Process"
        else:
            func_type = UserDefinedDataStreamFunction.CO_PROCESS  # type: ignore
            func_name = "Co-Process"

        # Connect the underlying Java streams and apply the two-input operator.
        j_connected_stream = self.stream1._j_data_stream.connect(self.stream2._j_data_stream)
        j_operator, j_output_type = _get_two_input_stream_operator(
            self,
            func,
            func_type,
            output_type)
        return DataStream(j_connected_stream.transform(func_name, j_output_type, j_operator))

    def _is_keyed_stream(self):
        # Both inputs must be keyed for the keyed co-process path to apply.
        return isinstance(self.stream1, KeyedStream) and isinstance(self.stream2, KeyedStream)
def _get_one_input_stream_operator(data_stream: DataStream,
                                   func: Union[Function,
                                               FunctionWrapper,
                                               WindowOperationDescriptor],
                                   func_type: int,
                                   output_type: Union[TypeInformation, List] = None):
    """
    Create a Java one input stream operator.

    :param data_stream: the input stream; its transformation's output type
        becomes the operator's input type.
    :param func: a function object that implements the Function interface.
    :param func_type: function type, supports MAP, FLAT_MAP, etc.
    :param output_type: the data type of the function output data.
    :return: A Java operator which is responsible for execution user defined python function.
    """
    gateway = get_gateway()
    import cloudpickle
    # The Python function is shipped to the Java operator as pickled bytes.
    serialized_func = cloudpickle.dumps(func)
    j_input_types = data_stream._j_data_stream.getTransformation().getOutputType()

    if output_type is None:
        # Without explicit type info, records are exchanged as pickled bytes.
        output_type_info = Types.PICKLED_BYTE_ARRAY()  # type: TypeInformation
    elif isinstance(output_type, list):
        # A list of field types is interpreted as a Row type.
        output_type_info = RowTypeInfo(output_type)
    else:
        output_type_info = output_type
    j_output_type_info = output_type_info.get_java_type_info()

    j_data_stream_python_function = gateway.jvm.DataStreamPythonFunction(
        bytearray(serialized_func),
        _get_python_env())
    j_data_stream_python_function_info = gateway.jvm.DataStreamPythonFunctionInfo(
        j_data_stream_python_function,
        func_type)
    j_conf = gateway.jvm.org.apache.flink.configuration.Configuration()

    # Local import: the generated protobuf module defines the function-type enum.
    from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
    if func_type == UserDefinedDataStreamFunction.PROCESS:  # type: ignore
        JDataStreamPythonFunctionOperator = gateway.jvm.PythonProcessOperator
    elif func_type == UserDefinedDataStreamFunction.KEYED_PROCESS:  # type: ignore
        JDataStreamPythonFunctionOperator = gateway.jvm.PythonKeyedProcessOperator
    elif func_type == UserDefinedDataStreamFunction.WINDOW:  # type: ignore
        # Window operations additionally need a serializer for the window
        # namespace, chosen from the descriptor's window serializer.
        window_serializer = typing.cast(WindowOperationDescriptor, func).window_serializer
        if isinstance(window_serializer, TimeWindowSerializer):
            j_namespace_serializer = \
                gateway.jvm.org.apache.flink.table.runtime.operators.window.TimeWindow.Serializer()
        elif isinstance(window_serializer, CountWindowSerializer):
            j_namespace_serializer = \
                gateway.jvm.org.apache.flink.table.runtime.operators.window.CountWindow.Serializer()
        else:
            # Custom windows fall back to a generic byte-array serializer.
            j_namespace_serializer = \
                gateway.jvm.org.apache.flink.streaming.api.utils.ByteArrayWrapperSerializer()
        j_python_function_operator = gateway.jvm.PythonKeyedProcessOperator(
            j_conf,
            j_data_stream_python_function_info,
            j_input_types,
            j_output_type_info,
            j_namespace_serializer)
        # The window operator takes the extra namespace serializer argument,
        # so it is constructed and returned here instead of falling through.
        return j_python_function_operator, j_output_type_info
    else:
        raise TypeError("Unsupported function type: %s" % func_type)

    j_python_function_operator = JDataStreamPythonFunctionOperator(
        j_conf,
        j_data_stream_python_function_info,
        j_input_types,
        j_output_type_info)
    return j_python_function_operator, j_output_type_info
def _get_two_input_stream_operator(connected_streams: ConnectedStreams,
                                   func: Union[Function, FunctionWrapper],
                                   func_type: int,
                                   type_info: TypeInformation):
    """
    Create a Java two input stream operator.

    :param connected_streams: the two connected input streams; their
        transformations' output types become the operator's input types.
    :param func: a function object that implements the Function interface.
    :param func_type: function type, supports MAP, FLAT_MAP, etc.
    :param type_info: the data type of the function output data.
    :return: A Java operator which is responsible for execution user defined python function.
    """
    gateway = get_gateway()
    import cloudpickle
    # The Python function is shipped to the Java operator as pickled bytes.
    serialized_func = cloudpickle.dumps(func)

    j_input_types1 = connected_streams.stream1._j_data_stream.getTransformation().getOutputType()
    j_input_types2 = connected_streams.stream2._j_data_stream.getTransformation().getOutputType()

    if type_info is None:
        # Without explicit type info, records are exchanged as pickled bytes.
        output_type_info = Types.PICKLED_BYTE_ARRAY()  # type: TypeInformation
    elif isinstance(type_info, list):
        # A list of field types is interpreted as a Row type.
        output_type_info = RowTypeInfo(type_info)
    else:
        output_type_info = type_info
    j_output_type_info = output_type_info.get_java_type_info()

    j_data_stream_python_function = gateway.jvm.DataStreamPythonFunction(
        bytearray(serialized_func),
        _get_python_env())
    j_data_stream_python_function_info = gateway.jvm.DataStreamPythonFunctionInfo(
        j_data_stream_python_function,
        func_type)

    # Local import: the generated protobuf module defines the function-type enum.
    from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
    if func_type == UserDefinedDataStreamFunction.CO_PROCESS:  # type: ignore
        JTwoInputPythonFunctionOperator = gateway.jvm.PythonCoProcessOperator
    elif func_type == UserDefinedDataStreamFunction.KEYED_CO_PROCESS:  # type: ignore
        JTwoInputPythonFunctionOperator = gateway.jvm.PythonKeyedCoProcessOperator
    else:
        raise TypeError("Unsupported function type: %s" % func_type)

    j_conf = gateway.jvm.org.apache.flink.configuration.Configuration()
    j_python_data_stream_function_operator = JTwoInputPythonFunctionOperator(
        j_conf,
        j_data_stream_python_function_info,
        j_input_types1,
        j_input_types2,
        j_output_type_info)
    return j_python_data_stream_function_operator, j_output_type_info
class CloseableIterator(object):
    """
    An iterator over results that must be closed when no longer needed.

    Wraps a Java closeable iterator and supports both the Python iterator
    protocol and the context-manager protocol, so it can be used in a
    ``with`` statement to guarantee that the underlying resource is released.
    """

    def __init__(self, j_closeable_iterator, type_info: TypeInformation = None):
        self._j_closeable_iterator = j_closeable_iterator
        self._type_info = type_info

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the underlying Java iterator on scope exit.
        self.close()

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return the next record, converted to a Python object."""
        if self._j_closeable_iterator.hasNext():
            return convert_to_python_obj(self._j_closeable_iterator.next(), self._type_info)
        raise StopIteration('No more data.')

    def close(self):
        """Close the underlying Java iterator."""
        self._j_closeable_iterator.close()
| 45.428086 | 100 | 0.66001 |
538ba6bb2b40070eb6e528827d7752d34c656b7c | 1,316 | py | Python | docs/conf.py | cirospat/paniere-dataset-enti-locali | 58213e8414c291befad5932b283ff56fa18893e7 | [
"CC-BY-4.0"
] | null | null | null | docs/conf.py | cirospat/paniere-dataset-enti-locali | 58213e8414c291befad5932b283ff56fa18893e7 | [
"CC-BY-4.0"
] | null | null | null | docs/conf.py | cirospat/paniere-dataset-enti-locali | 58213e8414c291befad5932b283ff56fa18893e7 | [
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
# True when building on Read the Docs (the READTHEDOCS env var is the string 'True').
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Add the parent directory to sys.path — presumably so Sphinx can import the
# project sources; TODO confirm against the project layout.
sys.path.append(os.path.abspath(os.pardir))
__version__ = '1.0'

# -- General configuration -----------------------------------------------------
source_suffix = '.rst'
master_doc = 'index'
project = 'Definizione standard per il paniere di dataset degli enti locali'
copyright = '= licenza CC BY'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
extlinks = {}

# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
html_static_path = ['static']
def setup(app):
# overrides for wide tables in RTD theme
app.add_stylesheet('theme_overrides.css') # path relative to static
"""
You might want to uncomment the “latex_documents = []” if you use CKJ characters in your document.
Because the pdflatex raises exception when generate Latex documents with CKJ characters.
"""
#latex_documents = []
latex_logo = "img/regionelombardia.png"
html_logo = "img/regionelombardia.png"
# Adding Custom CSS or JavaScript to a Sphinx Project: al seguente link ci sono esempi
# https://docs.readthedocs.io/en/latest/guides/adding-custom-css.html
templates_path = ['_templates']
| 25.803922 | 100 | 0.672492 |
bf902ad601de68e48495587f423b5e48e3fdf669 | 3,175 | py | Python | zun/objects/numa.py | magician03/zun | 4db10041fa4db0dd81f2e110b113172db3dc8f80 | [
"Apache-2.0"
] | 3 | 2018-09-07T02:31:05.000Z | 2018-10-17T10:30:47.000Z | zun/objects/numa.py | magician03/zun | 4db10041fa4db0dd81f2e110b113172db3dc8f80 | [
"Apache-2.0"
] | null | null | null | zun/objects/numa.py | magician03/zun | 4db10041fa4db0dd81f2e110b113172db3dc8f80 | [
"Apache-2.0"
] | 1 | 2018-09-07T02:26:23.000Z | 2018-09-07T02:26:23.000Z | # Copyright 2014 Red Hat Inc.
# Copyright 2017 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from zun.common import exception
from zun.objects import base
@base.ZunObjectRegistry.register
class NUMANode(base.ZunObject):
    """A single NUMA node: its CPU set and the subset of pinned CPUs."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'cpuset': fields.SetOfIntegersField(),
        'pinned_cpus': fields.SetOfIntegersField(),
    }

    @property
    def free_cpus(self):
        """Set of CPUs on this node that are not currently pinned."""
        return self.cpuset - self.pinned_cpus or set()

    @property
    def avail_cpus(self):
        """Number of CPUs available for pinning."""
        return len(self.free_cpus)

    def pin_cpus(self, cpus):
        """Pin the given CPUs on this node.

        :param cpus: set of CPU ids to pin
        :raises exception.CPUPinningUnknown: if a requested CPU does not
            belong to this node's cpuset.
        :raises exception.CPUPinningInvalid: if a requested CPU is already
            pinned.
        """
        if cpus - self.cpuset:
            # Bug fix: report the node's known cpuset (previously the pinned
            # subset was passed as "cpuset", making the error message wrong).
            raise exception.CPUPinningUnknown(requested=list(cpus),
                                              cpuset=list(self.cpuset))
        if self.pinned_cpus & cpus:
            raise exception.CPUPinningInvalid(requested=list(cpus),
                                              free=list(self.cpuset -
                                                        self.pinned_cpus))
        self.pinned_cpus |= cpus

    def unpin_cpus(self, cpus):
        """Unpin the given CPUs on this node.

        :param cpus: set of CPU ids to unpin
        :raises exception.CPUUnpinningUnknown: if a requested CPU does not
            belong to this node's cpuset.
        :raises exception.CPUUnpinningInvalid: if a requested CPU is not
            currently pinned.
        """
        if cpus - self.cpuset:
            # Bug fix: report the node's known cpuset (was pinned_cpus).
            raise exception.CPUUnpinningUnknown(requested=list(cpus),
                                                cpuset=list(self.cpuset))
        if (self.pinned_cpus & cpus) != cpus:
            raise exception.CPUUnpinningInvalid(requested=list(cpus),
                                                pinned=list(self.pinned_cpus))
        self.pinned_cpus -= cpus

    def _to_dict(self):
        """Serialize this node to a plain dict (sets become lists)."""
        return {
            'id': self.id,
            'cpuset': list(self.cpuset),
            'pinned_cpus': list(self.pinned_cpus)
        }

    @classmethod
    def _from_dict(cls, data_dict):
        """Rebuild a node from the dict produced by ``_to_dict``."""
        cpuset = set(data_dict.get('cpuset', []))
        node_id = data_dict.get('id')
        # Robustness fix: default to an empty iterable so a missing
        # 'pinned_cpus' key no longer raises TypeError via set(None).
        pinned_cpus = set(data_dict.get('pinned_cpus', []))
        return cls(id=node_id, cpuset=cpuset,
                   pinned_cpus=pinned_cpus)
@base.ZunObjectRegistry.register
class NUMATopology(base.ZunObject):
    """Collection of NUMA nodes forming a host topology."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'nodes': fields.ListOfObjectsField('NUMANode'),
    }

    def to_list(self):
        """Serialize every node to a plain dict and return them as a list."""
        return [node._to_dict() for node in self.nodes]

    def _to_dict(self):
        """Serialize the topology to a dict with a single ``nodes`` key."""
        return {'nodes': self.to_list()}

    @classmethod
    def _from_dict(cls, data_dict):
        """Rebuild a topology from the dict produced by ``_to_dict``."""
        node_dicts = data_dict.get('nodes', [])
        return cls(nodes=[NUMANode._from_dict(d) for d in node_dicts])
| 32.397959 | 78 | 0.59622 |
066cc889e2a40ac6f2336a57936af6688919604a | 4,795 | py | Python | optimizer3.py | brianjsl/ndf_mnist | 936b6616c5aca2d555cf88c134ff92f3ff57c08b | [
"MIT"
] | 1 | 2022-03-15T04:32:40.000Z | 2022-03-15T04:32:40.000Z | optimizer3.py | brianjsl/ndf_mnist | 936b6616c5aca2d555cf88c134ff92f3ff57c08b | [
"MIT"
] | null | null | null | optimizer3.py | brianjsl/ndf_mnist | 936b6616c5aca2d555cf88c134ff92f3ff57c08b | [
"MIT"
] | null | null | null | import torch
from dataloader_ndf import OverlapMNISTNDF
import torchvision.transforms as transforms
import torchvision.transforms
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
from neural_field import NeuralField
from constants import IMG_DIR
import matplotlib.pyplot as plt
import PIL.Image as Image
from writePoints import writePoints
import cv2
import argparse
from tqdm import tqdm
# Normalization applied before feeding images to the network: maps
# single-channel values from [0, 1] to [-1, 1] (mean 0.5, std 0.5).
data_transforms = transforms.Compose([
                    transforms.Normalize([0.5],[0.5])
])
def ndf(image, coordinate):
    '''
    Compute the neural descriptor field (NDF) feature for one pixel:
    the concatenated activations of five hidden layers of the pretrained
    model evaluated at ``coordinate`` on ``image``.

    Params:
    @image: (1,32,32) Tensor Image
    @coordinate: (2,1) Coordinate
    :return: 1-D tensor of concatenated hidden-layer activations.
    '''
    activations = {}

    def get_activation(name):
        # Forward hook that stashes the (detached) output of one layer.
        def hook(model, input, output):
            activations[name] = output.detach()
        return hook

    image = data_transforms(image)
    image = torch.unsqueeze(image, 0)
    # NOTE(review): the checkpoint is re-loaded from disk on every call;
    # callers in hot loops may want to hoist this — confirm before changing.
    model = torch.load('./checkpoints/new/chkpt_2.pt', map_location='cpu')

    # Hook the five hidden sub-modules at indices 1, 3, 5, 7, 9 of
    # linear_relu_stack (assumed to be the activation layers — TODO confirm
    # against the NeuralField definition).
    handles = [
        model.linear_relu_stack[idx].register_forward_hook(
            get_activation('layer%d' % layer))
        for layer, idx in enumerate((1, 3, 5, 7, 9), start=1)
    ]
    try:
        model((image, coordinate))
        energy = torch.cat((activations['layer1'], activations['layer2'],
                            activations['layer3'], activations['layer4'],
                            activations['layer5']), 1)
    finally:
        # Bug fix: previously only the first three hooks were removed (h4 and
        # h5 leaked); remove every handle even if the forward pass fails.
        for handle in handles:
            handle.remove()
    return energy.squeeze()
def optimize(target_image, target_coord, image):
    '''
    Exhaustively search a 32x32 grid of ``image`` for the pixel whose NDF
    feature is closest (in L2 norm) to the feature of ``target_coord`` in
    ``target_image``.

    Params1:
    @target_image: image you want to minimize energy to
    @target_coord: corresponding coordinate
    @image: image you sample over
    :return: (best coordinate as a (2, 1) tensor, its feature distance)
    '''
    reference = ndf(target_image, target_coord)
    best_diff = float('inf')
    best_coord = None
    for row in tqdm(range(32)):
        for col in range(32):
            candidate = torch.Tensor([row, col]).view(2, -1)
            distance = torch.norm(ndf(image, candidate) - reference)
            if distance < best_diff:
                best_diff = distance
                best_coord = candidate
    return best_coord, best_diff
def argparser():
    '''
    Initializes argparser.

    Arguments:
    --image1_class: class of image 1
    --image1_num: number of image 1
    --image2_class: class of image 2
    --image2_num: number of image 2
    '''
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Bug fix: defaults must be strings. argparse applies ``type`` only to
    # string defaults, so the previous integer defaults (00 / 0) produced int
    # config values and broke the path concatenation in __main__.
    parser.add_argument('--image1_class', type=str,
                        default='00',
                        help='class of image 1'
                        )
    parser.add_argument('--image1_num', type=str,
                        default='0',
                        help='num of image 1'
                        )
    parser.add_argument('--image2_class', type=str,
                        default='00',
                        help='class of image 2'
                        )
    parser.add_argument('--image2_num', type=str,
                        default='0',
                        help='num of image 2'
                        )
    config = parser.parse_args()
    return config
if __name__ == '__main__':
    config = argparser()
    transform_to_tensor = transforms.ToTensor()
    # Load image 1 (path layout: train/<class>/<num>_<class>.png) and collect
    # labeled points via writePoints (presumably interactive labeling —
    # confirm against writePoints).
    image1 = Image.open('./data/MNIST/overlapMNIST/train/'+config.image1_class+'/'+config.image1_num+'_'\
        +config.image1_class+'.png')
    image1 = transform_to_tensor(image1)
    image1_with_points, coordinates = writePoints(image1.squeeze())
    print(coordinates[0])
    # Load image 2, in which corresponding points will be searched.
    image2 = Image.open('./data/MNIST/overlapMNIST/train/'+config.image2_class+'/'+config.image2_num+'_'\
        +config.image2_class+'.png')
    image2 = transform_to_tensor(image2)
    # For each labeled point, exhaustively search image 2 for the pixel with
    # the closest neural-descriptor feature.
    min_coords = []
    for coord in coordinates:
        coord = torch.Tensor(coord).view(2,-1)
        min_coord, min_diff = optimize(image1, coord, image2)
        print(min_coord)
        print(min_diff)
        min_coords.append(min_coord)
    # Draw the matched points on image 2 and display both images side by side.
    image2 = image2.squeeze().numpy()
    for min_coord in min_coords:
        cv2.circle(image2, (int(min_coord[0,0].item()), int(min_coord[1,0].item())), radius = 1, color=(0,255,0), thickness = 1)
    plt.figure(figsize=[8,4]);
    plt.subplot(121); plt.imshow(image1_with_points.squeeze(), cmap = 'gray'); plt.title('Image 1 with labeled points')
    plt.subplot(122); plt.imshow(image2.squeeze(), cmap = 'gray'); plt.title('Image 2 with corresponding points')
    plt.show()
| 33.068966 | 129 | 0.633368 |
ac87f1fdddf59b82434e0d3b3f4323801f9389dd | 44,276 | py | Python | pgmpy/readwrite/ProbModelXML.py | anaviltripathi/pgmpy | 24300984c66562219a09a3e9deb5e158bb021adb | [
"MIT"
] | null | null | null | pgmpy/readwrite/ProbModelXML.py | anaviltripathi/pgmpy | 24300984c66562219a09a3e9deb5e158bb021adb | [
"MIT"
] | null | null | null | pgmpy/readwrite/ProbModelXML.py | anaviltripathi/pgmpy | 24300984c66562219a09a3e9deb5e158bb021adb | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
For the student example the ProbModelXML file should be:
    <?xml version="1.0" encoding="UTF-8"?>
    <ProbModelXML formatVersion="1.0">
<ProbNet type="BayesianNetwork">
<AdditionalConstraints />
<Comment>
Student example model from Probabilistic Graphical Models:
Principles and Techniques by Daphne Koller
</Comment>
<Language>
English
</Language>
<AdditionalProperties />
<Variables>
<Variable name="intelligence" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="smart"><AdditionalProperties /></State>
<State name="dumb"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="difficulty" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="difficult"><AdditionalProperties /></State>
<State name="easy"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="grade" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="grade_A"><AdditionalProperties /></State>
<State name="grade_B"><AdditionalProperties /></State>
<State name="grade_C"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="recommendation_letter" type="FiniteState"
role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="good"><AdditionalProperties /></State>
<State name="bad"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="SAT" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="high"><AdditionalProperties /></State>
<State name="low"><AdditionalProperties /></State>
</States>
</Variable>
</Variables>
<Links>
<Link var1="difficulty" var2="grade" directed=1>
<Comment>Directed Edge from difficulty to grade</Comment>
<Label>diff_to_grad</Label>
<AdditionalProperties />
</Link>
<Link var1="intelligence" var2="grade" directed=1>
<Comment>Directed Edge from intelligence to grade</Comment>
<Label>intel_to_grad</Label>
<AdditionalProperties />
</Link>
<Link var1="intelligence" var2="SAT" directed=1>
<Comment>Directed Edge from intelligence to SAT</Comment>
<Label>intel_to_sat</Label>
<AdditionalProperties />
</Link>
<Link var1="grade" var2="recommendation_letter" directed=1>
<Comment>Directed Edge from grade to
recommendation_letter</Comment>
<Label>grad_to_reco</Label>
<AdditionalProperties />
</Link>
</Links>
<Potential type="Table" role="ConditionalProbability" label=string>
<Comment>CPDs in the form of table</Comment>
<AdditionalProperties />
<!--
There is no specification in the paper about
how the tables should be represented.
-->
</Potential>
</ProbNet>
<Policies />
<InferenceOptions />
<Evidence>
<EvidenceCase>
<Finding variable=string state=string stateIndex=integer
numericValue=number/>
</EvidenceCase>
</Evidence>
</ProbModelXML>
"""
import warnings
try:
from lxml import etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except ImportError:
# import xml.etree.cElementTree as etree
# print("running with cElementTree on Python 2.5+")
# Commented out because behaviour is different from expected
warnings.warn("Failed to import ElementTree from any known place")
import networkx as nx
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.factors import TabularCPD
from pgmpy.extern import six
from pgmpy.extern.six.moves import map
# warnings.warn("Not Complete. Please use only for "
# "reading and writing Bayesian Models.")
def generate_probmodelxml(model, encoding='utf-8', prettyprint=True):
    """
    Generate ProbModelXML lines for model.

    Parameters
    ----------
    model : Graph
        The Bayesian or Markov Model
    encoding : string (optional)
        Encoding for text data
    prettyprint: bool (optional)
        If True uses line breaks and indenting in output XML.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> s = pgmpy.readwrite.generate_probmodelxml(G)  # doctest: +SKIP
    >>> for line in pgmpy.readwrite.generate_probmodelxml(G):  # doctest: +SKIP
    ...     print(line)
    """
    # Bug fix: the writer was previously constructed from the undefined name
    # ``G`` (a NameError at runtime); it must use the ``model`` argument.
    writer = ProbModelXMLWriter(model, encoding=encoding, prettyprint=prettyprint)
    for line in str(writer).splitlines():
        yield line
# @open_file(1, mode='wb')
def write_probmodelxml(model, path, encoding='utf-8', prettyprint=True):
    """
    Write model in ProbModelXML format to path.

    Parameters
    ----------
    model : A NetworkX graph
            Bayesian network or Markov network
    path : file or string
            File or filename to write.
            Filenames ending in .gz or .bz2 will be compressed.
    encoding : string (optional)
            Encoding for text data.
    prettyprint : bool (optional)
            If True use line breaks and indenting in output XML.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> pgmpy.readwrite.write_probmodelxml(G, "test.probmodelxml")
    """
    xml_writer = ProbModelXMLWriter(model, path, encoding=encoding,
                                    prettyprint=prettyprint)
    xml_writer.dump(path)
# @open_file(0, mode='rb')
def read_probmodelxml(path):
    """
    Read model in ProbModelXML format from path.

    Parameters
    ----------
    path : file or string
            file or filename from which to read.

    Returns
    -------
    model : NetworkX Graph
            A BayesianModel or MarkovModel object depending on the
            type of model the XML represents.

    Examples
    --------
    >>> G = pgmpy.readwrite.read_probmodelxml('test.probModelXML')
    """
    return ProbModelXMLReader(path=path).make_network()
def parse_probmodelxml(string):
    """
    Read model in ProbModelXML format from string.

    Parameters
    ----------
    string : string
            String containing ProbModelXML information.
            (e.g., contents of a ProbModelXML file).

    Returns
    -------
    model : NetworkX Graph
            A BayesianModel or MarkovModel object depending on the XML given.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> linefeed = chr(10)
    >>> s = linefeed.join(pgmpy.readwrite.generate_probmodelxml(G))
    >>> H = pgmpy.readwrite.parse_probmodelxml(s)
    """
    return ProbModelXMLReader(string=string).make_network()
def get_probmodel_data(model):
    """
    Returns the model_data based on the given model.

    Parameters
    ----------
    model: BayesianModel instance
        Model to write

    Return
    ------
    model_data: dict
        dictionary containing model data of the given model.

    Examples
    --------
    >>> model_data = pgmpy.readwrite.get_model_data(model)
    >>> writer.get_model_data(model)
    """
    if not isinstance(model, BayesianModel):
        raise TypeError("Model must an instance of BayesianModel.")
    model_data = {'probnet': {'type': 'BayesianNetwork', 'Variables': {}}}

    # Copy each node's attribute dict into the Variables section.
    variables = model.nodes()
    for var in variables:
        model_data['probnet']['Variables'][var] = model.node[var]

    # Edge attributes, keyed by the string form of the (u, v) tuple.
    model_data['probnet']['edges'] = {}
    edges = model.edges()
    for edge in edges:
        model_data['probnet']['edges'][str(edge)] = model.edge[edge[0]][edge[1]]

    # One "Table" potential per CPD of the model.
    model_data['probnet']['Potentials'] = []
    cpds = model.get_cpds()
    for cpd in cpds:
        potential_dict = {}
        potential_dict['Variables'] = {}
        # cpd.variables is [variable, *evidence]; the slice [:0:-1] yields the
        # evidence variables in reversed order (everything after index 0,
        # walked backwards) — presumably the order ProbModelXML expects;
        # TODO confirm against the reader.
        evidence = cpd.variables[:0:-1]
        if evidence:
            potential_dict['Variables'][cpd.variable] = evidence
        else:
            potential_dict['Variables'][cpd.variable] = []
        potential_dict['type'] = "Table"
        potential_dict['role'] = "conditionalProbability"
        # Flatten the CPD values into a space-separated string; the trailing
        # space is kept deliberately (part of the emitted format).
        potential_dict['Values'] = " ".join([str(val) for val in cpd.values.ravel().astype(float)]) + " "
        model_data['probnet']['Potentials'].append(potential_dict)
    return model_data
class ProbModelXMLWriter(object):
"""
Class for writing models in ProbModelXML format.
"""
    def __init__(self, model_data, encoding='utf-8', prettyprint=True):
        """
        Initialize a ProbModelXMLWriter Object.

        Parameters
        ----------
        model_data : dict
            Model data dict (as produced by ``get_probmodel_data``) to write.
        encoding : string (optional)
            Encoding for text data
        prettyprint : bool (optional)
            If True uses line breaks and indentation in output XML.

        References
        ---------
        [1] http://leo.ugr.es/pgm2012/submissions/pgm2012_submission_43.pdf
        [2] http://www.cisiad.uned.es/techreports/ProbModelXML.pdf
        """
        # TODO: add policies, InferenceOptions, Evidence
        # TODO: add parsing of language and comments and additional properties
        self.data = model_data
        self.encoding = encoding
        self.prettyprint = prettyprint

        # Creating initial tags
        self.xml = etree.Element("ProbModelXML", attrib={'formatVersion': '1.0'})
        self.probnet = etree.SubElement(self.xml, 'ProbNet')
        self.variables = etree.SubElement(self.probnet, 'Variables')
        self.links = etree.SubElement(self.probnet, 'Links')
        self.potentials = etree.SubElement(self.probnet, 'Potentials')
        self.additional_constraints = etree.SubElement(self.probnet, 'AdditionalConstraints')

        # adding information for probnet; optional sections are skipped via
        # try/except KeyError when absent from the model data.
        self.probnet.attrib['type'] = self.data['probnet']['type']
        try:
            etree.SubElement(self.probnet, 'Language').text = self.data['probnet']['Language']
        except KeyError:
            pass
        try:
            etree.SubElement(self.probnet, 'Comment').text = self.data['probnet']['Comment']
        except KeyError:
            pass
        try:
            self._add_additional_properties(self.xml, self.data['probnet']['AdditionalProperties'])
        except KeyError:
            # Emit an empty element when no properties are given.
            etree.SubElement(self.probnet, 'AdditionalProperties')
        try:
            self._add_decision_criteria(self.data['probnet']['DecisionCriteria'])
        except KeyError:
            etree.SubElement(self.probnet, 'DecisionCriteria')

        # Add Additional Constraints (sorted for deterministic output order)
        if 'AdditionalConstraints' in self.data['probnet']:
            for constraint in sorted(self.data['probnet']['AdditionalConstraints']):
                self._add_constraint(constraint)

        # Add variables
        for variable in sorted(self.data['probnet']['Variables']):
            self._add_variable(variable)

        # Add edges
        for edge in sorted(self.data['probnet']['edges']):
            self._add_link(edge)

        # Add Potentials
        for potential in self.data['probnet']['Potentials']:
            self._add_potential(potential, self.potentials)
def __str__(self):
"""
Return the XML as string.
"""
if self.prettyprint:
self.indent(self.xml)
return etree.tostring(self.xml, encoding=self.encoding)
@staticmethod
def _add_additional_properties(position, properties_dict):
"""
Sets AdditionalProperties of the ProbModelXML.
"""
add_prop = etree.SubElement(position, 'AdditionalProperties')
for key, value in properties_dict.items():
etree.SubElement(add_prop, 'Property', attrib={'name': key, 'value': value})
    def _add_variable(self, variable):
        """
        Add a single node to the <Variables> section of the XML.

        Parameters
        ----------
        variable: string
            Variable name; used both as the XML ``name`` attribute and as the
            key into ``self.data['probnet']['Variables']``.
        """
        # TODO: Add feature for accepting additional properties of states.
        variable_data = self.data['probnet']['Variables'][variable]
        variable_element = etree.SubElement(self.variables, 'Variable', attrib={'name': variable,
                                                                                'type': variable_data['type'],
                                                                                'role': variable_data['role']})
        # Optional children; the RHS dict lookup raises KeyError before the
        # element is created, so nothing is left behind when the key is absent.
        try:
            etree.SubElement(variable_element, 'Comment').text = variable_data['Comment']
        except KeyError:
            pass
        try:
            etree.SubElement(variable_element, 'Coordinates', variable_data['Coordinates'])
        except KeyError:
            pass
        # Properties are written in sorted order for deterministic output;
        # an empty <AdditionalProperties> placeholder is emitted when missing.
        try:
            for key, value in sorted(variable_data['AdditionalProperties'].items()):
                etree.SubElement(variable_element, 'Property', attrib={'name': key, 'value': value})
        except KeyError:
            etree.SubElement(variable_element, 'AdditionalProperties')
        # One <State> per state name (sorted), each with its own
        # AdditionalProperties block (empty placeholder when absent).
        states = etree.SubElement(variable_element, 'States')
        for s in sorted(variable_data['States']):
            state = etree.SubElement(states, 'State', attrib={'name': s})
            try:
                self._add_additional_properties(state, variable_data['States'][s]['AdditionalProperties'])
            except KeyError:
                etree.SubElement(state, 'AdditionalProperties')
    def _add_link(self, edge):
        """
        Add a single <Link> (edge) element to the <Links> section.

        Parameters
        ----------
        edge: tuple or string
            Key into ``self.data['probnet']['edges']``; a stringified tuple
            (e.g. ``"('A', 'B')"``) is converted back into a tuple so the two
            endpoints can be indexed.
        """
        edge_data = self.data['probnet']['edges'][edge]
        # NOTE(review): eval on the key is unsafe for untrusted input —
        # ast.literal_eval would be the safer equivalent here.
        if isinstance(edge, six.string_types):
            edge = eval(edge)
        link = etree.SubElement(self.links, 'Link', attrib={'var1': edge[0], 'var2': edge[1],
                                                            'directed': edge_data['directed']})
        # Optional children: Comment, Label and AdditionalProperties
        # (an empty placeholder is written when no properties exist).
        try:
            etree.SubElement(link, 'Comment').text = edge_data['Comment']
        except KeyError:
            pass
        try:
            etree.SubElement(link, 'Label').text = edge_data['Label']
        except KeyError:
            pass
        try:
            self._add_additional_properties(link, edge_data['AdditionalProperties'])
        except KeyError:
            etree.SubElement(link, 'AdditionalProperties')
def _add_constraint(self, constraint):
"""
Adds constraint to the ProbModelXML.
"""
constraint_data = self.data['probnet']['AdditionalConstraints'][constraint]
constraint_element = etree.SubElement(
self.additional_constraints, 'Constraint', attrib={'name': constraint})
for argument in sorted(constraint_data):
name = argument
value = constraint_data[name]
etree.SubElement(constraint_element, 'Argument', attrib={'name': name, 'value': value})
def _add_decision_criteria(self, criteria_dict):
"""
Adds Decision Criteria to the ProbModelXML.
Parameters
----------
criteria_dict: dict
Dictionary containing Deecision Criteria data.
For example: {'effectiveness': {}, 'cost': {}}
Examples
-------
>>> writer = ProbModelXMLWriter(model)
>>> writer._add_decision_criteria(criteria_dict)
"""
decision_tag = etree.SubElement(self.xml, 'DecisionCriteria', attrib={})
for criteria in sorted(criteria_dict):
criteria_tag = etree.SubElement(decision_tag, 'Criterion', attrib={'name': criteria})
self._add_additional_properties(criteria_tag, criteria_dict[criteria])
def _add_potential(self, potential, parent_tag):
"""
Adds Potentials to the ProbModelXML.
Parameters
----------
potential: dict
Dictionary containing Potential data.
For example: {'role': 'Utility',
'Variables': ['D0', 'D1', 'C0', 'C1'],
'type': 'Tree/ADD',
'UtilityVaribale': 'U1'}
parent_tag: etree Element
etree element which would contain potential tag
For example: <Element Potentials at 0x7f315fc44b08>
<Element Branch at 0x7f315fc44c88>
<Element Branch at 0x7f315fc44d88>
<Element Subpotentials at 0x7f315fc44e48>
Examples
--------
>>> writer = ProbModelXMLWriter(model)
>>> writer._add_potential(potential, parent_tag)
"""
potential_type = potential['type']
try:
potential_tag = etree.SubElement(parent_tag, 'Potential', attrib={
'type': potential['type'], 'role': potential['role']})
except KeyError:
potential_tag = etree.SubElement(parent_tag, 'Potential', attrib={
'type': potential['type']})
self._add_element(potential, 'Comment', potential_tag)
if 'AdditionalProperties' in potential:
self._add_additional_properties(potential_tag, potential['AdditionalProperties'])
if potential_type == "delta":
etree.SubElement(potential_tag, 'Variable', attrib={'name': potential['Variable']})
self._add_element(potential, 'State', potential_tag)
self._add_element(potential, 'StateIndex', potential_tag)
self._add_element(potential, 'NumericValue', potential_tag)
else:
if 'UtilityVariable' in potential:
etree.SubElement(potential_tag, 'UtilityVariable', attrib={
'name': potential['UtilityVariable']})
if 'Variables' in potential:
variable_tag = etree.SubElement(potential_tag, 'Variables')
for var in sorted(potential['Variables']):
etree.SubElement(variable_tag, 'Variable', attrib={'name': var})
for child in sorted(potential['Variables'][var]):
etree.SubElement(variable_tag, 'Variable', attrib={'name': child})
self._add_element(potential, 'Values', potential_tag)
if 'UncertainValues' in potential:
value_tag = etree.SubElement(potential_tag, 'UncertainValues', attrib={})
for value in sorted(potential['UncertainValues']):
try:
etree.SubElement(value_tag, 'Value', attrib={
'distribution': value['distribution'],
'name': value['name']}).text = value['value']
except KeyError:
etree.SubElement(value_tag, 'Value', attrib={
'distribution': value['distribution']}).text = value['value']
if 'TopVariable' in potential:
etree.SubElement(potential_tag, 'TopVariable', attrib={'name': potential['TopVariable']})
if 'Branches' in potential:
branches_tag = etree.SubElement(potential_tag, 'Branches')
for branch in potential['Branches']:
branch_tag = etree.SubElement(branches_tag, 'Branch')
if 'States' in branch:
states_tag = etree.SubElement(branch_tag, 'States')
for state in sorted(branch['States']):
etree.SubElement(states_tag, 'State', attrib={'name': state['name']})
if 'Potential' in branch:
self._add_potential(branch['Potential'], branch_tag)
self._add_element(potential, 'Label', potential_tag)
self._add_element(potential, 'Reference', potential_tag)
if 'Thresholds' in branch:
thresholds_tag = etree.SubElement(branch_tag, 'Thresholds')
for threshold in branch['Thresholds']:
try:
etree.SubElement(thresholds_tag, 'Threshold', attrib={
'value': threshold['value'], 'belongsTo': threshold['belongsTo']})
except KeyError:
etree.SubElement(thresholds_tag, 'Threshold', attrib={
'value': threshold['value']})
self._add_element(potential, 'Model', potential_tag)
self._add_element(potential, 'Coefficients', potential_tag)
self._add_element(potential, 'CovarianceMatrix', potential_tag)
if 'Subpotentials' in potential:
subpotentials = etree.SubElement(potential_tag, 'Subpotentials')
for subpotential in potential['Subpotentials']:
self._add_potential(subpotential, subpotentials)
if 'Potential' in potential:
self._add_potential(potential['Potential'], potential_tag)
if 'NumericVariables' in potential:
numvar_tag = etree.SubElement(potential_tag, 'NumericVariables')
for var in sorted(potential['NumericVariables']):
etree.SubElement(numvar_tag, 'Variable', attrib={'name': var})
@staticmethod
def _add_element(potential, var, potential_tag):
"""
Helper function to add variable tag to the potential_tag
Parameters
----------
potential: dict
Dictionary containing Potential data.
For example: {'role': 'Utility',
'Variables': ['D0', 'D1', 'C0', 'C1'],
'type': 'Tree/ADD',
'UtilityVaribale': 'U1'}
var: string
New Element tag which needs to be added to the potential tag.
For example: 'type'
potential_tag: etree Element
etree element which would contain potential tag
For example: <Element Potentials at 0x7f315fc44b08>
<Element Branch at 0x7f315fc44c88>
<Element Branch at 0x7f315fc44d88>
<Element Subpotentials at 0x7f315fc44e48>
Examples
--------
>>> writer = ProbModelXMLWriter(model)
>>> writer._add_element(potential, 'State', parent_tag)
"""
if var in potential:
etree.SubElement(potential_tag, var).text = potential[var]
def dump(self, stream):
"""
Dumps the data to stream after appending header.
"""
if self.prettyprint:
self.indent(self.xml)
document = etree.ElementTree(self.xml)
header = '<?xml version="1.0" encoding="%s"?>' % self.encoding
stream.write(header.encode(self.encoding))
document.write(stream, encoding=self.encoding)
    def indent(self, elem, level=0):
        """
        Inplace prettyprint formatter (classic ElementTree indent recipe).

        Rewrites the whitespace-only ``text``/``tail`` of *elem* and all of
        its descendants so that serialization yields indented output.

        Parameters
        ----------
        elem: etree Element
            Subtree root to reformat in place.
        level: int
            Current nesting depth; controls the indentation width.
        """
        i = "\n" + level*" "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + " "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            # NOTE: the loop deliberately rebinds `elem` to each child; after
            # the loop `elem` is the *last* child, whose tail is dedented back
            # to the parent's level (standard recipe idiom).
            for elem in elem:
                self.indent(elem, level+1)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
def write_file(self, filename):
"""
Write the xml data into the file.
Parameters
----------
filename: Name of the file.
Examples
-------
>>> writer = ProbModelXMLWriter(model)
>>> writer.write_file(test_file)
"""
writer = self.__str__()[:-1].decode('utf-8')
with open(filename, 'w') as fout:
fout.write(writer)
class ProbModelXMLReader(object):
"""
Class for reading ProbModelXML format from files or strings.
"""
# TODO: add methods to parse policies, inferenceoption, evidence etc.
# TODO: add reading formatVersion
def __init__(self, path=None, string=None):
"""
Initialize an instance of ProbModelXMLReader class.
Parameters
----------
path : file or string
File containing ProbModelXML information.
string : string
String containing ProbModelXML information.
Example
-------
>>> reader = ProbModelXMLReader('test.ProbModelXML')
Structure of Probnet Object
---------------------------
{ probnet: { type:
Comment:
Language:
AdditionalProperties: { property_name1: property_value1,
property_name2: property_value2,
....
}
Variables: { variable_name1: { type:
roles:
Comment:
Coordinates:
AdditionalProperties: { property_name1: property_value1,
property_name2: property_value2,
....
}
states: { state1: {AdditionalProperties: {
....
....
}
state2: {AdditionalProperties: {
....
....
}
.....
}
}
variable_name2: {
...........
}
.........
}
edges: { (var1, var2): { directed:
Comment:
Label:
AdditionalProperties: { property_name1: property_value1,
property_name2: property_value2,
.....
}
(var3, var4): {
.....
.....
}
........
}
}
}
References
----------
[1] http://leo.ugr.es/pgm2012/submissions/pgm2012_submission_43.pdf
[2] http://www.cisiad.uned.es/techreports/ProbModelXML.pdf
"""
if path is not None:
self.xml = etree.ElementTree(file=path)
elif string is not None:
self.xml = etree.fromstring(string)
else:
raise ValueError("Must specify either 'path' or 'string' as kwarg.")
self.create_probnet()
    def create_probnet(self):
        """
        Parse the <ProbNet> element of the loaded XML into the nested
        ``self.probnet`` dict (type, Comment, Language, AdditionalProperties,
        AdditionalConstraints, DecisionCriteria, Variables, edges and
        Potentials).

        Note: despite the original docstring, this method returns nothing;
        use ``get_model()`` to obtain a model instance afterwards.
        """
        self.probnet = {}
        # Add general properties
        probnet_elem = self.xml.find('ProbNet')
        self.probnet['type'] = probnet_elem.attrib['type']
        # Optional scalar children.
        if probnet_elem.find('Comment') is not None:
            self.add_comment(probnet_elem.find('Comment').text)
        if probnet_elem.find('Language') is not None:
            self.add_language(probnet_elem.find('Language').text)
        if probnet_elem.find('AdditionalProperties') is not None:
            self.probnet['AdditionalProperties'] = {}
            for prop in probnet_elem.find('AdditionalProperties'):
                self.add_additional_property(self.probnet['AdditionalProperties'], prop)
        # Add additional Constraints
        self.probnet['AdditionalConstraints'] = {}
        for constraint in probnet_elem.findall('AdditionalConstraints/Constraint'):
            self.add_probnet_additionalconstraints(constraint)
        # Add Decision Criterion
        self.probnet['DecisionCriteria'] = {}
        for criterion in probnet_elem.findall('DecisionCriteria/Criterion'):
            self.add_criterion(criterion)
        # Add nodes
        self.probnet['Variables'] = {}
        for variable in probnet_elem.find('Variables'):
            self.add_node(variable)
        # Add edges
        self.probnet['edges'] = {}
        for edge in probnet_elem.findall('Links/Link'):
            self.add_edge(edge)
        # Add CPD: one parsed dict per <Potential> child.
        self.probnet['Potentials'] = []
        for potential in probnet_elem.findall('Potentials/Potential'):
            probnet_dict = {}
            self.add_potential(potential, probnet_dict)
            self.probnet['Potentials'].append(probnet_dict)
def add_probnet_additionalconstraints(self, constraint):
"""
Adds Additional Constraints to the probnet dict.
Parameters
----------
criterion: <Element Constraint at AdditionalConstraints Node in XML>
etree Element consisting Constraint tag.
Examples
-------
>>> reader = ProbModelXMLReader()
>>> reader.add_additionalconstraints(constraint)
"""
constraint_name = constraint.attrib['name']
self.probnet['AdditionalConstraints'][constraint_name] = {}
for argument in constraint.findall('Argument'):
argument_name = argument.attrib['name']
argument_value = argument.attrib['value']
self.probnet['AdditionalConstraints'][constraint_name][argument_name] = argument_value
def add_criterion(self, criterion):
"""
Adds Decision Criteria to the probnet dict.
Parameters
----------
criterion: <Element Criterion at Decision Criteria Node in XML>
etree Element consisting DecisionCritera tag.
Examples
-------
>>> reader = ProbModelXMLReader()
>>> reader.add_criterion(criterion)
"""
criterion_name = criterion.attrib['name']
self.probnet['DecisionCriteria'][criterion_name] = {}
if criterion.find('AdditionalProperties/Property') is not None:
for prop in criterion.findall('AdditionalProperties/Property'):
prop_name = prop.attrib['name']
prop_value = prop.attrib['value']
self.probnet['DecisionCriteria'][criterion_name]['AdditionalProperties'][prop_name] = prop_value
def add_comment(self, comment):
"""
Adds Comment to the probnet dict.
Parameters
----------
comment: string
String consisting of comment.
Examples
-------
>>> reader = ProbModelXMLReader()
>>> reader.add_comment(comment)
"""
self.probnet['Comment'] = comment
def add_language(self, language):
"""
Adds Language to the probnet dict.
Parameters
----------
comment: string
String consisting of language.
Examples
-------
>>> reader = ProbModelXMLReader()
>>> reader.add_language(language)
"""
self.probnet['Language'] = language
@staticmethod
def add_additional_property(place, prop):
place[prop.attrib['name']] = prop.attrib['value']
    def add_node(self, variable):
        """
        Parse one <Variable> element into ``self.probnet['Variables']``.

        Parameters
        ----------
        variable: <Element Variable at Variables Node in XML>
            etree Element consisting Variable tag.

        Examples
        --------
        >>> reader = ProbModelXMLReader()
        >>> reader.add_node(variable)
        """
        # TODO: Do some checks with variable type and roles. Right now I don't know when they are to be used.
        variable_name = variable.attrib['name']
        self.probnet['Variables'][variable_name] = {}
        self.probnet['Variables'][variable_name]['type'] = variable.attrib['type']
        self.probnet['Variables'][variable_name]['role'] = variable.attrib['role']
        # Optional children.
        if variable.find('Comment') is not None:
            self.probnet['Variables'][variable_name]['Comment'] = variable.find('Comment').text
        if variable.find('Coordinates') is not None:
            self.probnet['Variables'][variable_name]['Coordinates'] = variable.find('Coordinates').attrib
        if variable.find('AdditionalProperties/Property') is not None:
            self.probnet['Variables'][variable_name]['AdditionalProperties'] = {}
            for prop in variable.findall('AdditionalProperties/Property'):
                self.probnet['Variables'][variable_name]['AdditionalProperties'][prop.attrib['name']] = \
                    prop.attrib['value']
        # States are mandatory for a usable node; warn when missing.
        if variable.find('States/State') is None:
            warnings.warn("States not available for node: " + variable_name)
        else:
            # Map each state name to its own {property_name: value} dict.
            self.probnet['Variables'][variable_name]['States'] = {state.attrib['name']: {
                prop.attrib['name']: prop.attrib['value'] for
                prop in state.findall('AdditionalProperties/Property')} for state in variable.findall(
                'States/State')}
def add_edge(self, edge):
"""
Adds Edges to the probnet dict.
Parameters
----------
edge: <Element Link at Links Node in XML>
etree Element consisting Variable tag.
Examples
-------
>>> reader = ProbModelXMLReader()
>>> reader.add_edge(edge)
"""
var1 = edge.findall('Variable')[0].attrib['name']
var2 = edge.findall('Variable')[1].attrib['name']
self.probnet['edges'][(var1, var2)] = {}
self.probnet['edges'][(var1, var2)]['directed'] = edge.attrib['directed']
# TODO: check for the case of undirected graphs if we need to add to both elements of the dic for a single edge.
if edge.find('Comment') is not None:
self.probnet['edges'][(var1, var2)]['Comment'] = edge.find('Comment').text
if edge.find('Label') is not None:
self.probnet['edges'][(var1, var2)]['Label'] = edge.find('Label').text
if edge.find('AdditionalProperties/Property') is not None:
self.probnet['edges'][(var1, var2)]['AdditionalProperties'] = {}
for prop in edge.findall('AdditionalProperties/Property'):
self.probnet['edges'][(var1, var2)]['AdditionalProperties'][prop.attrib['name']] = prop.attrib['value']
def add_potential(self, potential, potential_dict):
"""
Adds Potential to the potential dict.
Parameters
----------
potential: <Element Potential at Potentials node in XML>
etree Element consisting Potential tag.
potential_dict: dict{}
Dictionary to parse Potential tag.
Examples
-------
>>> reader = ProbModelXMLReader()
>>> reader.add_potential(potential, potential_dict)
"""
potential_type = potential.attrib['type']
potential_dict['type'] = potential_type
try:
potential_dict['role'] = potential.attrib['role']
except KeyError:
pass
if potential.find('Comment') is not None:
potential_dict['Comment'] = potential.find('Comment').text
for prop in potential.findall('AdditionalProperties/Property'):
potential_dict['AdditionalProperties'][prop.attrib['name']] = prop.attrib['value']
if potential_type == "delta":
potential_dict['Variable'] = potential.find('Variable').attrib['name']
if potential.find('State') is not None:
potential_dict['State'] = potential.find('State').text
if potential.find('StateIndex') is not None:
potential_dict['StateIndex'] = potential.find('StateIndex').text
if potential.find('NumericValue') is not None:
potential_dict['NumericValue'] = potential.find('NumericValue').text
else:
if potential.find('UtilityVariable') is not None:
potential_dict['UtilityVaribale'] = potential.find('UtilityVariable').attrib['name']
if len(potential.findall('Variables/Variable')):
potential_dict['Variables'] = {}
var_list = []
for var in potential.findall('Variables/Variable'):
var_list.append(var.attrib['name'])
potential_dict['Variables'][var_list[0]] = var_list[1:]
if potential.find('Values') is not None:
potential_dict['Values'] = potential.find('Values').text
if len(potential.findall('UncertainValues/Value')):
potential_dict['UncertainValues'] = []
for value in potential.findall('UncertainValues/Value'):
try:
potential_dict['UncertainValues'].append(
{'distribution': value.attrib['distribution'], 'name': value.attrib['name'],
'value': value.text})
except KeyError:
potential_dict['UncertainValues'].append(
{'distribution': value.attrib['distribution'], 'value': value.text})
if potential.find('TopVariable') is not None:
potential_dict['TopVariable'] = potential.find('TopVariable').attrib['name']
if len(potential.findall('Branches/Branch')):
potential_dict['Branches'] = []
for branch in potential.findall('Branches/Branch'):
branch_dict = {}
if len(branch.findall('States/State')):
states = []
for state in branch.findall('States/State'):
states.append({'name': state.attrib['name']})
branch_dict['States'] = states
if branch.find('Potential') is not None:
branch_potential = {}
self.add_potential(branch.find('Potential'), branch_potential)
branch_dict['Potential'] = branch_potential
if branch.find('Label') is not None:
label = branch.find('Label').text
branch_dict['Label'] = label
if branch.find('Reference') is not None:
reference = branch.find('Reference').text
branch_dict['Reference'] = reference
if len(branch.findall('Thresholds/Threshold')):
thresholds = []
for threshold in branch.findall('Thresholds/Threshold'):
try:
thresholds.append({
'value': threshold.attrib['value'], 'belongsTo': threshold.attrib['belongsTo']})
except KeyError:
thresholds.append({'value': threshold.attrib['value']})
branch_dict['Thresholds'] = thresholds
potential_dict['Branches'].append(branch_dict)
if potential.find('Model') is not None:
potential_dict['Model'] = potential.find('Model').text
if len(potential.findall('Subpotentials/Potential')):
potential_dict['Subpotentials'] = []
for subpotential in potential.findall('Subpotentials/Potential'):
subpotential_dict = {}
self.add_potential(subpotential, subpotential_dict)
potential_dict['Subpotentials'].append(subpotential_dict)
if potential.find('Coefficients') is not None:
potential_dict['Coefficients'] = potential.find('Coefficients').text
if potential.find('CovarianceMatrix') is not None:
potential_dict['CovarianceMatrix'] = potential.find('CovarianceMatrix').text
if potential.find('Potential') is not None:
potential_dict['Potential'] = {}
self.add_potential(potential.find('Potential'), potential_dict['Potential'])
if len(potential.findall('NumericVariables/Variable')):
potential_dict['NumericVariables'] = []
for variable in potential.findall('NumericVariables/Variable'):
potential_dict['NumericVariables'].append(variable.attrib['name'])
    def get_model(self):
        """
        Build and return the model instance of the ProbModel.

        Returns
        -------
        model: an instance of BayesianModel built from the parsed
            ``self.probnet`` dict (edges, CPDs and node/edge attributes).

        Raises
        ------
        ValueError
            If the parsed network type is not "BayesianNetwork".

        Examples
        --------
        >>> reader = ProbModelXMLReader()
        >>> reader.get_model()
        """
        if self.probnet.get('type') == "BayesianNetwork":
            model = BayesianModel(self.probnet['edges'].keys())
            tabular_cpds = []
            cpds = self.probnet['Potentials']
            for cpd in cpds:
                # First key of 'Variables' is the CPD's own variable; its
                # value lists the evidence (parent) variables.
                var = list(cpd['Variables'].keys())[0]
                states = self.probnet['Variables'][var]['States']
                evidence = cpd['Variables'][var]
                evidence_card = [len(self.probnet['Variables'][evidence_var]['States'])
                                 for evidence_var in evidence]
                # 'Values' is a whitespace-separated flat list reshaped to
                # (cardinality of var, product of evidence cardinalities).
                arr = list(map(float, cpd['Values'].split()))
                values = np.array(arr)
                values = values.reshape((len(states), values.size//len(states)))
                tabular_cpds.append(TabularCPD(var, len(states), values, evidence, evidence_card))
            model.add_cpds(*tabular_cpds)
            # Copy parsed node/edge attributes onto the model.
            # NOTE(review): `model.node[...]` / `model.edge[...]` rely on the
            # old networkx 1.x attribute API — confirm against the pinned
            # networkx version.
            variables = model.nodes()
            for var in variables:
                for prop_name, prop_value in self.probnet['Variables'][var].items():
                    model.node[var][prop_name] = prop_value
            edges = model.edges()
            for edge in edges:
                for prop_name, prop_value in self.probnet['edges'][edge].items():
                    model.edge[edge[0]][edge[1]][prop_name] = prop_value
            return model
        else:
            raise ValueError("Please specify only Bayesian Network.")
| 41.730443 | 120 | 0.542416 |
c1fce18938937dc80022b82a7bac16f319dfdbc5 | 18,283 | py | Python | pytorch3dunet/unet3d/trainer.py | vivian-wong/AM_defects_3dUNet | 91c3e42037e6af290b303b48c3d951c10832b2ab | [
"MIT"
] | null | null | null | pytorch3dunet/unet3d/trainer.py | vivian-wong/AM_defects_3dUNet | 91c3e42037e6af290b303b48c3d951c10832b2ab | [
"MIT"
] | null | null | null | pytorch3dunet/unet3d/trainer.py | vivian-wong/AM_defects_3dUNet | 91c3e42037e6af290b303b48c3d951c10832b2ab | [
"MIT"
] | null | null | null | import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorch3dunet.unet3d.utils import get_logger
from . import utils
logger = get_logger('UNet3DTrainer')
class UNet3DTrainer:
"""3D UNet trainer.
Args:
model (Unet3D): UNet 3D model to be trained
optimizer (nn.optim.Optimizer): optimizer used for training
lr_scheduler (torch.optim.lr_scheduler._LRScheduler): learning rate scheduler
WARN: bear in mind that lr_scheduler.step() is invoked after every validation step
(i.e. validate_after_iters) not after every epoch. So e.g. if one uses StepLR with step_size=30
the learning rate will be adjusted after every 30 * validate_after_iters iterations.
loss_criterion (callable): loss function
eval_criterion (callable): used to compute training/validation metric (such as Dice, IoU, AP or Rand score)
saving the best checkpoint is based on the result of this function on the validation set
device (torch.device): device to train on
loaders (dict): 'train' and 'val' loaders
checkpoint_dir (string): dir for saving checkpoints and tensorboard logs
max_num_epochs (int): maximum number of epochs
max_num_iterations (int): maximum number of iterations
validate_after_iters (int): validate after that many iterations
log_after_iters (int): number of iterations before logging to tensorboard
validate_iters (int): number of validation iterations, if None validate
on the whole validation set
eval_score_higher_is_better (bool): if True higher eval scores are considered better
best_eval_score (float): best validation score so far (higher better)
num_iterations (int): useful when loading the model from the checkpoint
num_epoch (int): useful when loading the model from the checkpoint
tensorboard_formatter (callable): converts a given batch of input/output/target image to a series of images
that can be displayed in tensorboard
skip_train_validation (bool): if True eval_criterion is not evaluated on the training set (used mostly when
evaluation is expensive)
"""
    def __init__(self, model, optimizer, lr_scheduler, loss_criterion,
                 eval_criterion, device, loaders, checkpoint_dir,
                 max_num_epochs=100, max_num_iterations=1e5,
                 validate_after_iters=100, log_after_iters=100,
                 validate_iters=None, num_iterations=1, num_epoch=0,
                 eval_score_higher_is_better=True, best_eval_score=None,
                 tensorboard_formatter=None, skip_train_validation=False):
        """Store the training configuration; see the class docstring for parameter semantics."""
        self.model = model
        self.optimizer = optimizer
        self.scheduler = lr_scheduler
        self.loss_criterion = loss_criterion
        self.eval_criterion = eval_criterion
        self.device = device
        self.loaders = loaders
        self.checkpoint_dir = checkpoint_dir
        self.max_num_epochs = max_num_epochs
        self.max_num_iterations = max_num_iterations
        self.validate_after_iters = validate_after_iters
        self.log_after_iters = log_after_iters
        self.validate_iters = validate_iters
        self.eval_score_higher_is_better = eval_score_higher_is_better
        logger.info(model)
        logger.info(f'eval_score_higher_is_better: {eval_score_higher_is_better}')
        if best_eval_score is not None:
            self.best_eval_score = best_eval_score
        else:
            # initialize the best_eval_score to the worst possible value so
            # the first validation result always becomes the new best
            if eval_score_higher_is_better:
                self.best_eval_score = float('-inf')
            else:
                self.best_eval_score = float('+inf')
        # TensorBoard event files are written under <checkpoint_dir>/logs
        self.writer = SummaryWriter(log_dir=os.path.join(checkpoint_dir, 'logs'))
        assert tensorboard_formatter is not None, 'TensorboardFormatter must be provided'
        self.tensorboard_formatter = tensorboard_formatter
        self.num_iterations = num_iterations
        self.num_epoch = num_epoch
        self.skip_train_validation = skip_train_validation
    @classmethod
    def from_checkpoint(cls, checkpoint_path, model, optimizer, lr_scheduler, loss_criterion, eval_criterion, loaders,
                        tensorboard_formatter=None, skip_train_validation=False):
        """
        Build a trainer that resumes from a saved checkpoint: model/optimizer
        weights plus epoch, iteration counters, best score and device are all
        restored from the checkpoint's state dict.
        """
        logger.info(f"Loading checkpoint '{checkpoint_path}'...")
        state = utils.load_checkpoint(checkpoint_path, model, optimizer)
        logger.info(
            f"Checkpoint loaded. Epoch: {state['epoch']}. Best val score: {state['best_eval_score']}. Num_iterations: {state['num_iterations']}")
        # New checkpoints are written next to the one being resumed.
        checkpoint_dir = os.path.split(checkpoint_path)[0]
        return cls(model, optimizer, lr_scheduler,
                   loss_criterion, eval_criterion,
                   torch.device(state['device']),
                   loaders, checkpoint_dir,
                   eval_score_higher_is_better=state['eval_score_higher_is_better'],
                   best_eval_score=state['best_eval_score'],
                   num_iterations=state['num_iterations'],
                   num_epoch=state['epoch'],
                   max_num_epochs=state['max_num_epochs'],
                   max_num_iterations=state['max_num_iterations'],
                   validate_after_iters=state['validate_after_iters'],
                   log_after_iters=state['log_after_iters'],
                   validate_iters=state['validate_iters'],
                   tensorboard_formatter=tensorboard_formatter,
                   skip_train_validation=skip_train_validation)
    @classmethod
    def from_pretrained(cls, pre_trained, model, optimizer, lr_scheduler, loss_criterion, eval_criterion,
                        device, loaders,
                        max_num_epochs=100, max_num_iterations=1e5,
                        validate_after_iters=100, log_after_iters=100,
                        validate_iters=None, num_iterations=1, num_epoch=0,
                        eval_score_higher_is_better=True, best_eval_score=None,
                        tensorboard_formatter=None, skip_train_validation=False):
        """
        Build a trainer initialized with pre-trained model weights only:
        unlike ``from_checkpoint``, the optimizer state and training progress
        (epoch/iteration counters, best score) come from the arguments, not
        from the saved file.
        """
        logger.info(f"Logging pre-trained model from '{pre_trained}'...")
        # Passing None as optimizer restores weights without optimizer state.
        utils.load_checkpoint(pre_trained, model, None)
        checkpoint_dir = os.path.split(pre_trained)[0]
        return cls(model, optimizer, lr_scheduler,
                   loss_criterion, eval_criterion,
                   device, loaders, checkpoint_dir,
                   eval_score_higher_is_better=eval_score_higher_is_better,
                   best_eval_score=best_eval_score,
                   num_iterations=num_iterations,
                   num_epoch=num_epoch,
                   max_num_epochs=max_num_epochs,
                   max_num_iterations=max_num_iterations,
                   validate_after_iters=validate_after_iters,
                   log_after_iters=log_after_iters,
                   validate_iters=validate_iters,
                   tensorboard_formatter=tensorboard_formatter,
                   skip_train_validation=skip_train_validation)
def fit(self):
for _ in range(self.num_epoch, self.max_num_epochs):
# train for one epoch
should_terminate = self.train(self.loaders['train'])
if should_terminate:
logger.info('Stopping criterion is satisfied. Finishing training')
return
self.num_epoch += 1
logger.info(f"Reached maximum number of epochs: {self.max_num_epochs}. Finishing training...")
    def train(self, train_loader):
        """Trains the model for 1 epoch.

        Args:
            train_loader (torch.utils.data.DataLoader): training data loader

        Returns:
            True if the training should be terminated immediately, False otherwise
        """
        train_losses = utils.RunningAverage()
        train_eval_scores = utils.RunningAverage()
        # sets the model in training mode
        self.model.train()
        for i, t in enumerate(train_loader):
            logger.info(
                f'Training iteration {self.num_iterations}. Batch {i}. Epoch [{self.num_epoch}/{self.max_num_epochs - 1}]')
            input, target, weight = self._split_training_batch(t)
            output, loss = self._forward_pass(input, target, weight)
            train_losses.update(loss.item(), self._batch_size(input))
            # compute gradients and update parameters
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # periodic validation (every validate_after_iters iterations,
            # not per epoch — the lr scheduler also steps here)
            if self.num_iterations % self.validate_after_iters == 0:
                # set the model in eval mode
                self.model.eval()
                # evaluate on validation set
                eval_score = self.validate(self.loaders['val'])
                # set the model back to training mode
                self.model.train()
                # adjust learning rate if necessary
                if isinstance(self.scheduler, ReduceLROnPlateau):
                    self.scheduler.step(eval_score)
                else:
                    self.scheduler.step()
                # log current learning rate in tensorboard
                self._log_lr()
                # remember best validation metric
                is_best = self._is_best_eval_score(eval_score)
                # save checkpoint
                self._save_checkpoint(is_best)
            if self.num_iterations % self.log_after_iters == 0:
                # if model contains final_activation layer for normalizing logits apply it, otherwise both
                # the evaluation metric as well as images in tensorboard will be incorrectly computed
                if hasattr(self.model, 'final_activation') and self.model.final_activation is not None:
                    output = self.model.final_activation(output)
                # compute eval criterion (skipped when evaluation is expensive)
                if not self.skip_train_validation:
                    eval_score = self.eval_criterion(output, target)
                    train_eval_scores.update(eval_score.item(), self._batch_size(input))
                # log stats, params and images
                logger.info(
                    f'Training stats. Loss: {train_losses.avg}. Evaluation score: {train_eval_scores.avg}')
                self._log_stats('train', train_losses.avg, train_eval_scores.avg)
                self._log_params()
                self._log_images(input, target, output, 'train_')
            # may terminate mid-epoch on iteration budget or lr threshold
            if self.should_stop():
                return True
            self.num_iterations += 1
        return False
def should_stop(self):
"""
Training will terminate if maximum number of iterations is exceeded or the learning rate drops below
some predefined threshold (1e-6 in our case)
"""
if self.max_num_iterations < self.num_iterations:
logger.info(f'Maximum number of iterations {self.max_num_iterations} exceeded.')
return True
min_lr = 1e-6
lr = self.optimizer.param_groups[0]['lr']
if lr < min_lr:
logger.info(f'Learning rate below the minimum {min_lr}.')
return True
return False
    def validate(self, val_loader):
        """Run one full evaluation pass over `val_loader`.

        Returns the average evaluation score across the validation set.
        The caller is responsible for putting the model into eval mode
        beforehand and restoring train mode afterwards (the training loop
        does `self.model.eval()` before calling this).
        """
        logger.info('Validating...')
        val_losses = utils.RunningAverage()
        val_scores = utils.RunningAverage()
        # No gradients are needed for evaluation.
        with torch.no_grad():
            for i, t in enumerate(val_loader):
                logger.info(f'Validation iteration {i}')
                input, target, weight = self._split_training_batch(t)
                output, loss = self._forward_pass(input, target, weight)
                val_losses.update(loss.item(), self._batch_size(input))
                # if model contains final_activation layer for normalizing logits apply it, otherwise
                # the evaluation metric will be incorrectly computed
                if hasattr(self.model, 'final_activation') and self.model.final_activation is not None:
                    output = self.model.final_activation(output)
                # Log example images only every 100th batch to limit tensorboard size.
                if i % 100 == 0:
                    self._log_images(input, target, output, 'val_')
                eval_score = self.eval_criterion(output, target)
                val_scores.update(eval_score.item(), self._batch_size(input))
                # Optionally cap validation at `validate_iters` batches.
                if self.validate_iters is not None and self.validate_iters <= i:
                    # stop validation
                    break
        self._log_stats('val', val_losses.avg, val_scores.avg)
        logger.info(f'Validation finished. Loss: {val_losses.avg}. Evaluation score: {val_scores.avg}')
        return val_scores.avg
def _split_training_batch(self, t):
def _move_to_device(input):
if isinstance(input, tuple) or isinstance(input, list):
return tuple([_move_to_device(x) for x in input])
else:
return input.to(self.device)
t = _move_to_device(t)
weight = None
if len(t) == 2:
input, target = t
else:
input, target, weight = t
return input, target, weight
def _forward_pass(self, input, target, weight=None):
# forward pass
output = self.model(input)
# compute the loss
if weight is None:
loss = self.loss_criterion(output, target)
else:
loss = self.loss_criterion(output, target, weight)
return output, loss
def _is_best_eval_score(self, eval_score):
if self.eval_score_higher_is_better:
is_best = eval_score > self.best_eval_score
else:
is_best = eval_score < self.best_eval_score
if is_best:
logger.info(f'Saving new best evaluation metric: {eval_score}')
self.best_eval_score = eval_score
return is_best
    def _save_checkpoint(self, is_best):
        """Persist the full training state via utils.save_checkpoint.

        `is_best` marks this checkpoint as the best-so-far copy. The saved
        dict contains everything needed to resume training (model/optimizer
        state plus the loop counters and configuration).
        """
        # remove `module` prefix from layer names when using `nn.DataParallel`
        # see: https://discuss.pytorch.org/t/solved-keyerror-unexpected-key-module-encoder-embedding-weight-in-state-dict/1686/20
        if isinstance(self.model, nn.DataParallel):
            state_dict = self.model.module.state_dict()
        else:
            state_dict = self.model.state_dict()
        # NOTE(review): presumably `num_epoch` is the last finished epoch, so
        # `+ 1` stores the epoch to resume from — confirm against the resume
        # logic in the checkpoint loader.
        utils.save_checkpoint({
            'epoch': self.num_epoch + 1,
            'num_iterations': self.num_iterations,
            'model_state_dict': state_dict,
            'best_eval_score': self.best_eval_score,
            'eval_score_higher_is_better': self.eval_score_higher_is_better,
            'optimizer_state_dict': self.optimizer.state_dict(),
            'device': str(self.device),
            'max_num_epochs': self.max_num_epochs,
            'max_num_iterations': self.max_num_iterations,
            'validate_after_iters': self.validate_after_iters,
            'log_after_iters': self.log_after_iters,
            'validate_iters': self.validate_iters
        }, is_best, checkpoint_dir=self.checkpoint_dir,
            logger=logger)
def _log_lr(self):
lr = self.optimizer.param_groups[0]['lr']
self.writer.add_scalar('learning_rate', lr, self.num_iterations)
def _log_stats(self, phase, loss_avg, eval_score_avg):
tag_value = {
f'{phase}_loss_avg': loss_avg,
f'{phase}_eval_score_avg': eval_score_avg
}
for tag, value in tag_value.items():
self.writer.add_scalar(tag, value, self.num_iterations)
def _log_params(self):
logger.info('Logging model parameters and gradients')
for name, value in self.model.named_parameters():
self.writer.add_histogram(name, value.data.cpu().numpy(), self.num_iterations)
self.writer.add_histogram(name + '/grad', value.grad.data.cpu().numpy(), self.num_iterations)
def _log_images(self, input, target, prediction, prefix=''):
inputs_map = {
'inputs': input,
'targets': target,
'predictions': prediction
}
img_sources = {}
for name, batch in inputs_map.items():
if isinstance(batch, list) or isinstance(batch, tuple):
for i, b in enumerate(batch):
img_sources[f'{name}{i}'] = b.data.cpu().numpy()
else:
img_sources[name] = batch.data.cpu().numpy()
for name, batch in img_sources.items():
<<<<<<< HEAD:unet3d/trainer.py
for tag, image in self._images_from_batch(name, batch):
self.writer.add_image(tag, image, self.num_iterations, dataformats='HW')
def _images_from_batch(self, name, batch):
tag_template = '{}/batch_{}/channel_{}/slice_{}'
tagged_images = []
if batch.ndim == 5:
# NCDHW
slice_idx = batch.shape[2] // 2 # get the middle slice
for batch_idx in range(batch.shape[0]):
for channel_idx in range(batch.shape[1]):
tag = tag_template.format(name, batch_idx, channel_idx, slice_idx)
img = batch[batch_idx, channel_idx, slice_idx, ...]
tagged_images.append((tag, self._normalize_img(img)))
else:
# batch has no channel dim: NDHW
slice_idx = batch.shape[1] // 2 # get the middle slice
for batch_idx in range(batch.shape[0]):
tag = tag_template.format(name, batch_idx, 0, slice_idx)
img = batch[batch_idx, slice_idx, ...]
tagged_images.append((tag, self._normalize_img(img)))
return tagged_images
@staticmethod
def _normalize_img(img):
import sys
return (img - np.min(img)) / (np.ptp(img)+sys.float_info.epsilon)
=======
for tag, image in self.tensorboard_formatter(name, batch):
self.writer.add_image(prefix + tag, image, self.num_iterations, dataformats='CHW')
>>>>>>> 62e10674c5cf9f44e252297203213b8c7f23c5f7:pytorch3dunet/unet3d/trainer.py
@staticmethod
def _batch_size(input):
if isinstance(input, list) or isinstance(input, tuple):
return input[0].size(0)
else:
return input.size(0)
| 43.739234 | 145 | 0.625116 |
9f7db5c73811c2f7b40332db5eba619d1de3231f | 1,060 | py | Python | smart_selects/views.py | DjangoAdminHackers/django-smart-selects | 50413ce7c7905fe3bdcda45308c23dc28dccab5b | [
"BSD-3-Clause"
] | 1 | 2017-07-12T07:51:29.000Z | 2017-07-12T07:51:29.000Z | smart_selects/views.py | DjangoAdminHackers/django-smart-selects | 50413ce7c7905fe3bdcda45308c23dc28dccab5b | [
"BSD-3-Clause"
] | null | null | null | smart_selects/views.py | DjangoAdminHackers/django-smart-selects | 50413ce7c7905fe3bdcda45308c23dc28dccab5b | [
"BSD-3-Clause"
] | null | null | null | from json import dumps
from django.http import HttpResponse
from .utils import get_filterchain_kwargs, get_filterchain_queryset, \
render_filterchain_choices
def filterchain(request, app, model, field, value, manager=None):
    """Return the chained-select choices matching ``field == value`` as JSON."""
    queryset = get_filterchain_queryset(app, model, manager)
    lookup = get_filterchain_kwargs(field, value)
    choices = render_filterchain_choices(queryset.filter(**lookup))
    return HttpResponse(dumps(choices), content_type='application/json')
def filterchain_all(request, app, model, field, value, manager=None):
    """Return every choice as JSON: matches first, a blank separator, then the rest."""
    lookup = get_filterchain_kwargs(field, value)
    queryset = get_filterchain_queryset(app, model, manager)
    matches = render_filterchain_choices(queryset.filter(**lookup))
    others = render_filterchain_choices(queryset.exclude(**lookup))
    separator = {'value': "", 'display': "---------"}
    choices = matches + [separator] + others
    return HttpResponse(dumps(choices), content_type='application/json')
63e0b79fb3fa2098d13d000dccdf5fced1f45078 | 3,234 | py | Python | LRUCache/cache.py | RomaA2000/SDCourse | 7e08686f6769c5fa309651396f465be049f06933 | [
"MIT"
] | null | null | null | LRUCache/cache.py | RomaA2000/SDCourse | 7e08686f6769c5fa309651396f465be049f06933 | [
"MIT"
] | null | null | null | LRUCache/cache.py | RomaA2000/SDCourse | 7e08686f6769c5fa309651396f465be049f06933 | [
"MIT"
] | null | null | null | import functools
from node import Node
class LRUCache:
    """Least-recently-used cache: a dict for O(1) lookup plus a doubly linked
    list for recency order.

    The head of the list is the most recently used entry, the tail the least
    recently used one; the tail entry is evicted once the cache is full.
    Every node's ``data`` is a ``(key, value)`` tuple and ``self.dict`` maps
    each cached key to its node.
    """

    def __init__(self, size_limit):
        self.size_limit = size_limit  # maximum number of cached entries
        self.tail = None  # least recently used node
        self.head = None  # most recently used node
        self.dict = dict()  # key -> Node carrying (key, value)

    def _add_to_head(self, data):
        # Push a new node carrying `data` to the most-recently-used end.
        if self.head is None:
            self.tail = Node(data=data)
            self.head = self.tail
            assert self.head is self.tail, "one element list"
        else:
            self.head = self.head.insert_before(data)

    def _remove_from_tail(self):
        # Evict the least recently used node *and* its dict entry.
        # (Bug fix: the dict entry used to be left behind, so evicted keys
        # still appeared cached and referenced unlinked nodes.)
        if self.tail is None:
            return
        evicted_key = self.tail.data[0]
        if self.head is self.tail:
            self.tail.remove()
            self.head = None
            self.tail = None
        else:
            new_tail = self.tail.prev
            self.tail.remove()
            self.tail = new_tail
        self.dict.pop(evicted_key, None)

    def _add_key_value(self, key, value):
        # Insert a fresh entry at the head and index it.
        assert key not in self.dict, "key should not be in dict"
        self._add_to_head((key, value))
        assert self.head.data == (key, value)
        self.dict[key] = self.head
        assert key in self.dict

    def __getitem__(self, key):
        """Return the cached value and mark `key` as most recently used."""
        assert key in self.dict, "key should be in dict"
        self.move_to_front(key)
        return self.dict[key].data[1]

    def move_to_front(self, key):
        """Re-link an existing key's node to the head (most recent) position."""
        assert key in self.dict, "key should be added before function call"
        node = self.dict[key]
        key, value = node.data
        # Bug fix: keep head/tail pointers consistent when the node being
        # unlinked sits at either end of the list (they used to go stale).
        if node is self.head:
            self.head = node.next
        if node is self.tail:
            self.tail = node.prev
        node.remove()
        self.dict.pop(key)
        self._add_key_value(key, value)
        assert key in self.dict, "key should be added after function call"
        assert self.dict[key].data == (key, value), "data in dict should be equal to data at start"
        assert self.head.data == (key, value), "data should be in head after function call"

    def __setitem__(self, key, value):
        """Insert or update `key`, evicting the LRU entry when at capacity."""
        if key in self.dict:
            self.dict[key].data = (key, value)
            self.move_to_front(key)
        else:
            # Bug fix: evict when already *at* capacity (the old `>` test let
            # the cache hold size_limit + 1 entries indefinitely).
            if len(self.dict) >= self.size_limit:
                self._remove_from_tail()
            self._add_key_value(key, value)
        assert self.head.data == (key, value), "data in head node should be equal to data passed in function"
        assert self.dict[key].data == (key, value), "data in dict should be equal to data passed in function"

    def __contains__(self, key):
        return key in self.dict

    def __str__(self):
        result = "{"
        list_iter = self.head
        while list_iter is not None:
            result += " " + str(list_iter.data) + " "
            assert list_iter.data[0] in self.dict, "keys in all nodes should be in dict"
            assert list_iter == self.dict[list_iter.data[0]], "data in all nodes should be equal to data in dict"
            list_iter = list_iter.next
        result += "}"
        return result
def cache(max_size):
    """Decorator factory: memoize a function with an LRU cache of `max_size` entries."""
    def decorator(func):
        store = LRUCache(max_size)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Key on the repr of the full positional/keyword argument list.
            key = str(args) + str(kwargs)
            if key in store:
                return store[key]
            result = func(*args, **kwargs)
            store[key] = result
            return result
        return wrapper
    return decorator
493efee32190394a71a2492081a13033bc6ea342 | 28,504 | py | Python | tools/spelling/check_spelling_pedantic.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
] | 17,703 | 2017-09-14T18:23:43.000Z | 2022-03-31T22:04:17.000Z | tools/spelling/check_spelling_pedantic.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
] | 15,957 | 2017-09-14T16:38:22.000Z | 2022-03-31T23:56:30.000Z | tools/spelling/check_spelling_pedantic.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
] | 3,780 | 2017-09-14T18:58:47.000Z | 2022-03-31T17:10:47.000Z | #! /usr/bin/env python3
from __future__ import print_function
import argparse
import locale
import math
import os
import re
import subprocess
import sys
from functools import partial
from itertools import chain
# Handle function rename between python 2/3.
# On Python 2 `raw_input` exists and replaces `input`; on Python 3 the
# NameError leaves the builtin `input` untouched.
try:
    input = raw_input
except NameError:
    pass
# Python 3 removed the builtin `cmp`; provide the standard three-way
# comparison shim ((a > b) - (a < b)) when it is missing.
try:
    cmp
except NameError:
    def cmp(x, y):
        return (x > y) - (x < y)
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
# Special comment commands control behavior. These may appear anywhere
# within a comment, but only one per line. The command applies to the
# entire line on which it appears. The "off" command disables spell
# checking until the next "on" command, or end-of-file. The
# "skip-file" command disables spell checking in the entire file (even
# previous comments). In a multi-line (/* */) comment, "skip-block"
# disables spell checking for the remainder of the comment. For
# sequences of full-line comments (only white space before a //
# comment), "skip-block" disables spell checking until the sequence of
# comments is interrupted by a blank line or a line with code.
SPELLCHECK_OFF = "SPELLCHECKER(off)" # disable SPELLCHECK_ON (or EOF)
SPELLCHECK_ON = "SPELLCHECKER(on)" # (re-)enable
SPELLCHECK_SKIP_FILE = "SPELLCHECKER(skip-file)" # disable checking this entire file
SPELLCHECK_SKIP_BLOCK = "SPELLCHECKER(skip-block)" # disable to end of comment
# Single line comments: // comment OR /* comment */
# Limit the characters that may precede // to help filter out some code
# mistakenly processed as a comment.
INLINE_COMMENT = re.compile(r'(?:^|[^:"])//( .*?$|$)|/\*+(.*?)\*+/')
# Multi-line comments: /* comment */ (multiple lines)
MULTI_COMMENT_START = re.compile(r'/\*(.*?)$')
MULTI_COMMENT_END = re.compile(r'^(.*?)\*/')
# Envoy TODO comment style.
TODO = re.compile(r'(TODO|NOTE)\s*\(@?[A-Za-z0-9-]+\):?')
# Ignore parameter names in doxygen comments.
METHOD_DOC = re.compile('@(param\s+\w+|return(\s+const)?\s+\w+)')
# Camel Case splitter
CAMEL_CASE = re.compile(r'[A-Z]?[a-z]+|[A-Z]+(?=[A-Z]|$)')
# Base64: we assume base64 encoded data in tests is never mixed with
# other comments on a single line.
BASE64 = re.compile(r'^[\s*]+([A-Za-z0-9/+=]{16,})\s*$')
# At least one digit (used as the secondary check for BASE64 candidates).
NUMBER = re.compile(r'\d')
# Hex: match 1) longish strings of hex digits (to avoid matching "add" and
# other simple words that happen to look like hex), 2) 2 or more two digit
# hex numbers separated by colons, 3) "0x" prefixed hex numbers of any length,
# or 4) UUIDs.
HEX = re.compile(r'(?:^|\s|[(])([A-Fa-f0-9]{8,})(?:$|\s|[.,)])')
HEX_SIG = re.compile(r'(?:\W|^)([A-Fa-f0-9]{2}(:[A-Fa-f0-9]{2})+)(?:\W|$)')
PREFIXED_HEX = re.compile(r'0x[A-Fa-f0-9]+')
UUID = re.compile(r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}')
BIT_FIELDS = re.compile(r'[01]+[XxYy]+')
AB_FIELDS = re.compile(r'\W([AB]+)\W')
# Matches e.g. FC00::/8 or 2001::abcd/64. Does not match ::1/128, but
# aspell ignores that anyway.
IPV6_ADDR = re.compile(r'(?:\W|^)([A-Fa-f0-9]+:[A-Fa-f0-9:]+/[0-9]{1,3})(?:\W|$)')
# Quoted words: "word", 'word', or *word*.
QUOTED_WORD = re.compile(r'((["\'])[A-Za-z0-9.:-]+(\2))|(\*[A-Za-z0-9.:-]+\*)')
# Backtick-quoted words that look like code. Note the overlap with RST_LINK.
QUOTED_EXPR = re.compile(r'`[A-Za-z0-9:()<>_.,/{}\[\]&*-]+`')
# Tuple expressions like (abc, def).
TUPLE_EXPR = re.compile(r'\([A-Za-z0-9]+(?:, *[A-Za-z0-9]+){1,}\)')
# Command flags (e.g. "-rf") and percent specifiers.
FLAG = re.compile(r'\W([-%][A-Za-z]+)')
# Bare github users (e.g. @user).
USER = re.compile(r'\W(@[A-Za-z0-9-]+)')
# RST Links (e.g. `text <https://example.com>`_, :ref:`text <internal_ref>`)
RST_LINK = re.compile(r'`([^`<])+<([^ ]+)>`')
# RST inline literals.
RST_LITERAL = re.compile(r'``.*``')
# RST code block marker.
RST_CODE_BLOCK = '.. code-block::'
# Path names.
ABSPATH = re.compile(r'(?:\s|^)((/[A-Za-z0-9_.*-]+)+)(?:\s|$)')
FILEREF = re.compile(r'(?:\s|^)([A-Za-z0-9_./-]+\.(cc|h|py|sh))(?:\s|$)')
# Ordinals (1st, 2nd, 3rd, 4th, ...)
ORDINALS = re.compile(r'([0-9]*1st|[0-9]*2nd|[0-9]*3rd|[0-9]+th)')
# Start of string indent.
INDENT = re.compile(r'^( *)')
# Smart (curly) quotes mapped to their ASCII equivalents.
SMART_QUOTES = {
    "\u2018": "'",
    "\u2019": "'",
    "\u201c": '"',
    "\u201d": '"',
}
# Valid dictionary words. Anything else crashes aspell.
DICTIONARY_WORD = re.compile(r"^[A-Za-z']+$")
# Runtime flags; overwritten from the command-line arguments in __main__.
DEBUG = 0
COLOR = True
MARK = False
def red(s):
    """Wrap `s` in ANSI bold-red escape codes when color output is enabled."""
    if not COLOR:
        return s
    return "\33[1;31m" + s + "\033[0m"
def debug(s):
    """Print `s` when the first verbosity level (-d) is enabled."""
    if DEBUG >= 1:
        print(s)
def debug1(s):
    """Print `s` only at the second verbosity level (-d -d)."""
    if DEBUG >= 2:
        print(s)
class SpellChecker:
    """Aspell-based spell checker.

    Wraps a long-lived `aspell pipe` subprocess fed one line at a time and
    parses its per-word results. A custom dictionary file provides extra
    words plus prefix-/suffix- entries.
    """
    def __init__(self, dictionary_file):
        self.dictionary_file = dictionary_file
        self.aspell = None  # Popen handle once start() has run
        self.prefixes = []
        self.suffixes = []
        self.prefix_re = None
        self.suffix_re = None
    def start(self):
        """Load the dictionary, write the aspell personal word list and spawn aspell."""
        words, prefixes, suffixes = self.load_dictionary()
        self.prefixes = prefixes
        self.suffixes = suffixes
        self.prefix_re = re.compile("(?:\s|^)((%s)-)" % ("|".join(prefixes)), re.IGNORECASE)
        self.suffix_re = re.compile("(-(%s))(?:\s|$)" % ("|".join(suffixes)), re.IGNORECASE)
        # Generate aspell personal dictionary.
        pws = os.path.join(CURR_DIR, '.aspell.en.pws')
        with open(pws, 'w') as f:
            f.write("personal_ws-1.1 en %d\n" % (len(words)))
            f.writelines(words)
        # Start an aspell process.
        aspell_args = ["aspell", "pipe", "--lang=en_US", "--encoding=utf-8", "--personal=" + pws]
        self.aspell = subprocess.Popen(
            aspell_args,
            bufsize=4096,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True)
        # Read the version line that aspell emits on startup.
        self.aspell.stdout.readline()
    def stop(self):
        """Shut the aspell subprocess down (no-op when not running)."""
        if not self.aspell:
            return
        self.aspell.stdin.close()
        self.aspell.wait()
        self.aspell = None
    def check(self, line):
        """Spell check one line; returns (word, offset, suggestions) tuples."""
        if line.strip() == '':
            return []
        self.aspell.poll()
        if self.aspell.returncode is not None:
            print("aspell quit unexpectedly: return code %d" % (self.aspell.returncode))
            sys.exit(2)
        debug1("ASPELL< %s" % (line))
        self.aspell.stdin.write(line + os.linesep)
        self.aspell.stdin.flush()
        errors = []
        while True:
            result = self.aspell.stdout.readline().strip()
            debug1("ASPELL> %s" % (result))
            # Check for end of results.
            if result == "":
                break
            t = result[0]
            if t == "*" or t == "-" or t == "+":
                # *: found in dictionary.
                # -: found run-together words in dictionary.
                # +: found root word in dictionary.
                continue
            # & <original> <N> <offset>: m1, m2, ... mN, g1, g2, ...
            # ? <original> 0 <offset>: g1, g2, ....
            # # <original> <offset>
            original, rem = result[2:].split(" ", 1)
            if t == "#":
                # Not in dictionary, but no suggestions.
                errors.append((original, int(rem), []))
            elif t == '&' or t == '?':
                # Near misses and/or guesses.
                _, rem = rem.split(" ", 1) # Drop N (may be 0).
                o, rem = rem.split(": ", 1) # o is offset from start of line.
                suggestions = rem.split(", ")
                errors.append((original, int(o), suggestions))
            else:
                print("aspell produced unexpected output: %s" % (result))
                sys.exit(2)
        return errors
    def load_dictionary(self):
        """Parse the custom dictionary; returns (words, prefixes, suffixes)."""
        # Read the custom dictionary.
        all_words = []
        with open(self.dictionary_file, 'r') as f:
            all_words = f.readlines()
        # Strip comments, invalid words, and blank lines.
        words = [w for w in all_words if len(w.strip()) > 0 and re.match(DICTIONARY_WORD, w)]
        suffixes = [w.strip()[1:] for w in all_words if w.startswith('-')]
        prefixes = [w.strip()[:-1] for w in all_words if w.strip().endswith('-')]
        # Allow acronyms and abbreviations to be spelled in lowercase
        # (e.g. "HTTP" also admits "http", which in turn matches "Http").
        # Bug fix: the old code did `words += word.lower()`, which extended
        # the list with the word's individual *characters* (inflating the
        # word count written to the .pws header) while also mutating the
        # list being iterated. Append the whole word to a snapshot instead.
        for word in list(words):
            if word.isupper():
                words.append(word.lower())
        return (words, prefixes, suffixes)
    def add_words(self, additions):
        """Merge `additions` into the dictionary file, then restart aspell."""
        lines = []
        with open(self.dictionary_file, 'r') as f:
            lines = f.readlines()
        additions = [w + os.linesep for w in additions]
        additions.sort()
        # Insert additions into the lines ignoring comments, suffixes, and blank lines.
        idx = 0
        add_idx = 0
        while idx < len(lines) and add_idx < len(additions):
            line = lines[idx]
            if len(line.strip()) != 0 and line[0] != "#" and line[0] != '-':
                c = cmp(additions[add_idx], line)
                if c < 0:
                    lines.insert(idx, additions[add_idx])
                    add_idx += 1
                elif c == 0:
                    # Already present; skip the duplicate.
                    add_idx += 1
            idx += 1
        # Append any remaining additions.
        lines += additions[add_idx:]
        with open(self.dictionary_file, 'w') as f:
            f.writelines(lines)
        self.stop()
        self.start()
# Split camel case words and run them through the dictionary. Returns
# a replacement list of errors. The replacement list may contain just
# the original error (if the word is not camel case), may be empty if
# the split words are all spelled correctly, or may be a new set of
# errors referencing the misspelled sub-words.
def check_camel_case(checker, err):
    """Re-check a misspelled word by splitting it into camel-case parts.

    Returns the original error unchanged for non-camel-case words, an empty
    list when every part is spelled correctly, or fresh errors for the
    misspelled parts (offsets stay relative to the original line).
    """
    word, word_offset, _ = err
    debug("check camel case %s" % (word))
    parts = re.findall(CAMEL_CASE, word)
    if len(parts) <= 1:
        # Not camel case: the previous result stands.
        debug(" -> not camel case")
        return [err]
    part_errors = []
    offset = 0
    for part in parts:
        debug(" -> part: %s" % (part))
        result = checker.check(part)
        if result:
            debug(" -> not found in dictionary")
            part_errors.append((part, word_offset + offset, result[0][2]))
        offset += len(part)
    return part_errors
# Check for affixes and run them through the dictionary again. Returns
# a replacement list of errors which may just be the original errors
# or empty if an affix was successfully handled.
def check_affix(checker, err):
    """Accept a misspelled word when stripping a known affix leaves a
    dictionary word; returns [] on success, [err] otherwise."""
    word, _, _ = err
    debug("check affix %s" % (word))
    lowered = word.lower()
    for prefix in checker.prefixes:
        debug(" -> try %s" % (prefix))
        if not lowered.startswith(prefix.lower()):
            continue
        root = word[len(prefix):]
        if root == '':
            continue
        debug(" -> check %s" % (root))
        if not checker.check(root):
            debug(" -> ok")
            return []
    for suffix in checker.suffixes:
        if not lowered.endswith(suffix.lower()):
            continue
        root = word[:-len(suffix)]
        if root == '':
            continue
        debug(" -> try %s" % (root))
        if not checker.check(root):
            debug(" -> ok")
            return []
    return [err]
# Find occurrences of the regex within comment and replace the numbered
# matching group with spaces. If secondary is defined, the matching
# group must also match secondary to be masked.
def mask_with_regex(comment, regex, group, secondary=None):
    """Blank out the numbered match group of every `regex` hit in `comment`.

    When `secondary` is supplied, a group is only masked if `secondary` also
    matches its text. Masking replaces characters with spaces so offsets of
    the remaining text are preserved. Returns (masked_comment, found).
    """
    found = False
    for match in regex.finditer(comment):
        text = match.group(group)
        if secondary is not None and secondary.search(text) is None:
            continue
        begin, end = match.start(group), match.end(group)
        comment = comment[:begin] + ' ' * (end - begin) + comment[end:]
        found = True
    return (comment, found)
# Checks the comment at offset against the spell checker. Result is an array
# of tuples where each tuple is the misspelled word, it's offset from the
# start of the line, and an array of possible replacements.
def check_comment(checker, offset, comment):
    """Spell check one comment; `offset` is the comment's column in its line.

    Code-like fragments (hex, base64, paths, RST markup, quoted words, ...)
    are blanked out before the text goes to aspell. Returns a list of
    (word, line_offset, suggestions) tuples.
    """
    # Normalize smart quotes, which can confuse aspell.
    for smart, plain in SMART_QUOTES.items():
        comment = comment.replace(smart, plain)
    # Blank out non-prose fragments; masking with spaces keeps every
    # surviving word at its original offset within the line.
    for regex, group, secondary in (
        (TODO, 0, None),        # TODO(user)/NOTE(user) tags
        (METHOD_DOC, 0, None),  # doxygen @param/@return names
        (BASE64, 1, NUMBER),    # base64 blobs (must contain a digit)
        (HEX, 1, None),
        (HEX_SIG, 1, None),
        (PREFIXED_HEX, 0, None),
        (BIT_FIELDS, 0, None),
        (AB_FIELDS, 1, None),
        (UUID, 0, None),
        (IPV6_ADDR, 1, None),
        (QUOTED_WORD, 0, None),
        (RST_LITERAL, 0, None),
    ):
        comment, _ = mask_with_regex(comment, regex, group, secondary)
    # Mask the reference part of an RST link (keeping the link text). Only
    # when no link is present, treat backticked/tuple text as code.
    comment, found = mask_with_regex(comment, RST_LINK, 0)
    if not found:
        comment, _ = mask_with_regex(comment, QUOTED_EXPR, 0)
        comment, _ = mask_with_regex(comment, TUPLE_EXPR, 0)
    for regex, group in ((FLAG, 1), (USER, 1), (ABSPATH, 1), (FILEREF, 1), (ORDINALS, 0)):
        comment, _ = mask_with_regex(comment, regex, group)
    # Dictionary-declared prefixes (word-) and suffixes (-word) are accepted.
    if checker.prefix_re is not None:
        comment, _ = mask_with_regex(comment, checker.prefix_re, 1)
    if checker.suffix_re is not None:
        comment, _ = mask_with_regex(comment, checker.suffix_re, 1)
    # Everything got masked: nothing left to check.
    if comment == "" or comment.strip() == "":
        return []
    # Mask leading punctuation.
    if not comment[0].isalnum():
        comment = ' ' + comment[1:]
    # Translate aspell's comment-relative offsets into line-relative ones.
    errors = [(word, pos + offset, suggestions)
              for (word, pos, suggestions) in checker.check(comment)]
    # Give camel-case words and affixed words a second chance.
    errors = [*chain.from_iterable(check_camel_case(checker, e) for e in errors)]
    errors = [*chain.from_iterable(check_affix(checker, e) for e in errors)]
    return errors
def print_error(file, line_offset, lines, errors):
    """Print a `file:line:` report with each misspelled word highlighted."""
    line = lines[line_offset]
    prefix = "%s:%d:" % (file, line_offset + 1)
    # Substitute from the end of the line so earlier offsets stay valid.
    for word, offset, _ in reversed(errors):
        line = line[:offset] + red(word) + line[offset + len(word):]
    print("%s%s" % (prefix, line.rstrip()))
    if MARK:
        # Emit a second line with a caret under the start of each error.
        carets = ' ' * len(prefix)
        last = 0
        for word, offset, _ in errors:
            carets += ' ' * (offset - last) + '^'
            last = offset + 1
        print(carets)
def print_fix_options(word, suggestions):
    """Print the interactive action menu and numbered suggestions for `word`."""
    print("%s:" % (word))
    print("  a: accept and add to dictionary")
    print("  A: accept and add to dictionary as ALLCAPS (for acronyms)")
    print("  f <word>: replace with the given word without modifying dictionary")
    print("  i: ignore")
    print("  r <word>: replace with given word and add to dictionary")
    print("  R <word>: replace with given word and add to dictionary as ALLCAPS (for acronyms)")
    print("  x: abort")
    if not suggestions:
        return
    # Lay the suggestions out in fixed-width columns for a ~78 column screen.
    col_width = max(len(word) for word in suggestions)
    opt_width = int(math.log(len(suggestions), 10)) + 1
    padding = 2  # Two spaces of padding.
    delim = 2  # Colon and space after number.
    # Always use at least one column: the old computation could produce zero
    # columns for a very long suggestion and then divide by zero below.
    num_cols = max(1, int(78 / (col_width + padding + opt_width + delim)))
    num_rows = int(len(suggestions) / num_cols + 1)
    rows = [""] * num_rows
    indent = " " * padding
    for idx, sugg in enumerate(suggestions):
        row = idx % len(rows)
        row_data = "%d: %s" % (idx, sugg)
        rows[row] += indent + row_data.ljust(col_width + opt_width + delim)
    for row in rows:
        print(row)
def fix_error(checker, file, line_offset, lines, errors):
    """Interactively fix the misspellings found on one line.

    Shows each error with its suggestions, prompts on stdin for an action,
    rewrites `lines[line_offset]` in place with the chosen replacements and
    feeds accepted words back into the checker's dictionary.
    """
    print_error(file, line_offset, lines, errors)
    fixed = {}  # word -> replacement already chosen on this line
    replacements = []  # replacement for each entry of `errors`, in order
    additions = []  # words to append to the dictionary
    for (word, offset, suggestions) in errors:
        if word in fixed:
            # Same typo was repeated in a line, so just reuse the previous choice.
            replacements += [fixed[word]]
            continue
        print_fix_options(word, suggestions)
        replacement = ""
        # Loop until the user supplies a valid action.
        while replacement == "":
            try:
                choice = input("> ")
            except EOFError:
                # Treat end-of-input as an explicit abort.
                choice = "x"
            add = None
            if choice == "x":
                print("Spell checking aborted.")
                sys.exit(2)
            elif choice == "a":
                replacement = word
                add = word
            elif choice == "A":
                replacement = word
                add = word.upper()
            elif choice[:1] == "f":
                replacement = choice[1:].strip()
                if replacement == "":
                    print(
                        "Invalid choice: '%s'. Must specify a replacement (e.g. 'f corrected')." %
                        (choice))
                    continue
            elif choice == "i":
                replacement = word
            elif choice[:1] == "r" or choice[:1] == "R":
                replacement = choice[1:].strip()
                if replacement == "":
                    print(
                        "Invalid choice: '%s'. Must specify a replacement (e.g. 'r corrected')." %
                        (choice))
                    continue
                # Only add to the dictionary when aspell did not already know it.
                if choice[:1] == "R":
                    if replacement.upper() not in suggestions:
                        add = replacement.upper()
                elif replacement not in suggestions:
                    add = replacement
            else:
                # A bare number selects the corresponding suggestion.
                try:
                    idx = int(choice)
                except ValueError:
                    idx = -1
                if idx >= 0 and idx < len(suggestions):
                    replacement = suggestions[idx]
                else:
                    print("Invalid choice: '%s'" % (choice))
        fixed[word] = replacement
        replacements += [replacement]
        if add:
            # aspell only accepts letters/apostrophes in personal dictionaries.
            if re.match(DICTIONARY_WORD, add):
                additions += [add]
            else:
                print(
                    "Cannot add %s to the dictionary: it may only contain letter and apostrophes"
                    % add)
    if len(errors) != len(replacements):
        print("Internal error %d errors with %d replacements" % (len(errors), len(replacements)))
        sys.exit(2)
    # Perform replacements on the line (right-to-left so offsets stay valid).
    line = lines[line_offset]
    for idx in range(len(replacements) - 1, -1, -1):
        word, offset, _ = errors[idx]
        replacement = replacements[idx]
        if word == replacement:
            continue
        line = line[:offset] + replacement + line[offset + len(word):]
    lines[line_offset] = line
    # Update the dictionary.
    checker.add_words(additions)
class Comment:
    """A single comment: its line/column position, its text, and whether it
    is the last comment appearing on its source line."""
    def __init__(self, line, col, text, last_on_line):
        self.text = text
        self.line = line
        self.col = col
        self.last_on_line = last_on_line
# Extract comments from lines. Returns an array of Comment.
def extract_comments(lines):
    """Extract spell-checkable comments from source `lines`.

    Returns a list of Comment objects, with SPELLCHECKER(...) control
    commands honored and RST code-block bodies filtered out.
    """
    in_comment = False
    comments = []
    for line_idx, line in enumerate(lines):
        line_comments = []
        last = 0
        if in_comment:
            mc_end = MULTI_COMMENT_END.search(line)
            if mc_end is None:
                # Full line is within a multi-line comment.
                line_comments.append((0, line))
            else:
                # Start of line is the end of a multi-line comment.
                line_comments.append((0, mc_end.group(1)))
                last = mc_end.end()
                in_comment = False
        if not in_comment:
            for inline in INLINE_COMMENT.finditer(line, last):
                # Single-line comment.
                m = inline.lastindex # 1 is //, 2 is /* ... */
                line_comments.append((inline.start(m), inline.group(m)))
                last = inline.end(m)
            if last < len(line):
                mc_start = MULTI_COMMENT_START.search(line, last)
                if mc_start is not None:
                    # New multi-line comment starts at end of line.
                    line_comments.append((mc_start.start(1), mc_start.group(1)))
                    in_comment = True
        for idx, line_comment in enumerate(line_comments):
            col, text = line_comment
            last_on_line = idx + 1 >= len(line_comments)
            comments.append(Comment(line=line_idx, col=col, text=text, last_on_line=last_on_line))
    # Handle control statements and filter out comments that are part of
    # RST code block directives.
    result = []
    n = 0
    nc = len(comments)
    while n < nc:
        text = comments[n].text
        if SPELLCHECK_SKIP_FILE in text:
            # Skip the file: just don't return any comments.
            return []
        pos = text.find(SPELLCHECK_ON)
        if pos != -1:
            # Ignored because spellchecking isn't disabled. Just mask out the command.
            comments[n].text = text[:pos] + ' ' * len(SPELLCHECK_ON) + text[pos + len(SPELLCHECK_ON):]
            result.append(comments[n])
            n += 1
        elif SPELLCHECK_OFF in text or SPELLCHECK_SKIP_BLOCK in text:
            skip_block = SPELLCHECK_SKIP_BLOCK in text
            # Bug fix: track the previous comment's *line number*. The old
            # code stored the list index `n` here and then compared it with
            # line numbers below, so skip-block regions ended after at most
            # a couple of lines regardless of the actual comment layout.
            last_line = comments[n].line
            n += 1
            while n < nc:
                if skip_block:
                    if comments[n].line - last_line > 1:
                        # Gap in comments. We've skipped the block.
                        break
                    last_line = comments[n].line
                    line = lines[comments[n].line]
                    if line[:comments[n].col].strip() != "":
                        # Some code here. We've skipped the block.
                        break
                elif SPELLCHECK_ON in comments[n].text:
                    # Turn checking back on.
                    n += 1
                    break
                n += 1
        elif text.strip().startswith(RST_CODE_BLOCK):
            # Start of a code block.
            indent = len(INDENT.search(text).group(1))
            last_line = comments[n].line
            n += 1
            while n < nc:
                if comments[n].line - last_line > 1:
                    # Gap in comments. Code block is finished.
                    break
                last_line = comments[n].line
                if comments[n].text.strip() != "":
                    # Blank lines are ignored in code blocks.
                    if len(INDENT.search(comments[n].text).group(1)) <= indent:
                        # Back to original indent, or less. The code block is done.
                        break
                n += 1
        else:
            result.append(comments[n])
            n += 1
    return result
def check_file(checker, file, lines, error_handler):
    """Spell check all comments in `lines` of `file`.

    Errors are accumulated per source line and reported in one call to
    `error_handler`. Returns a (comment count, error count) tuple.
    (Removed the unused `in_code_block`/`code_block_indent` locals.)
    """
    num_errors = 0
    comments = extract_comments(lines)
    errors = []
    for comment in comments:
        errors += check_comment(checker, comment.col, comment.text)
        if comment.last_on_line and len(errors) > 0:
            # Handle all the errors in a line.
            num_errors += len(errors)
            error_handler(file, comment.line, lines, errors)
            errors = []
    return (len(comments), num_errors)
def execute(files, dictionary_file, fix):
    """Spell-check every file in `files`; optionally fix errors in place.

    Returns True when no errors were found.
    """
    checker = SpellChecker(dictionary_file)
    checker.start()
    handler = partial(fix_error, checker) if fix else print_error

    n_files = 0
    n_comments = 0
    n_errors = 0
    for path in files:
        with open(path, 'r') as f:
            file_lines = f.readlines()
        n_files += 1
        file_comments, file_errors = check_file(checker, path, file_lines, handler)
        n_comments += file_comments
        n_errors += file_errors
        if fix and file_errors > 0:
            # The fixing handler edited file_lines in place; persist them.
            with open(path, 'w') as f:
                f.writelines(file_lines)
    checker.stop()

    print(
        "Checked %d file(s) and %d comment(s), found %d error(s)." %
        (n_files, n_comments, n_errors))
    return n_errors == 0
if __name__ == "__main__":
    # Force UTF-8 across all open and popen calls. Fallback to 'C' as the
    # language to handle hosts where en_US is not recognized (e.g. CI).
    try:
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    except locale.Error:
        # Narrowed from a bare `except:` so unrelated failures (including
        # KeyboardInterrupt) are no longer swallowed here.
        locale.setlocale(locale.LC_ALL, 'C.UTF-8')

    default_dictionary = os.path.join(CURR_DIR, 'spelling_dictionary.txt')

    parser = argparse.ArgumentParser(description="Check comment spelling.")
    parser.add_argument(
        'operation_type',
        type=str,
        choices=['check', 'fix'],
        help="specify if the run should 'check' or 'fix' spelling.")
    parser.add_argument(
        'target_paths', type=str, nargs="*", help="specify the files for the script to process.")
    parser.add_argument(
        '-d', '--debug', action='count', default=0, help="Debug spell checker subprocess.")
    parser.add_argument(
        '--mark', action='store_true', help="Emits extra output to mark misspelled words.")
    parser.add_argument(
        '--dictionary',
        type=str,
        default=default_dictionary,
        help="specify a location for Envoy-specific dictionary words")
    parser.add_argument(
        '--color',
        type=str,
        choices=['on', 'off', 'auto'],
        default="auto",
        help="Controls colorized output. Auto limits color to TTY devices.")
    parser.add_argument(
        '--test-ignore-exts',
        dest='test_ignore_exts',
        action='store_true',
        help="For testing, ignore file extensions.")
    args = parser.parse_args()

    COLOR = args.color == "on" or (args.color == "auto" and sys.stdout.isatty())
    DEBUG = args.debug
    MARK = args.mark

    paths = args.target_paths
    if not paths:
        paths = ['./api', './include', './source', './test', './tools']
    # Exclude the third_party/ directory from spell checking, even when
    # requested through arguments. Otherwise git pre-push hook checks it for
    # merged commits.
    # Bug fix: the second condition previously duplicated './third_party/'
    # and therefore never filtered the bare 'third_party/...' form.
    paths = [
        path for path in paths
        if not path.startswith('./third_party/') and not path.startswith('third_party/')
    ]
    exts = ['.cc', '.h', '.proto']
    if args.test_ignore_exts:
        exts = None
    target_paths = []
    for p in paths:
        if os.path.isdir(p):
            # Recursively collect files with a matching extension.
            for root, _, files in os.walk(p):
                target_paths += [
                    os.path.join(root, f)
                    for f in files
                    if (exts is None or os.path.splitext(f)[1] in exts)
                ]
        if os.path.isfile(p) and (exts is None or os.path.splitext(p)[1] in exts):
            target_paths += [p]
    rv = execute(target_paths, args.dictionary, args.operation_type == 'fix')

    if args.operation_type == 'check':
        if not rv:
            print(
                "ERROR: spell check failed. Run 'tools/spelling/check_spelling_pedantic.py fix and/or add new "
                "words to tools/spelling/spelling_dictionary.txt'")
            sys.exit(1)
        print("PASS")
| 33.732544 | 111 | 0.568341 |
3b36a877f959f7d76e31584b75dfc80a0f1c4b86 | 1,631 | py | Python | 08_Introduction to Data Visualization with Matplotlib/01_Introduction to Matplotlib/06_Small multiples with shared y axis.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 5 | 2021-02-03T14:36:58.000Z | 2022-01-01T10:29:26.000Z | 08_Introduction to Data Visualization with Matplotlib/01_Introduction to Matplotlib/06_Small multiples with shared y axis.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | null | null | null | 08_Introduction to Data Visualization with Matplotlib/01_Introduction to Matplotlib/06_Small multiples with shared y axis.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 3 | 2021-02-08T00:31:16.000Z | 2022-03-17T13:52:32.000Z | '''
06 - Small multiples with shared y axis
When creating small multiples, it is often preferable to make sure that the different
plots are displayed with the same scale used on the y-axis. This can be configured by
setting the sharey key-word to True.
In this exercise, you will create a Figure with two Axes objects that share their y-axis.
As before, the data is provided in seattle_weather and austin_weather DataFrames.
Instructions
- Create a Figure with an array of two Axes objects that share their y-axis range.
- Plot Seattle's "MLY-PRCP-NORMAL" in a solid blue line in the top Axes.
- Add Seattle's "MLY-PRCP-25PCTL" and "MLY-PRCP-75PCTL" in dashed blue lines to the top Axes.
- Plot Austin's "MLY-PRCP-NORMAL" in a solid red line in the bottom Axes and the "MLY-PRCP-25PCTL"
  and "MLY-PRCP-75PCTL" in dashed red lines.
'''
# One column with two Axes objects that share their y-axis range.
fig, ax = plt.subplots(2, 1, sharey=True)

# (row index, dataframe, line color) for each city's panel:
# Seattle on top in blue, Austin on the bottom in red.
panels = [(0, seattle_weather, 'b'), (1, austin_weather, 'r')]
for row, weather, line_color in panels:
    axis = ax[row]
    # Normal precipitation as a solid line, 25th/75th percentiles dashed.
    axis.plot(weather["MONTH"], weather["MLY-PRCP-NORMAL"], color=line_color)
    axis.plot(weather["MONTH"], weather["MLY-PRCP-25PCTL"], color=line_color, linestyle='--')
    axis.plot(weather["MONTH"], weather["MLY-PRCP-75PCTL"], color=line_color, linestyle='--')

plt.show()
eb8dc7222b73f7c8de9bcdcc201cd0be7545ddc5 | 521 | py | Python | sort/bubble_sort.py | oresam123/Algorithms | 6d46b939e27af7c084d305f9403285ab993423b2 | [
"MIT"
] | 8 | 2017-07-02T15:17:25.000Z | 2017-09-18T15:37:51.000Z | sort/bubble_sort.py | oresam123/Algorithms | 6d46b939e27af7c084d305f9403285ab993423b2 | [
"MIT"
] | null | null | null | sort/bubble_sort.py | oresam123/Algorithms | 6d46b939e27af7c084d305f9403285ab993423b2 | [
"MIT"
] | 3 | 2018-10-03T15:36:36.000Z | 2020-11-20T10:06:14.000Z | """
https://en.wikipedia.org/wiki/Bubble_sort
Complexity: O(N^2)
"""
def bubble_sort(arr):
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
while swapped:
swapped = False
for i in range(1, n):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
# Demo: sort a sample list in place, printing it before and after.
array = [
    1, 5, 65, 23, 57, 1232, -1, -5, -2, 242, 100,
    4, 423, 2, 564, 9, 0, 10, 43, 64, 32, 1, 999,
]
print(array)
bubble_sort(array)
print(array)
4e778f4e46d51edc7f4cd95d427e4f6dceae620a | 3,638 | py | Python | API/model/utils.py | 7AM7/Arabic-dialects-segmenter-with-flask | a69e060fa25a5905864dae7d500c4f46436e0c40 | [
"MIT"
] | 1 | 2021-07-07T06:54:43.000Z | 2021-07-07T06:54:43.000Z | API/model/utils.py | 7AM7/Arabic-dialects-segmenter-with-flask | a69e060fa25a5905864dae7d500c4f46436e0c40 | [
"MIT"
] | null | null | null | API/model/utils.py | 7AM7/Arabic-dialects-segmenter-with-flask | a69e060fa25a5905864dae7d500c4f46436e0c40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# this script uses pretrained model to segment Arabic dialect data.
# it takes the pretrained model trained on joint dialects and the
# training vocab and produces segmented text
#
# Copyright (C) 2017, Qatar Computing Research Institute, HBKU, Qatar
# Las Update: Sun Oct 29 15:34:43 +03 2017
#
# BibTex: @inproceedings{samih2017learning,
# title={Learning from Relatives: Unified Dialectal Arabic Segmentation},
# author={Samih, Younes and Eldesouki, Mohamed and Attia, Mohammed and Darwish, Kareem and Abdelali, Ahmed and Mubarak, Hamdy and Kallmeyer, Laura},
# booktitle={Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017)},
# pages={432--441},
# year={2017}}
#
__author__ = 'Ahmed Abdelali (aabdelali@hbku.edu.qa)'
import os
import numpy as np
from itertools import chain
from collections import Counter
from .model import build_model
HERE = os.path.dirname(os.path.realpath(__file__)) + '/'
lookup = os.path.join(HERE, 'data/lookup_list.txt')
vocab = os.path.join(HERE, 'data/vocab.txt')
model_path = os.path.join(HERE, 'models/joint.trian.3_keras_weights.h5')
def load_file(path):
    """
    Load sentences. A line must contain at least a word and its tag.
    Sentences are separated by empty lines.

    Args:
        path (str): path to a whitespace-separated word/tag file.

    Returns:
        (words, tags): two parallel tuples; element i holds the words
        (resp. tags) of sentence i, each as a tuple. Sentences whose first
        word contains 'DOCSTART' are dropped.
    """
    sentences = []
    sentence = []
    # Use a context manager so the file handle is always closed
    # (the original iterated over a bare open() and leaked the handle).
    with open(path) as f:
        for line in f:
            line = line.rstrip()
            if not line:
                # Blank line terminates the current sentence.
                if len(sentence) > 0:
                    if 'DOCSTART' not in sentence[0][0]:
                        sentences.append(sentence)
                    sentence = []
            else:
                word = line.split()
                assert len(word) >= 2
                sentence.append(word)
    # Flush a trailing sentence that was not followed by a blank line.
    if len(sentence) > 0:
        if 'DOCSTART' not in sentence[0][0]:
            sentences.append(sentence)
    words, tags = zip(*[zip(*row) for row in sentences])
    return words, tags
def load_lookuplist(path):
    """
    Load lookup list.

    Each line is a segmented word using '+' separators; the key is the
    unsegmented form and the value is the original segmented line.

    Args:
        path (str): path to the lookup-list file.

    Returns:
        dict: {word_without_plus_signs: segmented_line}.
    """
    listwords = {}
    # Context manager closes the handle (the original leaked it).
    with open(path) as f:
        for line in f:
            line = line.rstrip()
            listwords[line.replace('+', '')] = line
    return listwords
def load_vocab(path):
    """
    Load vocab/index2word list: one token per line, index = line number.

    Args:
        path (str): path to the vocabulary file.

    Returns:
        list of str: the tokens, in file order.
    """
    index2word = []
    # Context manager closes the handle (the original leaked it).
    with open(path) as f:
        for line in f:
            index2word.append(line.strip())
    return index2word
def _fit_term_index(terms, reserved=[], preprocess=lambda x: x):
all_terms = chain(*terms)
all_terms = map(preprocess, all_terms)
term_freqs = Counter(all_terms).most_common()
id2term = reserved + [term for term, tf in term_freqs]
return id2term
def _invert_index(id2term):
return {term: i for i, term in enumerate(id2term)}
def getData(model):
    """Assemble segmentation hyper-parameters and vocab/lookup structures.

    Args:
        model (bool): if True, return the sizes needed to build the network;
            otherwise return the decoding structures.

    Returns:
        (max_features, word_embedding_dim, maxlen, nb_seg_tags, lstm_dim)
        when model is True, else
        (maxlen, word2index, index2word, idx2Label, lookupList).
    """
    seg_tags = ['E', 'S', 'B', 'M', 'WB']
    idx2Label = dict(enumerate(seg_tags))
    label2Idx = {tag: idx for idx, tag in enumerate(seg_tags)}
    word_embedding_dim = 200
    lstm_dim = 200
    maxlen = 500  # max number of characters in one line

    index2word = load_vocab(vocab)
    word2index = _invert_index(index2word)
    pos2index = _invert_index(seg_tags)
    lookupList = load_lookuplist(lookup)

    if model:
        return len(index2word), word_embedding_dim, maxlen, len(seg_tags), lstm_dim
    return maxlen, word2index, index2word, idx2Label, lookupList
def load_model():
    """Build the Keras segmentation network and load its pretrained weights."""
    sizes = getData(model=True)
    max_features, word_embedding_dim, maxlen, nb_seg_tags, lstm_dim = sizes
    net = build_model(model_path, max_features, word_embedding_dim, maxlen,
                      nb_seg_tags, lstm_dim)
    net.load_weights(model_path)
    return net
998d6ee343ee9e7a38b8b14e6393345f5e4088ae | 607 | py | Python | abc225/abc225_c.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | abc225/abc225_c.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | abc225/abc225_c.py | Vermee81/practice-coding-contests | 78aada60fa75f208ee0eef337b33b27b1c260d18 | [
"MIT"
] | null | null | null | # https://atcoder.jp/contests/abc225/tasks/abc225_c
N, M = map(int, input().split())
b_arr = []
for _ in range(N):
b_arr.append(list(map(lambda x: int(x) - 1, input().split())))
for i in range(N):
for j in range(M):
now_column = b_arr[i][j] % 7
if now_column != (b_arr[0][0] % 7) + j:
print('No')
exit()
if j > 0:
if b_arr[i][j - 1] != b_arr[i][j] - 1:
print('No')
exit()
if i > 0:
if b_arr[i - 1][j] != b_arr[i][j] - 7:
print('No')
exit()
print('Yes')
| 26.391304 | 66 | 0.434926 |
7ee6df4c041009bc086cf6754e5fe2e11c3531a9 | 39,806 | py | Python | mpisppy/utils/sputils.py | vishalbelsare/mpi-sppy | 019fa1c04396a5bcadf758a31bc96217c17b43c9 | [
"BSD-3-Clause"
] | null | null | null | mpisppy/utils/sputils.py | vishalbelsare/mpi-sppy | 019fa1c04396a5bcadf758a31bc96217c17b43c9 | [
"BSD-3-Clause"
] | null | null | null | mpisppy/utils/sputils.py | vishalbelsare/mpi-sppy | 019fa1c04396a5bcadf758a31bc96217c17b43c9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
# Base and utility functions for mpisppy
# Note to developers: things called spcomm are way more than just a comm; SPCommunicator
import pyomo.environ as pyo
import sys
import os
import re
import time
import numpy as np
import mpisppy.scenario_tree as scenario_tree
from pyomo.core import Objective
try:
from mpi4py import MPI
haveMPI = True
global_rank = MPI.COMM_WORLD.Get_rank()
except:
haveMPI = False
from pyomo.core.expr.numeric_expr import LinearExpression
from mpisppy import tt_timer, global_toc
# Shared message for the deprecated spin_the_wheel entry points below; the
# functionality now lives on mpisppy.spin_the_wheel.WheelSpinner.
_spin_the_wheel_move_msg = \
    "spin_the_wheel should now be used as the class "\
    "mpisppy.spin_the_wheel.WheelSpinner using the method `spin()`. Output "\
    "writers are now methods of the class WheelSpinner."
def spin_the_wheel(hub_dict, list_of_spoke_dict, comm_world=None):
    # Deprecated shim kept so old callers fail with actionable instructions.
    raise RuntimeError(
        _spin_the_wheel_move_msg + \
        " See the example code below for a fix:\n"
        '''
    from mpisppy.spin_the_wheel import WheelSpinner
    ws = WheelSpinner(hub_dict, list_of_spoke_dict)
    ws.spin(comm_world=comm_world)
    '''
    )
def first_stage_nonant_npy_serializer(file_name, scenario, bundling):
    """Write only the ROOT-node nonant values of `scenario` to an .npy file
    (e.g. for confidence-interval computations)."""
    first_node = scenario._mpisppy_node_list[0]
    assert first_node.name == "ROOT"
    values = np.fromiter((pyo.value(v) for v in first_node.nonant_vardata_list), float)
    np.save(file_name, values)
def first_stage_nonant_writer(file_name, scenario, bundling):
    """Write the ROOT-node nonant variables of `scenario` to `file_name`,
    one `name,value` line per variable."""
    with open(file_name, 'w') as f:
        root_node = scenario._mpisppy_node_list[0]
        assert root_node.name == "ROOT"
        for var in root_node.nonant_vardata_list:
            name = var.name
            if bundling:
                # Strip the leading "<bundle>." prefix from the name.
                prefix_end = name.find('.')
                assert prefix_end >= 0
                name = name[(prefix_end + 1):]
            f.write(f"{name},{pyo.value(var)}\n")
def scenario_tree_solution_writer( directory_name, scenario_name, scenario, bundling ):
    """Write every non-stale variable of `scenario` to
    <directory_name>/<scenario_name>.csv as `name,value` lines."""
    out_path = os.path.join(directory_name, scenario_name+'.csv')
    with open(out_path, 'w') as f:
        var_iter = scenario.component_data_objects(
            ctype=pyo.Var,
            descend_into=True,
            active=True,
            sort=True)
        for var in var_iter:
            if var.stale:
                # Skip variables the solver never assigned a value to.
                continue
            name = var.name
            if bundling:
                # Strip the leading "<bundle>." prefix from the name.
                dot = name.find('.')
                assert dot >= 0
                name = name[(dot + 1):]
            f.write(f"{name},{pyo.value(var)}\n")
def write_spin_the_wheel_first_stage_solution(spcomm, opt_dict, solution_file_name,
                                              first_stage_solution_writer=first_stage_nonant_writer):
    # Deprecated: moved to WheelSpinner.write_first_stage_solution.
    raise RuntimeError(_spin_the_wheel_move_msg)
def write_spin_the_wheel_tree_solution(spcomm, opt_dict, solution_directory_name,
                                       scenario_tree_solution_writer=scenario_tree_solution_writer):
    # Deprecated: moved to WheelSpinner.write_tree_solution.
    raise RuntimeError(_spin_the_wheel_move_msg)
def local_nonant_cache(spcomm):
    # Deprecated: the nonant cache is now obtained via WheelSpinner.
    raise RuntimeError(_spin_the_wheel_move_msg)
def get_objs(scenario_instance):
    """ Return the list of active objective functions for scenario_instance.

    Args:
        scenario_instance (ConcreteModel): the scenario model to search.

    Returns:
        list of Objective: the active objectives (callers use the first).

    Raises:
        RuntimeError: if the scenario has no active objective function.
    """
    scenario_objs = scenario_instance.component_data_objects(pyo.Objective,
                    active=True, descend_into=True)
    scenario_objs = list(scenario_objs)
    # Bug fix: the original error paths referenced an undefined name `sname`,
    # which raised NameError instead of the intended message.
    if (len(scenario_objs) == 0):
        raise RuntimeError("Scenario " + scenario_instance.name + " has no active "
                           "objective functions.")
    if (len(scenario_objs) > 1):
        print("WARNING: Scenario", scenario_instance.name, "has multiple active "
              "objectives. Selecting the first objective for "
              "inclusion in the extensive form.")
    return scenario_objs
def create_EF(scenario_names, scenario_creator, scenario_creator_kwargs=None,
              EF_name=None, suppress_warnings=False,
              nonant_for_fixed_vars=True):
    """ Create a ConcreteModel of the extensive form.

        Args:
            scenario_names (list of str):
                Names for each scenario to be passed to the scenario_creator
                function.
            scenario_creator (callable):
                Function which takes a scenario name as its first argument and
                returns a concrete model corresponding to that scenario.
            scenario_creator_kwargs (dict, optional):
                Options to pass to `scenario_creator`.
            EF_name (str, optional):
                Name of the ConcreteModel of the EF.
            suppress_warnings (boolean, optional):
                If true, do not display warnings. Default False.
            nonant_for_fixed_vars (bool--optional): If True, enforces
                non-anticipativity constraints for all variables, including
                those which have been fixed. Default is True.

        Returns:
            EF_instance (ConcreteModel):
                ConcreteModel of extensive form with explicit
                non-anticipativity constraints.

        Note:
            If any of the scenarios produced by scenario_creator do not have a
            ._mpisppy_probability attribute, this function displays a warning, and assumes
            that all scenarios are equally likely.
    """
    if scenario_creator_kwargs is None:
        scenario_creator_kwargs = dict()
    scen_dict = {
        name: scenario_creator(name, **scenario_creator_kwargs)
        for name in scenario_names
    }

    if (len(scen_dict) == 0):
        raise RuntimeError("create_EF() received empty scenario list")
    elif (len(scen_dict) == 1):
        # Single scenario: no non-anticipativity needed; elide the EF wrapper
        # and patch the scenario so it quacks like an EF.
        scenario_instance = list(scen_dict.values())[0]
        scenario_instance._ef_scenario_names = list(scen_dict.keys())
        if not suppress_warnings:
            print("WARNING: passed single scenario to create_EF()")
        # special code to patch in ref_vars
        scenario_instance.ref_vars = dict()
        scenario_instance._nlens = {node.name: len(node.nonant_vardata_list)
                                    for node in scenario_instance._mpisppy_node_list}
        for node in scenario_instance._mpisppy_node_list:
            ndn = node.name
            for i in range(scenario_instance._nlens[ndn]):
                v = node.nonant_vardata_list[i]
                if (ndn, i) not in scenario_instance.ref_vars:
                    scenario_instance.ref_vars[(ndn, i)] = v
        # patch in EF_Obj: deactivate the scenario objective(s) and mirror
        # the first one as the (single-scenario) EF objective.
        scenario_objs = get_objs(scenario_instance)
        for obj_func in scenario_objs:
            obj_func.deactivate()
        obj = scenario_objs[0]
        sense = pyo.minimize if obj.is_minimizing() else pyo.maximize
        scenario_instance.EF_Obj = pyo.Objective(expr=obj.expr, sense=sense)
        return scenario_instance  #### special return for single scenario

    # Check if every scenario has a specified probability
    probs_specified = \
        all([hasattr(scen, '_mpisppy_probability') for scen in scen_dict.values()])
    if not probs_specified:
        for scen in scen_dict.values():
            scen._mpisppy_probability = 1 / len(scen_dict)
        if not suppress_warnings:
            print('WARNING: At least one scenario is missing _mpisppy_probability attribute.',
                  'Assuming equally-likely scenarios...')

    # Bug fix: forward the caller's nonant_for_fixed_vars instead of the
    # hard-coded True that silently ignored the argument.
    EF_instance = _create_EF_from_scen_dict(scen_dict,
                                            EF_name=EF_name,
                                            nonant_for_fixed_vars=nonant_for_fixed_vars)
    return EF_instance
def _create_EF_from_scen_dict(scen_dict, EF_name=None,
                              nonant_for_fixed_vars=True):
    """ Create a ConcreteModel of the extensive form from a scenario
        dictionary.

        Args:
            scen_dict (dict): Dictionary whose keys are scenario names and
                values are ConcreteModel objects corresponding to each
                scenario.
            EF_name (str--optional): Name of the resulting EF model.
            nonant_for_fixed_vars (bool--optional): If True, enforces
                non-anticipativity constraints for all variables, including
                those which have been fixed. Default is True.

        Returns:
            EF_instance (ConcreteModel): ConcreteModel of extensive form with
                explicit non-anticipativity constraints.

        Notes:
            The non-anticipativity constraints are enforced by creating
            "reference variables" at each node in the scenario tree (excluding
            leaves) and enforcing that all the variables for each scenario at
            that node are equal to the reference variables.

            This function is called directly when creating bundles for PH.

            Does NOT assume that each scenario is equally likely. Raises an
            AttributeError if a scenario object is encountered which does not
            have a ._mpisppy_probability attribute.

            Added the flag nonant_for_fixed_vars because original code only
            enforced non-anticipativity for non-fixed vars, which is not always
            desirable in the context of bundling. This allows for more
            fine-grained control.
    """
    # All scenarios must agree on min vs. max before they can share one
    # EF objective.
    is_min, clear = _models_have_same_sense(scen_dict)
    if (not clear):
        raise RuntimeError('Cannot build the extensive form out of models '
                           'with different objective senses')
    sense = pyo.minimize if is_min else pyo.maximize
    EF_instance = pyo.ConcreteModel(name=EF_name)
    EF_instance.EF_Obj = pyo.Objective(expr=0.0, sense=sense)
    # we don't strictly need these here, but it allows for eliding
    # of single scenarios and bundles when convenient
    EF_instance._mpisppy_data = pyo.Block(name="For non-Pyomo mpi-sppy data")
    EF_instance._mpisppy_model = pyo.Block(name="For mpi-sppy Pyomo additions to the scenario model")
    EF_instance._mpisppy_data.scenario_feasible = None
    EF_instance._ef_scenario_names = []
    EF_instance._mpisppy_probability = 0
    # Attach each scenario as a sub-block and accumulate the
    # probability-weighted objective.
    for (sname, scenario_instance) in scen_dict.items():
        EF_instance.add_component(sname, scenario_instance)
        EF_instance._ef_scenario_names.append(sname)
        # Now deactivate the scenario instance Objective
        scenario_objs = get_objs(scenario_instance)
        for obj_func in scenario_objs:
            obj_func.deactivate()
        obj_func = scenario_objs[0] # Select the first objective
        try:
            EF_instance.EF_Obj.expr += scenario_instance._mpisppy_probability * obj_func.expr
            EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability
        except AttributeError as e:
            raise AttributeError("Scenario " + sname + " has no specified "
                        "probability. Specify a value for the attribute "
                        " _mpisppy_probability and try again.") from e
    # Normalization does nothing when solving the full EF, but is required for
    # appropriate scaling of EFs used as bundles.
    EF_instance.EF_Obj.expr /= EF_instance._mpisppy_probability
    # For each node in the scenario tree, we need to collect the
    # nonanticipative vars and create the constraints for them,
    # which we do using a reference variable.
    ref_vars = dict() # keys are _nonant_indices (i.e. a node name and a
                      # variable number)
    ref_suppl_vars = dict()
    EF_instance._nlens = dict()
    nonant_constr = pyo.Constraint(pyo.Any, name='_C_EF_')
    EF_instance.add_component('_C_EF_', nonant_constr)
    nonant_constr_suppl = pyo.Constraint(pyo.Any, name='_C_EF_suppl')
    EF_instance.add_component('_C_EF_suppl', nonant_constr_suppl)
    for (sname, s) in scen_dict.items():
        nlens = {node.name: len(node.nonant_vardata_list)
                 for node in s._mpisppy_node_list}
        for (node_name, num_nonant_vars) in nlens.items(): # copy nlens to EF
            if (node_name in EF_instance._nlens.keys() and
                num_nonant_vars != EF_instance._nlens[node_name]):
                raise RuntimeError("Number of non-anticipative variables is "
                    "not consistent at node " + node_name + " in scenario " +
                    sname)
            EF_instance._nlens[node_name] = num_nonant_vars
        nlens_ef_suppl = {node.name: len(node.nonant_ef_suppl_vardata_list)
                          for node in s._mpisppy_node_list}
        for node in s._mpisppy_node_list:
            ndn = node.name
            for i in range(nlens[ndn]):
                v = node.nonant_vardata_list[i]
                if (ndn, i) not in ref_vars:
                    # create the reference variable as a singleton with long name
                    # xxxx maybe index by _nonant_index ???? rather than singleton VAR ???
                    ref_vars[(ndn, i)] = v
                # Add a non-anticipativity constraint, except in the case when
                # the variable is fixed and nonant_for_fixed_vars=False.
                elif (nonant_for_fixed_vars) or (not v.is_fixed()):
                    expr = LinearExpression(linear_coefs=[1,-1],
                                            linear_vars=[v,ref_vars[(ndn,i)]],
                                            constant=0.)
                    nonant_constr[(ndn,i,sname)] = (expr, 0.0)
            for i in range(nlens_ef_suppl[ndn]):
                v = node.nonant_ef_suppl_vardata_list[i]
                if (ndn, i) not in ref_suppl_vars:
                    # create the reference variable as a singleton with long name
                    # xxxx maybe index by _nonant_index ???? rather than singleton VAR ???
                    ref_suppl_vars[(ndn, i)] = v
                # Add a non-anticipativity constraint, except in the case when
                # the variable is fixed and nonant_for_fixed_vars=False.
                elif (nonant_for_fixed_vars) or (not v.is_fixed()):
                    expr = LinearExpression(linear_coefs=[1,-1],
                                            linear_vars=[v,ref_suppl_vars[(ndn,i)]],
                                            constant=0.)
                    nonant_constr_suppl[(ndn,i,sname)] = (expr, 0.0)
    EF_instance.ref_vars = ref_vars
    EF_instance.ref_suppl_vars = ref_suppl_vars
    return EF_instance
def _models_have_same_sense(models):
''' Check if every model in the provided dict has the same objective sense.
Input:
models (dict) -- Keys are scenario names, values are Pyomo
ConcreteModel objects.
Returns:
is_minimizing (bool) -- True if and only if minimizing. None if the
check fails.
check (bool) -- True only if all the models have the same sense (or
no models were provided)
Raises:
ValueError -- If any of the models has either none or multiple
active objectives.
'''
if (len(models) == 0):
return True, True
senses = [find_active_objective(scenario).is_minimizing()
for scenario in models.values()]
sense = senses[0]
check = all(val == sense for val in senses)
if (check):
return (sense == pyo.minimize), check
return None, check
def is_persistent(solver):
    # True if `solver` is a Pyomo persistent solver interface (these keep the
    # model loaded in the solver and support in-place updates).
    return isinstance(solver,
        pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver)
def ef_scenarios(ef):
    """ An iterator to give the scenario sub-models in an ef

    Args:
        ef (ConcreteModel): the full extensive form model

    Yields:
        scenario name, scenario instance (str, ConcreteModel)
    """
    yield from ((name, getattr(ef, name)) for name in ef._ef_scenario_names)
def ef_nonants(ef):
    """ An iterator over the representative Vars subject to non-anticipativity.

    Args:
        ef (ConcreteModel): the full extensive form model

    Yields:
        tree node name, full EF Var name, Var value

    Note:
        not on an EF object because not all ef's are part of an EF object
    """
    for (node_name, _), var in ef.ref_vars.items():
        yield node_name, var, pyo.value(var)
def ef_nonants_csv(ef, filename):
    """ Dump the nonant vars from an ef to a csv file; truly a dump...

    Args:
        ef (ConcreteModel): the full extensive form model
        filename (str): the full name of the csv output file
    """
    with open(filename, "w") as outfile:
        outfile.write("Node, EF_VarName, Value\n")
        for node_name, var_name, value in ef_nonants(ef):
            outfile.write(f"{node_name}, {var_name}, {value}\n")
def nonant_cache_from_ef(ef, verbose=False):
    """ Populate a nonant_cache from an ef. Also works with multi-stage.

    Args:
        ef (mpi-sppy ef): a solved ef
        verbose (bool): if True, print each cached value as it is collected

    Returns:
        nonant_cache (dict of lists): node name -> list of nonant values
    """
    nonant_cache = {}
    node_names = sorted({ndn for (ndn, _) in ef.ref_vars})
    for ndn in node_names:
        values = []
        index = 0
        # Walk consecutive indices until the node runs out of nonants.
        while (ndn, index) in ef.ref_vars:
            val = pyo.value(ef.ref_vars[(ndn, index)])
            values.append(val)
            if verbose:
                print("barfoo", index, val)
            index += 1
        nonant_cache[ndn] = values
    return nonant_cache
def ef_ROOT_nonants_npy_serializer(ef, filename):
    """ Write the ROOT node nonant values so they can be read by numpy.load.

    Args:
        ef (ConcreteModel): the full extensive form model
        filename (str): the full name of the .npy output file
    """
    root_values = (val for node, _, val in ef_nonants(ef) if node == "ROOT")
    np.save(filename, np.fromiter(root_values, float))
def write_ef_first_stage_solution(ef,
                                  solution_file_name,
                                  first_stage_solution_writer=first_stage_nonant_writer):
    """
    Write a solution file, if a solution is available, to the solution_file_name provided

    Args:
        ef : A Concrete Model of the Extensive Form (output of create_EF).
             We assume it has already been solved.
        solution_file_name : filename to write the solution to
        first_stage_solution_writer (optional) : custom first stage solution writer function

    NOTE:
        This utility is replicating WheelSpinner.write_first_stage_solution for EF
    """
    if haveMPI and global_rank != 0:
        # Under MPI only one process writes the file.
        return
    target_dir = os.path.dirname(solution_file_name)
    if target_dir != '':
        os.makedirs(target_dir, exist_ok=True)
    # Any scenario serves: first-stage nonants agree across scenarios.
    representative = getattr(ef, ef._ef_scenario_names[0])
    first_stage_solution_writer(solution_file_name, representative, bundling=False)
def write_ef_tree_solution(ef, solution_directory_name,
                           scenario_tree_solution_writer=scenario_tree_solution_writer):
    """ Write a tree solution directory, if available, to the solution_directory_name provided

    Args:
        ef : A Concrete Model of the Extensive Form (output of create_EF).
             We assume it has already been solved.
        solution_directory_name : directory to write the solution files into
        scenario_tree_solution_writer (optional) : custom scenario solution writer function

    NOTE:
        This utility is replicating WheelSpinner.write_tree_solution for EF
    """
    if haveMPI and global_rank != 0:
        # Under MPI only one process writes the solution files.
        return
    os.makedirs(solution_directory_name, exist_ok=True)
    for scenario_name, scenario in ef_scenarios(ef):
        scenario_tree_solution_writer(solution_directory_name,
                                      scenario_name,
                                      scenario,
                                      bundling=False)
def extract_num(string):
    ''' Given a string, extract the longest contiguous
        integer from the right-hand side of the string.

        Example:
            scenario324 -> 324

        TODO: Add Exception Handling
    '''
    trailing_digits = re.search(r'\d+$', string)
    return int(trailing_digits.group(0))
def node_idx(node_path, branching_factors):
    '''
    Computes a unique id for a given node in a scenario tree.
    It follows the path to the node, computing the unique id for each ascendant.

    Parameters
    ----------
    node_path : list of int
        A list of integers specifying the path of the node.
    branching_factors : list of int
        branching_factors of the scenario tree.

    Returns
    -------
    node_idx
        Node unique id.

    NOTE: Does not work with unbalanced trees.
    '''
    if node_path == []:
        # The empty path designates the ROOT node.
        return 0
    # Mixed-radix rank of the node among the nodes of its own stage.
    within_stage = 0
    for depth, child in enumerate(node_path):
        within_stage = child + branching_factors[depth] * within_stage
    # Offset by the count of all nodes in earlier stages.
    return _nodenum_before_stage(len(node_path), branching_factors) + within_stage
def _extract_node_idx(nodename,branching_factors):
"""
Parameters
----------
nodename : str
The name of a node, e.g. 'ROOT_2_0_4'.
branching_factors : list
Branching factor of a scenario tree, e.g. [3,2,8,4,3].
Returns
-------
node_idx : int
A unique integer that can be used as a key to designate this scenario.
"""
if nodename =='ROOT':
return 0
else:
to_list = [int(x) for x in re.findall(r'\d+',nodename)]
return node_idx(to_list,branching_factors)
def parent_ndn(nodename):
    """Return the name of a node's parent; ROOT has no parent (None)."""
    if nodename == 'ROOT':
        return None
    # Greedy (.+) keeps everything before the final _<number> suffix.
    match = re.search('(.+)_(\d+)', nodename)
    return match.group(1)
def option_string_to_dict(ostr):
    """ Convert a string to the standard dict for solver options.
    Intended for use in the calling program; not internal use here.

    Args:
        ostr (string): space separated options with = for arguments

    Returns:
        solver_options (dict): solver options (value None for options given
        without "="), or None when ostr is None or empty.

    Raises:
        RuntimeError: if an option contains more than one "=".
    """
    def convert_value_string_to_number(s):
        # Try float first, then int, else keep the string. (Note: because
        # float is tried first, "4" becomes 4.0, matching historic behavior.)
        try:
            return float(s)
        except ValueError:
            try:
                return int(s)
            except ValueError:
                return s

    solver_options = dict()
    if ostr is None or ostr == "":
        return None
    for this_option_string in ostr.split():
        this_option_pieces = this_option_string.strip().split("=")
        if len(this_option_pieces) == 2:
            option_key = this_option_pieces[0]
            option_value = convert_value_string_to_number(this_option_pieces[1])
            solver_options[option_key] = option_value
        elif len(this_option_pieces) == 1:
            option_key = this_option_pieces[0]
            solver_options[option_key] = None
        else:
            # Bug fix: the original referenced the undefined name
            # `this_option`, raising NameError instead of this RuntimeError.
            raise RuntimeError("Illegally formed subsolve directive"\
                               + " option=%s detected" % this_option_string)
    return solver_options
################################################################################
# Various utilities related to scenario rank maps (some may not be in use)
def scens_to_ranks(scen_count, n_proc, rank, branching_factors = None):
    """ Determine the rank assignments that are made in spbase.
    NOTE: Callers to this should call _scentree.scen_names_to_ranks
    Args:
        scen_count (int): number of scenarios
        n_proc (int): the number of intra ranks (within the cylinder)
        rank (int): my rank (i.e., intra; i.e., within the cylinder)
    Returns:
        slices (list of ranges): the indices into all all_scenario_names to assign to rank
                                 (the list entries are ranges that correspond to ranks)
        scenario_name_to_rank (dict of dict): only for multi-stage
                keys are comms (i.e., tree nodes); values are dicts with keys
                that are scenario names and values that are ranks
    """
    if not haveMPI:
        raise RuntimeError("scens_to_ranks called, but cannot import mpi4py")
    if scen_count < n_proc:
        raise RuntimeError(
            "More MPI ranks (%d) supplied than needed given the number of scenarios (%d) "
            % (n_proc, scen_count)
        )
    # for now, we are treating two-stage as a special case
    if (branching_factors is None):
        # Split the scenario indices into n_proc nearly-equal contiguous slices.
        avg = scen_count / n_proc
        slices = [list(range(int(i * avg), int((i + 1) * avg))) for i in range(n_proc)]
        return slices, None
    else:
        # OCT 2020: this block is never triggered and would fail.
        # indecision as of May 2020 (delete this comment DLW)
        # just make sure things are consistent with what xhat will do...
        # TBD: streamline
        all_scenario_names = ["ID"+str(i) for i in range(scen_count)]
        tree = _ScenTree(branching_factors, all_scenario_names)
        scenario_names_to_ranks, slices, = tree.scen_name_to_rank(n_proc, rank)
        return slices, scenario_names_to_ranks
def _nodenum_before_stage(t,branching_factors):
#How many nodes in a tree of stage 1,2,...,t ?
#Only works with branching factors
return int(sum(np.prod(branching_factors[0:i]) for i in range(t)))
def find_leaves(all_nodenames):
    """Map each name in *all_nodenames* to True iff that node is a leaf.

    WARNING: we do NOT check that the tree is well constructed.
    """
    if all_nodenames is None or all_nodenames == ['ROOT']:
        # Two-stage problem: no leaf nodes appear in all_nodenames.
        return {'ROOT': False}
    # A node is a leaf exactly when it has no child number 0.
    return {ndn: ndn + "_0" not in all_nodenames for ndn in all_nodenames}
class _TreeNode():
    #Create the subtree generated by a node, with associated scenarios
    # stages are 1-based, everything else is 0-based
    # scenario lists are stored as (first, last) indices in all_scenarios
    #This is also checking that the nodes from all_nodenames are well-named.
    def __init__(self, Parent, scenfirst, scenlast, desc_leaf_dict, name):
        """Build this node and, recursively, all of its descendants.

        Args:
            Parent (_TreeNode or None): parent node; None only for "ROOT"
            scenfirst (int): index of the first scenario under this node
            scenlast (int): index of the last scenario under this node
            desc_leaf_dict (dict): node name -> is-leaf flag, restricted to
                this node and its descendants (the output of find_leaves)
            name (str): this node's name; children are name_0, name_1, ...
        """
        #desc_leaf_dict is the output of find_leaves
        self.scenfirst = scenfirst #id of the first scenario with this node
        self.scenlast = scenlast #id of the last scenario with this node
        self.name = name
        numscens = scenlast - scenfirst + 1 #number of scenarios with this node
        self.is_leaf = False
        if Parent is None:
            assert(self.name == "ROOT")
            self.stage = 1
        else:
            self.stage = Parent.stage + 1
        if len(desc_leaf_dict)==1 and list(desc_leaf_dict.keys()) == ['ROOT']:
            #2-stage problem, we don't create leaf nodes
            self.kids = []
        elif not name+"_0" in desc_leaf_dict:
            # No child number 0 => this node is itself a leaf.
            self.is_leaf = True
            self.kids = []
        else:
            # Every scenario under this node must map to a descendant leaf.
            if len(desc_leaf_dict) < numscens:
                raise RuntimeError(f"There are more scenarios ({numscens}) than remaining leaves, for the node {name}")
            # make children
            first = scenfirst
            self.kids = list()
            # Direct children are names of the form "<name>_<digits>".
            child_regex = re.compile(name+'_\d*\Z')
            child_list = [x for x in desc_leaf_dict if child_regex.match(x) ]
            for i in range(len(desc_leaf_dict)):
                childname = name+f"_{i}"
                if not childname in desc_leaf_dict:
                    # Children must be numbered consecutively from 0; if the
                    # count of matching names disagrees, the tree is bad.
                    if len(child_list) != i:
                        raise RuntimeError("The all_nodenames argument is giving an inconsistent tree."
                                f"The node {name} has {len(child_list)} children, but {childname} is not one of them.")
                    break
                # All descendants of this child: the child itself plus any
                # name extending it with further "_<digits>" components.
                childdesc_regex = re.compile(childname+'(_\d*)*\Z')
                child_leaf_dict = {ndn:desc_leaf_dict[ndn] for ndn in desc_leaf_dict \
                                   if childdesc_regex.match(ndn)}
                #We determine the number of children of this node
                # (summing the boolean flags counts the descendant leaves,
                # i.e. the scenarios owned by this child)
                child_scens_num = sum(child_leaf_dict.values())
                last = first+child_scens_num - 1
                self.kids.append(_TreeNode(self, first, last,
                                           child_leaf_dict, childname))
                first += child_scens_num
            if last != scenlast:
                # NOTE(review): leftover debug output? confirm before removing
                print("Hello", numscens)
                raise RuntimeError(f"Tree node did not initialize correctly for node {name}")

    def stage_max(self):
        #Return the number of stages of a subtree.
        #Also check that all the subtrees have the same number of stages
        #i.e. that the leaves are always on the same stage.
        if self.is_leaf:
            return 1
        else:
            l = [child.stage_max() for child in self.kids]
            if l.count(l[0]) != len(l):
                # Unbalanced tree: descendant leaves sit at different depths.
                maxstage = max(l)+ self.stage
                minstage = min(l)+ self.stage
                raise RuntimeError("The all_nodenames argument is giving an inconsistent tree. "
                        f"The node {self.name} has descendant leaves with stages going from {minstage} to {maxstage}")
            return 1+l[0]
class _ScenTree():
    """Scenario tree built from a list of node names; used to assign
    scenarios (and tree-node comms) to MPI ranks."""
    def __init__(self, all_nodenames, ScenNames):
        if all_nodenames is None:
            all_nodenames = ['ROOT'] #2 stage problem: no leaf nodes
        self.ScenNames = ScenNames
        self.NumScens = len(ScenNames)
        first = 0
        last = self.NumScens - 1
        desc_leaf_dict = find_leaves(all_nodenames)
        # Recursively build the whole tree starting at the root.
        self.rootnode = _TreeNode(None, first, last, desc_leaf_dict, "ROOT")
        def _nonleaves(nd):
            # Depth-first collection of every non-leaf node.
            if nd.is_leaf:
                return []
            else:
                retval = [nd]
                for child in nd.kids:
                    retval+=_nonleaves(child)
                return retval
        self.nonleaves = _nonleaves(self.rootnode)
        self.NumStages = \
            2 if all_nodenames == ['ROOT'] else self.rootnode.stage_max()
        # Non-leaf nodes of the last non-leaf stage (parents of leaves).
        self.NonLeafTerminals = \
            [nd for nd in self.nonleaves if nd.stage == self.NumStages-1]
        self.NumLeaves = len(desc_leaf_dict) - len(self.nonleaves)
        # In a (well-formed) multi-stage tree each scenario is one leaf.
        if self.NumStages>2 and self.NumLeaves != self.NumScens:
            raise RuntimeError("The all_nodenames argument is giving an inconsistent tree."
                    f"There are {self.NumLeaves} leaves for this tree, but {self.NumScens} scenarios are given.")

    def scen_names_to_ranks(self, n_proc):
        """
        Args:
            n_proc: number of ranks in the cylinder (i.e., intra)
        Returns:
            scenario_names_to_rank (dict of dict):
                keys are comms (i.e., tree nodes); values are dicts with keys
                that are scenario names and values that are ranks within that comm
            slices (list of lists)
                indices correspond to ranks in self.mpicomm and the values are a list
                of scenario indices
                rank -> list of scenario indices for that rank
            list_of_ranks_per_scenario_idx (list)
                indices are scenario indices and values are the rank of that scenario
                within self.mpicomm
                scenario index -> rank
        NOTE:
            comm names are the same as the corresponding scenario tree node name
        """
        scenario_names_to_rank = dict() # scenario_name_to_rank dict of dicts
        # one processor for the cylinder is a special case
        if n_proc == 1:
            for nd in self.nonleaves:
                scenario_names_to_rank[nd.name] = {s: 0 for s in self.ScenNames}
            return scenario_names_to_rank, [list(range(self.NumScens))], [0]*self.NumScens
        scen_count = len(self.ScenNames)
        avg = scen_count / n_proc
        # rank -> list of scenario indices for that rank
        slices = [list(range(int(i * avg), int((i + 1) * avg))) for i in range(n_proc)]
        # scenario index -> rank
        list_of_ranks_per_scenario_idx = [ rank for rank, scen_idxs in enumerate(slices) for _ in scen_idxs ]
        scenario_names_to_rank["ROOT"] = { s: rank for s,rank in zip(self.ScenNames, list_of_ranks_per_scenario_idx) }
        def _recurse_do_node(node):
            # Fill in the per-comm rank map for every descendant comm.
            for child in node.kids:
                first_scen_idx = child.scenfirst
                last_scen_idx = child.scenlast
                ranks_in_node = list_of_ranks_per_scenario_idx[first_scen_idx:last_scen_idx+1]
                minimum_rank_in_node = ranks_in_node[0]
                # IMPORTANT:
                # this accords with the way SPBase.create_communicators assigns the "key" when
                # creating its comm for this node. E.g., the key is the existing rank, which
                # will then be offset by the minimum rank. As the ranks within each node are
                # contiguous, this is enough to infer the rank each scenario will have in this
                # node's comm
                within_comm_ranks_in_node = [(rank-minimum_rank_in_node) for rank in ranks_in_node]
                scenarios_in_nodes = self.ScenNames[first_scen_idx:last_scen_idx+1]
                scenario_names_to_rank[child.name] = { s : rank for s,rank in zip(scenarios_in_nodes, within_comm_ranks_in_node) }
                if child not in self.NonLeafTerminals:
                    _recurse_do_node(child)
        _recurse_do_node(self.rootnode)
        return scenario_names_to_rank, slices, list_of_ranks_per_scenario_idx
######## Utility to attach the one and only node to a two-stage scenario #######
def attach_root_node(model, firstobj, varlist, nonant_ef_suppl_list=None):
    """ Create a root node as a list to attach to a scenario model
    Args:
        model (ConcreteModel): model to which this will be attached
        firstobj (Pyomo Expression): First stage cost (e.g. model.FC)
        varlist (list): Pyomo Vars in first stage (e.g. [model.A, model.B])
        nonant_ef_suppl_list (list of pyo Var, Vardata or slices):
            vars for which nonanticipativity constraints tighten the EF
            (important for bundling)
    Note:
        attaches a list consisting of one scenario node to the model
    """
    root = scenario_tree.ScenarioNode(
        "ROOT", 1.0, 1, firstobj, None, varlist, model,
        nonant_ef_suppl_list=nonant_ef_suppl_list)
    model._mpisppy_node_list = [root]
### utilities to check the slices and the map ###
def check4losses(numscens, branching_factors,
                 scenario_names_to_rank, slices, list_of_ranks_per_scenario_idx):
    """ Check the data structures; gag and die if it looks bad.
    Args:
        numscens (int): number of scenarios
        branching_factors (list of int): branching factors
        scenario_names_to_rank (dict of dict):
            keys are comms (i.e., tree nodes); values are dicts with keys
            that are scenario names and values that are ranks within that comm
        slices (list of lists)
            indices correspond to ranks in self.mpicomm and the values are a list
            of scenario indices
            rank -> list of scenario indices for that rank
        list_of_ranks_per_scenario_idx (list)
            indices are scenario indices and values are the rank of that scenario
            within self.mpicomm
            scenario index -> rank
    """
    # Every scenario index must appear in some rank's slice.
    present = [False for _ in range(numscens)]
    for scenlist in slices:
        for scen in scenlist:
            present[scen] = True
    missingsome = False
    for scen, there in enumerate(present):
        if not there:
            print(f"Scenario {scen} is not in slices")
            missingsome = True
    if missingsome:
        raise RuntimeError("Internal error: slices is not correct")
    # Now check stage presence: each scenario must show up at every stage.
    stagepresents = {stage: [False for _ in range(numscens)]
                     for stage in range(len(branching_factors))}
    # Loop over the entire structure, marking those found as present.
    for nodename, scenlist in scenario_names_to_rank.items():
        # A node's stage equals the number of underscores in its name
        # ("ROOT" -> 0, "ROOT_1" -> 1, ...).
        stagenum = nodename.count('_')
        for s in scenlist:
            # NOTE: assumes scenario names carry the index after an
            # 8-character prefix (e.g. "Scenario17") -- TODO confirm.
            snum = int(s[8:])
            stagepresents[stagenum][snum] = True
    # BUG FIX: the original initialized `missingone` but then set and
    # tested the stale `missingsome` flag; use one flag consistently.
    missingone = False
    for stage in stagepresents:
        for scen, there in enumerate(stagepresents[stage]):
            if not there:
                print(f"Scenario number {scen} missing from stage {stage}.")
                missingone = True
    if missingone:
        raise RuntimeError("Internal error: scenario_name_to_rank")
    print("check4losses: OK")
def disable_tictoc_output():
    # Route the tictoc timer's output to the null device.
    # NOTE: the devnull handle is intentionally left open; it is closed
    # later by reenable_tictoc_output().
    f = open(os.devnull,"w")
    tt_timer._ostream = f
def reenable_tictoc_output():
    # Primarily to re-enable after a disable
    # Close the devnull stream installed by disable_tictoc_output, then
    # point the timer back at stdout.
    tt_timer._ostream.close()
    tt_timer._ostream = sys.stdout
def find_active_objective(pyomomodel):
    """Return the one active Objective on *pyomomodel*; raise otherwise."""
    active_objectives = list(pyomomodel.component_data_objects(
        Objective, active=True, descend_into=True))
    if len(active_objectives) != 1:
        raise RuntimeError("Could not identify exactly one active "
                           "Objective for model '%s' (found %d objectives)"
                           % (pyomomodel.name, len(active_objectives)))
    return active_objectives[0]
def create_nodenames_from_branching_factors(BFS):
    """
    Create the node names of a tree without building the whole tree.
    Parameters
    ----------
    BFS : list of integers
        Branching factors.
    Returns
    -------
    nodenames : list of str
        a list of the node names induced by branching_factors, including
        leaf nodes; a single branching factor (two-stage) yields ['ROOT'].
    """
    nodenames = ['ROOT']
    if len(BFS) == 1:  # two-stage: no child nodes are enumerated
        return nodenames
    frontier = ['ROOT']
    for bf in BFS:
        # Expand every node of the previous stage into bf children.
        frontier = ['%s_%i' % (parent, child)
                    for parent in frontier for child in range(bf)]
        nodenames += frontier
    return nodenames
def get_branching_factors_from_nodenames(all_nodenames):
    """Infer branching factors from a full list of node names.

    WARNING: does not work with unbalanced trees.

    Returns:
        None for a two-stage problem (a single branching factor found),
        otherwise the list of branching factors.
    """
    staget_node = "ROOT"
    branching_factors = []
    # Walk down the leftmost path; at each level count the children of the
    # current node by matching "<name>_<digits>".
    while staget_node + "_0" in all_nodenames:
        # raw strings: fixes the invalid '\d' escape sequences in the
        # original (a SyntaxWarning on modern Python)
        child_regex = re.compile(staget_node + r'_\d*\Z')
        child_list = [x for x in all_nodenames if child_regex.match(x)]
        branching_factors.append(len(child_list))
        staget_node += "_0"
    if len(branching_factors) == 1:
        # two-stage
        return None
    return branching_factors
def number_of_nodes(branching_factors):
    """Total node count of a tree with the given branching factors."""
    # The index of the very last node (largest child number at each stage)
    # is also the node count, per node_idx's numbering.
    deepest = [bf - 1 for bf in branching_factors]
    return node_idx(deepest, branching_factors)
if __name__ == "__main__":
    # Ad hoc smoke test for the scenario-tree helpers above.
    # NOTE(review): _ScenTree expects *all_nodenames* as its first argument,
    # but branching factors are passed here; find_leaves would then fail on
    # the integer entries -- confirm whether this harness is stale (it
    # mirrors the "never triggered and would fail" branch in scens_to_ranks).
    branching_factors = [2,2,2,3]
    numscens = np.prod(branching_factors)
    scennames = ["Scenario"+str(i) for i in range(numscens)]
    testtree = _ScenTree(branching_factors, scennames)
    print("nonleaves:")
    for nd in testtree.nonleaves:
        print(" ", nd.name)
    print("NonLeafTerminals:")
    for nd in testtree.NonLeafTerminals:
        print(" ", nd.name)
    n_proc = 8
    sntr, slices, ranks_per_scenario = testtree.scen_names_to_ranks(n_proc)
    print("map:")
    for ndn,v in sntr.items():
        print(ndn, v)
    print(f"slices: {slices}")
    check4losses(numscens, branching_factors, sntr, slices, ranks_per_scenario)
| 41.507821 | 130 | 0.623122 |
8ae610866a3da8d5ceaa936b1f8b732d7443438b | 4,816 | py | Python | nilmtk_contrib/disaggregate/ModelTestS2S.py | research-at-scuiot/nilmtk-contrib | 1e9907313eaa8ab9906b8d0edaf85a8155317d82 | [
"Apache-2.0"
] | null | null | null | nilmtk_contrib/disaggregate/ModelTestS2S.py | research-at-scuiot/nilmtk-contrib | 1e9907313eaa8ab9906b8d0edaf85a8155317d82 | [
"Apache-2.0"
] | null | null | null | nilmtk_contrib/disaggregate/ModelTestS2S.py | research-at-scuiot/nilmtk-contrib | 1e9907313eaa8ab9906b8d0edaf85a8155317d82 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function, division
from nilmtk.disaggregate import Disaggregator
from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten
import pandas as pd
import numpy as np
from collections import OrderedDict
from keras.models import Sequential, load_model
from sklearn.model_selection import train_test_split
class ModelTestS2S(Disaggregator):
    """Sequence-to-sequence CNN disaggregator with a 99-sample window.

    One network is trained per appliance; at prediction time the
    per-window outputs are overlap-averaged back into one time series.
    """
    def __init__(self, params):
        # params is currently unused; kept for the Disaggregator interface.
        self.MODEL_NAME = "ModelTestS2S"
        # appliance name -> trained keras model (insertion-ordered)
        self.models = OrderedDict()

    def partial_fit(self, train_main, train_appliances, **load_kwargs):
        """Train one network per appliance on windowed, normalized data.

        Args:
            train_main: list of mains DataFrames
            train_appliances: list of (appliance_name, list of DataFrames)
        """
        # Normalize and slice both mains and appliance readings into
        # overlapping 99-sample windows.
        train_main, train_appliances = self.call_preprocessing(train_main, train_appliances, 'train')
        train_main = pd.concat(train_main, axis=0)
        train_main = train_main.values.reshape((-1, 99, 1))
        new_train_appliances = []
        for app_name, app_dfs in train_appliances:
            app_df = pd.concat(app_dfs, axis=0)
            app_df_values = app_df.values.reshape((-1, 99))
            new_train_appliances.append((app_name, app_df_values))
        train_appliances = new_train_appliances
        for appliance_name, power in train_appliances:
            self.models[appliance_name] = self.return_network()
            model = self.models[appliance_name]
            # Hold out 15% of the windows for validation.
            train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15, random_state=10)
            model.fit(train_x, train_y, validation_data=[v_x, v_y], epochs=5, batch_size=512)

    def disaggregate_chunk(self, test_main_list, model=None):
        """Predict per-appliance power for each mains chunk.

        Returns:
            list of DataFrames (one per input chunk), one column per
            trained appliance.
        """
        test_main_list = self.call_preprocessing(test_main_list, submeters_lst=None, method='test')
        test_predictions = []
        for test_mains_df in test_main_list:
            disggregation_dict = {}
            test_main_array = test_mains_df.values.reshape((-1, 99, 1))
            for appliance in self.models:
                prediction = []
                model = self.models[appliance]
                prediction = model.predict(test_main_array, batch_size=512)
                # Overlap-add: window i covers samples [i, i+99); average
                # every window prediction that covers each sample.
                n = len(prediction) + 99 - 1
                sum_arr = np.zeros((n))
                counts_arr = np.zeros((n))
                for i in range(len(prediction)):
                    sum_arr[i:i + 99] += prediction[i].flatten()
                    counts_arr[i:i + 99] += 1
                for i in range(len(sum_arr)):
                    sum_arr[i] = sum_arr[i] / counts_arr[i]
                # Undo the (x - 51) / 83 normalization; 51 and 83 appear to
                # be precomputed mean/std of the training data -- TODO confirm.
                prediction = 51 + (sum_arr * 83)
                valid_predictions = prediction.flatten()
                # Power cannot be negative; clip below at zero.
                valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
                df = pd.Series(valid_predictions)
                disggregation_dict[appliance] = df
            results = pd.DataFrame(disggregation_dict, dtype='float32')
            test_predictions.append(results)
        return test_predictions

    def return_network(self):
        """Build and compile the 1D-CNN used for every appliance."""
        model = Sequential()
        model.add(Conv1D(30, 10, activation="relu", input_shape=(99, 1), strides=2))
        model.add(Conv1D(30, 8, activation='relu', strides=2))
        model.add(Conv1D(40, 6, activation='relu', strides=1))
        model.add(Conv1D(50, 5, activation='relu', strides=1))
        model.add(Dropout(.2))
        model.add(Conv1D(50, 5, activation='relu', strides=1))
        model.add(Dropout(.2))
        model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(.2))
        # One output per sample of the 99-long input window (seq2seq).
        model.add(Dense(99))
        model.compile(loss='mse', optimizer='adam')
        return model

    def call_preprocessing(self, mains_lst, submeters_lst, method):
        """Normalize readings and slice into overlapping 99-sample windows.

        Mains are zero-padded by 49 on both sides so that every original
        sample is covered by 99 windows; in 'train' mode the appliance
        readings get the same treatment.
        """
        processed_mains_lst = []
        for mains in mains_lst:
            new_mains = mains.values.flatten()
            new_mains = np.pad(new_mains, (49, 49), 'constant', constant_values=(0, 0))
            new_mains = np.array([new_mains[i:i + 99] for i in range(len(new_mains) - 99 + 1)])
            # (x - 51) / 83: presumably mean/std standardization -- TODO confirm.
            new_mains = (new_mains - 51) / 83
            processed_mains_lst.append(pd.DataFrame(new_mains))
        if method == 'train':
            appliance_list = []
            for app_index, (app_name, app_df_lst) in enumerate(submeters_lst):
                processed_app_dfs = []
                for app_df in app_df_lst:
                    new_app_readings = app_df.values.flatten()
                    new_app_readings = np.pad(new_app_readings, (49, 49), 'constant', constant_values=(0, 0))
                    new_app_readings = np.array([new_app_readings[i:i + 99] for i in range(len(new_app_readings) - 99 + 1)])
                    new_app_readings = (new_app_readings - 51) / 83
                    processed_app_dfs.append(pd.DataFrame(new_app_readings))
                appliance_list.append((app_name, processed_app_dfs))
            return processed_mains_lst, appliance_list
        else:
            return processed_mains_lst
bca52aa12957cb0172122c8e7600b77acf7ac306 | 1,378 | py | Python | halocoin/ntwrk/message.py | Globetokenllc/halocoin | cc69cac05e0489e56c84492afdfcbc54d3b02bb2 | [
"Apache-2.0"
] | 68 | 2017-07-31T17:22:12.000Z | 2022-02-14T13:27:32.000Z | halocoin/ntwrk/message.py | Globetokenllc/halocoin | cc69cac05e0489e56c84492afdfcbc54d3b02bb2 | [
"Apache-2.0"
] | 6 | 2018-11-04T09:50:58.000Z | 2022-03-25T18:21:18.000Z | halocoin/ntwrk/message.py | Globetokenllc/halocoin | cc69cac05e0489e56c84492afdfcbc54d3b02bb2 | [
"Apache-2.0"
] | 26 | 2018-01-28T23:21:51.000Z | 2022-02-22T05:39:05.000Z | import uuid
import yaml
class Order:
    """A named action plus its call arguments, tagged with a unique id."""

    def __init__(self, action, args, kwargs):
        self.action = action
        self.args = args
        self.kwargs = kwargs
        # Unique tag so a Response can be matched back to this Order.
        self.id = uuid.uuid4()
class Response:
    """The answer produced for the Order whose id matches *id*."""

    def __init__(self, id, answer):
        self.answer = answer
        self.id = id
class Message:
    """A network message: a dict of headers plus a string body.

    Serialized to/from YAML for transport (see __str__ / from_yaml).
    """

    def __init__(self, headers=None, body=""):
        # A fresh dict per instance avoids the shared-mutable-default trap.
        if headers is None:
            headers = {}
        self.__headers = headers
        self.__body = body

    def set_header(self, key, value):
        self.__headers[key] = value

    def get_header(self, key):
        """Return the header value, or None when *key* is absent."""
        return self.__headers.get(key)

    def get_headers(self):
        return self.__headers

    def set_body(self, body):
        self.__body = body

    def add_body(self, b):
        """Append *b* to the current body."""
        self.__body += b

    def get_body(self):
        return self.__body

    def __str__(self):
        return yaml.dump({'headers': self.__headers,
                          'body': self.__body})

    def __repr__(self):
        return self.__body

    @staticmethod
    def from_yaml(string):
        """Parse a YAML string (as produced by __str__) into a Message.

        Raises:
            ValueError: if the payload cannot be parsed.
        """
        try:
            # SECURITY: yaml.load without a SafeLoader can execute arbitrary
            # code from untrusted peers -- this should use yaml.safe_load.
            as_dict = yaml.load(string)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            raise ValueError('Could not load yaml representation of arrived message')
        return Message(headers=as_dict['headers'], body=as_dict['body'])
84fb56ec6942543406ca350b219ad9e63024a79b | 4,600 | py | Python | examples/campaign_management/validate_text_ad.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | examples/campaign_management/validate_text_ad.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | examples/campaign_management/validate_text_ad.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows use of the validateOnly header for an expanded text ad.
No objects will be created, but exceptions will still be thrown.
"""
import argparse
import sys
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.errors import GoogleAdsException
def main(client, customer_id, ad_group_id):
    """Build an expanded text ad and validate it server-side without creating it."""
    ad_group_ad_operation = client.get_type('AdGroupAdOperation', version='v4')
    ad_group_ad = ad_group_ad_operation.create
    ad_group_service = client.get_service('AdGroupService', version='v4')
    ad_group_ad.ad_group.value = ad_group_service.ad_group_path(customer_id,
                                                                ad_group_id)
    ad_group_ad.status = client.get_type('AdGroupAdStatusEnum',
                                         version='v4').PAUSED
    # Create an expanded text ad.
    ad_group_ad.ad.expanded_text_ad.description.value = 'Luxury Cruise to Mars'
    ad_group_ad.ad.expanded_text_ad.headline_part1.value = (
        'Visit the Red Planet in style.')
    # Adds a headline that will trigger a policy violation to demonstrate error
    # handling.
    ad_group_ad.ad.expanded_text_ad.headline_part2.value = (
        'Low-gravity fun for everyone!!')
    final_url = ad_group_ad.ad.final_urls.add()
    final_url.value = 'http://www.example.com/'
    ad_group_ad_service = client.get_service('AdGroupAdService', version='v4')
    # Attempt the mutate with validate_only=True.
    try:
        response = ad_group_ad_service.mutate_ad_group_ads(customer_id,
            [ad_group_ad_operation], partial_failure=False, validate_only=True)
        print('"Expanded text ad validated successfully.')
    except GoogleAdsException as ex:
        # This will be hit if there is a validation error from the server.
        print(f'Request with ID "{ex.request_id}" failed with status '
              f'"{ex.error.code().name}".')
        print('There may have been validation error(s) while adding expanded '
              'text ad.')
        for error in ex.failure.errors:
            # Note: Depending on the ad type, you may get back policy violation
            # errors as either PolicyFindingError or PolicyViolationError.
            # ExpandedTextAds return errors as PolicyFindingError, so only this
            # case is illustrated here. For additional details, see
            # https://developers.google.com/google-ads/api/docs/policy-exemption/overview
            if (error.error_code.policy_finding_error ==
                    client.get_type('PolicyFindingErrorEnum',
                                    version='v4').POLICY_FINDING):
                # Print each policy topic entry attached to the finding.
                if error.details.policy_finding_details:
                    count = 1
                    details = (error.details.policy_finding_details
                               .policy_topic_entries)
                    for entry in details:
                        print(f'{count}) Policy topic entry: \n{entry}\n')
                        count += 1
            else:
                # Non-policy errors: show the message and offending field path.
                print(f'\tNon-policy finding error with message '
                      f'"{error.message}".')
                if error.location:
                    for field_path_element in (
                            error.location.field_path_elements):
                        print(f'\t\tOn field: {field_path_element.field_name}')
        sys.exit(1)
if __name__ == '__main__':
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    google_ads_client = GoogleAdsClient.load_from_storage()
    parser = argparse.ArgumentParser(
        description='Shows how to use the ValidateOnly header.')
    # The following argument(s) should be provided to run the example.
    parser.add_argument('-c', '--customer_id', type=str,
                        required=True, help='The Google Ads customer ID.')
    parser.add_argument('-a', '--ad_group_id', type=str,
                        required=True, help='The Ad Group ID.')
    args = parser.parse_args()
    main(google_ads_client, args.customer_id, args.ad_group_id)
| 45.544554 | 89 | 0.667609 |
43d7ad52bd8f2935303dcb6aa2a3e48cf7df578c | 741 | py | Python | iris_pipeline/datamodels/flat.py | zonca/iris_pipeline | a4c20a362037a94f66427521bb5cd5da1c918dd7 | [
"BSD-3-Clause"
] | null | null | null | iris_pipeline/datamodels/flat.py | zonca/iris_pipeline | a4c20a362037a94f66427521bb5cd5da1c918dd7 | [
"BSD-3-Clause"
] | 38 | 2019-03-07T01:25:03.000Z | 2022-03-01T13:02:29.000Z | iris_pipeline/datamodels/flat.py | zonca/iris_pipeline | a4c20a362037a94f66427521bb5cd5da1c918dd7 | [
"BSD-3-Clause"
] | 1 | 2019-02-28T02:39:06.000Z | 2019-02-28T02:39:06.000Z | from .tmt_reference import TMTReferenceFileModel
from jwst.datamodels.dynamicdq import dynamic_mask
__all__ = ['TMTFlatModel']
class TMTFlatModel(TMTReferenceFileModel):
    """
    A data model for 2D flat-field images.
    Parameters
    __________
    data : numpy float32 array
         The science data
    dq : numpy uint32 array
         Data quality array
    err : numpy float32 array
         Error array
    dq_def : numpy table
         DQ flag definitions
    """
    # Schema file that declares this model's arrays and attributes.
    schema_url = "flat.schema.yaml"

    def __init__(self, init=None, **kwargs):
        super().__init__(init=init, **kwargs)
        # Expand the raw DQ array using the dq_def flag-definition table.
        self.dq = dynamic_mask(self)
        # Implicitly create arrays: self-assignment forces the underlying
        # DataModel to materialize schema-default arrays (not a no-op).
        self.dq = self.dq
        self.err = self.err
| 20.583333 | 50 | 0.645074 |
db8f1887733fad98ef9424dae76cc492679eebb5 | 4,591 | py | Python | src/entsoe/entsoe.py | olemagnp/entsoeAPI | 658b84c632299c8fc8f5a8289527904b2fc1a2ee | [
"MIT"
] | null | null | null | src/entsoe/entsoe.py | olemagnp/entsoeAPI | 658b84c632299c8fc8f5a8289527904b2fc1a2ee | [
"MIT"
] | null | null | null | src/entsoe/entsoe.py | olemagnp/entsoeAPI | 658b84c632299c8fc8f5a8289527904b2fc1a2ee | [
"MIT"
] | null | null | null | from typing import List, Optional
import aiohttp
import datetime
from .consts import DAY_AHEAD_DOCUMENT, DATE_FORMAT
from .xmlreader import day_ahead_price_list
from .forex import Forex
class Price:
    """One price interval [begin, end) in the source currency and, when an
    exchange rate is supplied, converted to the target currency."""

    def __init__(
        self,
        begin: datetime.datetime,
        end: datetime.datetime,
        price_orig: float,
        rate: Optional[float] = None,
    ) -> None:
        self.begin = begin
        self.end = end
        self.price_orig = price_orig
        # Without a rate the converted price is unknown.
        self.price_target = None if rate is None else price_orig * rate

    def __str__(self) -> str:
        return f"Price [begin={self.begin}, end={self.end}, price_orig={self.price_orig}, price_target={self.price_target}]"

    def __repr__(self) -> str:
        return self.__str__()
def __init__(
self,
access_token: str,
area: str,
currency: str = "auto",
measurement_unit: str = "kWh",
session: Optional[aiohttp.ClientSession] = None,
url: str = "https://transparency.entsoe.eu/api",
forex: Optional[Forex] = None,
) -> None:
self.access_token = access_token
self.area = area
self.currency = currency
self.url = url
self.session = session if session is not None else aiohttp.ClientSession()
self.forex = forex
self.original_currency = None
self.measurement_unit = None
self.start = None
self.end = None
self.resolution = None
self.exchange_rate = None
if measurement_unit.lower() == "mwh":
self.measurement_unit = "MWh"
elif measurement_unit.lower() == "kwh":
self.measurement_unit = "kWh"
elif measurement_unit.lower() == "wh":
self.measurement_unit = "Wh"
self.points: List[Price] = []
def get_unit_multiplier(self, unit):
mults = {"": 1, "k": 1e3, "m": 1e6}
unit = unit.lower()
self_unit = self.measurement_unit.lower()
if not (unit.endswith("wh") and self_unit.endswith("wh")):
raise ValueError("Both units must be multipliers of Wh")
return mults[self_unit[0]] / mults[unit[0]]
async def update(self, day: datetime.datetime):
"""
Fetch day-ahead prices.
Args:
day: The day to fetch prices. I.e., if `day` is today, prices for today will be fetched. If this datetime is naive, it is assumed to be in utc time. This will likely lead to problems with other timezones.
"""
day_before = day.replace(hour=0, minute=0, second=0, microsecond=0)
if day_before.tzinfo != datetime.timezone.utc:
if day_before.tzinfo is None:
day_before = day_before.replace(tzinfo=datetime.timezone.utc)
else:
day_before = day_before.astimezone(datetime.timezone.utc)
start_point = day_before
start_date_str = start_point.strftime(DATE_FORMAT)
end_point = start_point + datetime.timedelta(days=1)
end_date_str = end_point.strftime(DATE_FORMAT)
async with self.session.get(
self.url,
params={
"securityToken": self.access_token,
"documentType": DAY_AHEAD_DOCUMENT,
"in_Domain": self.area,
"out_Domain": self.area,
"periodStart": start_date_str,
"periodEnd": end_date_str,
},
) as response:
res = await response.read()
d = day_ahead_price_list(res)
await self._update_state(d)
async def _update_state(self, state_dict):
"""
Update state with data from entsoe
Args:
state_dict: Dict with elements read from the entsoe response.
"""
self.original_currency = state_dict["currency"]
self.start = state_dict["start"]
self.end = state_dict["end"]
self.resolution = state_dict["resolution"]
if (self.original_currency != self.currency) and (self.forex is not None):
rates = await self.forex.get_rate(self.original_currency, self.currency)
rate = rates[self.currency]
else:
rate = None
self.exchange_rate = rate
unit_mult = self.get_unit_multiplier(state_dict["measurement_unit"])
self.points = [
Price(p["start"], p["end"], p["amount"] * unit_mult, rate)
for p in state_dict["points"]
]
self.points.sort(key=lambda p: p.begin)
| 32.792857 | 216 | 0.595513 |
f8bed013a0e62e4856103d343a6a6169cdbd0ba9 | 121 | py | Python | federatedml/ftl/data_util/log_util.py | chenj133/FATE | 7065fc73ab83f83e699efec69ff8efb499159ef4 | [
"Apache-2.0"
] | 32 | 2020-06-12T08:39:58.000Z | 2022-03-20T06:57:08.000Z | federatedml/ftl/data_util/log_util.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
] | 14 | 2019-11-13T11:25:36.000Z | 2021-12-14T21:31:59.000Z | federatedml/ftl/data_util/log_util.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
def create_shape_msg(components):
    """Concatenate each component's .shape into one space-separated string."""
    # Keeps the trailing space the original accumulation loop produced.
    return "".join(str(c.shape) + " " for c in components)
| 20.166667 | 33 | 0.570248 |
b854e72ac1d23fb35b34405a6981f8596a56cdd5 | 1,729 | py | Python | Courses/Udacity/CS101/Lesson_3_Problem_Set/08-Sudoku/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | [
"Unlicense"
] | 1 | 2019-02-13T12:02:26.000Z | 2019-02-13T12:02:26.000Z | Courses/Udacity/CS101/Lesson_3_Problem_Set/08-Sudoku/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | [
"Unlicense"
] | 1 | 2018-08-13T15:58:33.000Z | 2018-08-13T15:58:33.000Z | Courses/Udacity/CS101/Lesson_3_Problem_Set/08-Sudoku/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | [
"Unlicense"
] | 2 | 2017-08-10T20:01:29.000Z | 2021-07-01T08:39:13.000Z | # THREE GOLD STARS
# Sudoku [http://en.wikipedia.org/wiki/Sudoku]
# is a logic puzzle where a game
# is defined by a partially filled
# 9 x 9 square of digits where each square
# contains one of the digits 1,2,3,4,5,6,7,8,9.
# For this question we will generalize
# and simplify the game.
# Define a procedure, check_sudoku,
# that takes as input a square list
# of lists representing an n x n
# sudoku puzzle solution and returns the boolean
# True if the input is a valid
# sudoku square and returns the boolean False
# otherwise.
# A valid sudoku square satisfies these
# two properties:
# 1. Each column of the square contains
# each of the whole numbers from 1 to n exactly once.
# 2. Each row of the square contains each
# of the whole numbers from 1 to n exactly once.
# You may assume the the input is square and contains at
# least one row and column.
correct = [[1,2,3],
[2,3,1],
[3,1,2]]
incorrect = [[1,2,3,4],
[2,3,1,3],
[3,1,2,3],
[4,4,4,4]]
incorrect2 = [[1,2,3,4],
[2,3,1,4],
[4,1,2,3],
[3,4,1,2]]
incorrect3 = [[1,2,3,4,5],
[2,3,1,5,6],
[4,5,2,1,3],
[3,4,5,2,1],
[5,6,4,3,2]]
incorrect4 = [['a','b','c'],
['b','c','a'],
['c','a','b']]
incorrect5 = [ [1, 1.5],
[1.5, 1]]
def check_sudoku():
#print check_sudoku(incorrect)
#>>> False
#print check_sudoku(correct)
#>>> True
#print check_sudoku(incorrect2)
#>>> False
#print check_sudoku(incorrect3)
#>>> False
#print check_sudoku(incorrect4)
#>>> False
#print check_sudoku(incorrect5)
#>>> False
| 20.341176 | 59 | 0.568537 |
9019eadbce473f3836d5f71ae2288569cb90edd9 | 2,242 | py | Python | tests/unit/test_plugin_manager.py | jmcgill298/flake8 | 4439ea202526b50154d287f3e581222a4c86d782 | [
"MIT"
] | null | null | null | tests/unit/test_plugin_manager.py | jmcgill298/flake8 | 4439ea202526b50154d287f3e581222a4c86d782 | [
"MIT"
] | null | null | null | tests/unit/test_plugin_manager.py | jmcgill298/flake8 | 4439ea202526b50154d287f3e581222a4c86d782 | [
"MIT"
] | null | null | null | """Tests for flake8.plugins.manager.PluginManager."""
import mock
from flake8.plugins import manager
def create_entry_point_mock(name):
    """Create a mocked EntryPoint exposing only a ``name`` attribute."""
    entry_point = mock.Mock(spec=['name'])
    entry_point.name = name
    return entry_point
@mock.patch('pkg_resources.iter_entry_points')
def test_calls_pkg_resources_on_instantiation(iter_entry_points):
    """Verify that we call iter_entry_points when we create a manager."""
    # No plugins need to exist for this check.
    iter_entry_points.return_value = []
    manager.PluginManager(namespace='testing.pkg_resources')
    # The namespace given to the manager must be forwarded verbatim.
    iter_entry_points.assert_called_once_with('testing.pkg_resources')
@mock.patch('pkg_resources.iter_entry_points')
def test_calls_pkg_resources_creates_plugins_automaticaly(iter_entry_points):
    """Verify that we create Plugins on instantiation."""
    iter_entry_points.return_value = [
        create_entry_point_mock('T100'),
        create_entry_point_mock('T200'),
    ]
    plugin_mgr = manager.PluginManager(namespace='testing.pkg_resources')
    iter_entry_points.assert_called_once_with('testing.pkg_resources')
    # Each discovered entry point becomes a Plugin keyed by its name.
    assert 'T100' in plugin_mgr.plugins
    assert 'T200' in plugin_mgr.plugins
    assert isinstance(plugin_mgr.plugins['T100'], manager.Plugin)
    assert isinstance(plugin_mgr.plugins['T200'], manager.Plugin)
@mock.patch('pkg_resources.iter_entry_points')
def test_handles_mapping_functions_across_plugins(iter_entry_points):
    """Verify we can use the PluginManager call functions on all plugins."""
    entry_point_mocks = [
        create_entry_point_mock('T100'),
        create_entry_point_mock('T200'),
    ]
    iter_entry_points.return_value = entry_point_mocks
    plugin_mgr = manager.PluginManager(namespace='testing.pkg_resources')
    plugins = [plugin_mgr.plugins[name] for name in plugin_mgr.names]
    # map() must visit every registered plugin, in the same order as .names.
    assert list(plugin_mgr.map(lambda x: x)) == plugins
@mock.patch('pkg_resources.iter_entry_points')
def test_local_plugins(iter_entry_points):
    """Verify PluginManager can load given local plugins."""
    iter_entry_points.return_value = []
    plugin_mgr = manager.PluginManager(
        namespace='testing.pkg_resources',
        local_plugins=['X = path.to:Plugin']
    )
    # Local plugin specs are parsed as '<name> = <module>:<object>'.
    assert plugin_mgr.plugins['X'].entry_point.module_name == 'path.to'
| 35.587302 | 77 | 0.751115 |
8663391b40caff605eff5865f9f4d4f5ea30a7d7 | 5,631 | py | Python | Dijkstra-AirportConnections-Lab2/20064993_lab_2_Daniyal Maniar/modifiedDijkstras_20064993.py | DanMan259/CMPE365 | 4f9ce90e1c289f173c99eba135d0a16df70523a4 | [
"Apache-2.0"
] | 1 | 2019-09-17T00:41:42.000Z | 2019-09-17T00:41:42.000Z | Dijkstra-AirportConnections-Lab2/20064993_lab_2_Daniyal Maniar/modifiedDijkstras_20064993.py | DanMan259/CMPE365_Algorithms | 4f9ce90e1c289f173c99eba135d0a16df70523a4 | [
"Apache-2.0"
] | null | null | null | Dijkstra-AirportConnections-Lab2/20064993_lab_2_Daniyal Maniar/modifiedDijkstras_20064993.py | DanMan259/CMPE365_Algorithms | 4f9ce90e1c289f173c99eba135d0a16df70523a4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 00:39:59 2019
@author: Daniyal Maniar
I certify that this submission contains my own work.
The modified algorithms was based upon the pseudo code provided in the definition of Lab 1.
"""
class graph:
    """Flight-network container: a vertex count plus a name -> node mapping."""

    def __init__(self, maxVertex):
        """Create an empty network sized for ``maxVertex`` vertices."""
        self.maxVertex = maxVertex
        self.vertexs = {}

    def addNode(self, node):
        """Register ``node`` in the network, keyed by its name."""
        self.vertexs[node.name] = node
class node:
    """A vertex in the flight network, holding its outgoing connections."""

    def __init__(self, name):
        """Create a vertex identified by ``name`` with no connections yet."""
        self.name = name
        self.neighbours = []

    def addNeighbours(self, node, dep, arr):
        """Record a flight to ``node`` departing at ``dep``, arriving at ``arr``."""
        connection = {"node": node, "departure": dep, "arrival": arr}
        self.neighbours.append(connection)
class dijkstras:
    """Timetable-aware Dijkstra: relaxation minimizes arrival time, and an
    edge (flight) is only usable if it departs after we arrive at its
    origin vertex."""
    def __init__(self, graph):
        # This attaches a graph to the algorithm
        self.graph = graph
    def reset(self):
        """Initialize/reset the per-run bookkeeping for every vertex."""
        # This initializes and resets all the required data structures
        self.reached = {}  # vertex -> True once its best arrival is final
        self.estimate = {}  # vertex -> tentative best arrival time
        self.candidate = {}  # vertex -> True while in the frontier
        self.cost = {}  # vertex -> finalized arrival time
        self.predecessor = {}  # vertex -> {"name": previous vertex, "arrival": time}
        for vertex in self.graph.vertexs:
            self.reached[vertex] = False
            self.estimate[vertex] = float('inf')
            self.candidate[vertex] = False
            self.cost[vertex] = float('inf')
            self.predecessor[vertex] = None
    def modifiedAlgorithm(self, start, end):
        """Return the earliest arrival time from ``start`` to ``end``,
        or None if ``end`` is unreachable."""
        # This modified algorithm minimizes the time from a starting vertex to destination vertex
        # This algorithm minimizes the arrival time
        self.reset()
        self.cost[start] = 0
        self.reached[start] = True
        # Seed the frontier with every flight leaving the start vertex.
        for neighbour in self.graph.vertexs[start].neighbours:
            if (self.estimate[neighbour["node"].name] > (neighbour["arrival"])):
                self.estimate[neighbour["node"].name] = neighbour["arrival"]
                self.predecessor[neighbour["node"].name] = {"name":start, "arrival": neighbour["arrival"]}
                self.candidate[neighbour["node"].name] = True
        for _ in range(self.graph.maxVertex):
            # Linear scan for the candidate with the smallest arrival
            # estimate (no priority queue is used, so this is O(V) per pick).
            best_candidate_estimate = float('inf')
            for vertex in self.graph.vertexs:
                if (self.candidate[vertex] == True) and (self.estimate[vertex] < best_candidate_estimate):
                    v = vertex
                    best_candidate_estimate = self.estimate[vertex]
            self.cost[v] = self.estimate[v]
            self.reached[v] = True
            self.candidate[v] = False
            for neighbour in self.graph.vertexs[v].neighbours:
                # A connecting flight is only usable if it departs after we
                # arrive at v (or v has no predecessor, i.e. it is the start).
                if (self.predecessor[v] and (self.predecessor[v]["arrival"] < neighbour["departure"])) or not self.predecessor[v]:
                    if (self.reached[neighbour["node"].name] == False):
                        if (neighbour["arrival"] < self.estimate[neighbour["node"].name]):
                            self.estimate[neighbour["node"].name] = neighbour["arrival"]
                            self.candidate[neighbour["node"].name] = True
                            self.predecessor[neighbour["node"].name] = {"name":v, "arrival": neighbour["arrival"]}
        if self.reached[end]:
            return self.cost[end]
        return None
    def printSingleOutput(self, startVertex, endingVertex):
        """Run the search and print the optimal route, one flight per line."""
        # This prints out the minimal route from a starting to ending vertex.
        # It uses a predecessor structure to build a route definition.
        self.modifiedAlgorithm(startVertex,endingVertex)
        print ("Optimal route from "+str(startVertex)+" to "+str(endingVertex)+":\n")
        current = endingVertex
        path = []
        # Walk predecessors back from the destination to rebuild the route.
        while current!= startVertex:
            if not self.predecessor[current]:
                break
            previous = self.predecessor[current]["name"]
            path.append("Fly from "+str(previous)+" to "+str(current)+".")
            current = self.predecessor[current]["name"]
        # path was built destination-first, so print it in reverse.
        for i in range(len(path)):
            print(path[len(path)-i-1])
        if not self.predecessor[endingVertex]:
            arrival = "0"
        else:
            arrival = str(self.predecessor[endingVertex]["arrival"])
        print("\nArrive at "+str(endingVertex)+" at time "+arrival+".\n")
    def saveAllOutputs(self):
        # This prints out all of the combinations of starting and ending vertex
        for i in range(self.graph.maxVertex):
            for j in range(self.graph.maxVertex):
                self.printSingleOutput(i,j)
if __name__ == '__main__':
    # Open the file and read the data
    with open("2019_Lab_2_flights_real_data.txt") as f:
        s = f.read()
    s = s.strip()
    s = s.splitlines()
    # Set the first line to be the size of graph and remove it from the array
    testGraph = graph(int(s.pop(0)))
    # Add all the nodes to the graph
    for i in range(testGraph.maxVertex):
        testGraph.addNode(node(i))
    # Add all the neighbours for each node
    # Each remaining line is: <from> <to> <departure> <arrival>
    for i in s:
        i = i.split()
        testGraph.vertexs[int(i[0])].addNeighbours(testGraph.vertexs[int(i[1])], int(i[2]), int(i[3]))
    # Initialize the class
    testingAlgo = dijkstras(testGraph)
    # Print Output for test case
    startVertex = 93
    endingVertex = 49
    testingAlgo.printSingleOutput(startVertex,endingVertex)
    #testingAlgo.saveAllOutputs()
| 44.690476 | 130 | 0.609483 |
e87136101cf28498d759f5d3430b83a4857780d5 | 22,435 | py | Python | uws_server/provenance.py | mservillat/uws-server | 4f555bfbb18f31ae0a41f8bbaa92bf9fb4a964d5 | [
"MIT"
] | 1 | 2016-04-06T11:00:10.000Z | 2016-04-06T11:00:10.000Z | uws_server/provenance.py | mservillat/uws_server | 4f555bfbb18f31ae0a41f8bbaa92bf9fb4a964d5 | [
"MIT"
] | null | null | null | uws_server/provenance.py | mservillat/uws_server | 4f555bfbb18f31ae0a41f8bbaa92bf9fb4a964d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by Mathieu Servillat
# Licensed under MIT (https://github.com/mservillat/uws-server/blob/master/LICENSE)
"""
Export UWS job description to a ProvDocument following the W3C PROV standard
"""
import prov
from voprov.models.model import VOProvDocument, VOProvBundle, VOPROV, PROV
# from prov.model import ProvDocument, ProvBundle
from voprov.visualization.dot import prov_to_dot
from pydotplus.graphviz import InvocationException
from .settings import *
from . import uws_classes
# examples:
# http://prov.readthedocs.org/en/latest/usage.html#simple-prov-document
# http://lists.g-vo.org/pipermail/prov-adhoc/2015-June/000025.html
INTERNAL_PROVENANCE_FILENAME = "internal_provenance.json"
def job2prov(jobid, user, depth=1, direction='BACK', members=0, agents=1, model='IVOA',
             descriptions=0, configuration=1, attributes=1,
             show_used=False, show_generated=False):
    """
    Create ProvDocument based on job description

    :param jobid: UWS job
    :param user: current user
    :param depth: provenance walk budget; decremented by 2 at each recursion,
        0 stops the walk (negative values never stop on depth alone)
    :param direction: 'BACK' follows used entities, 'FORWARD' follows generated ones
    :param members: only forwarded to recursive calls -- TODO confirm intended use
    :param agents: include agent records (contact/owner) when truthy
    :param model: 'W3C' converts the result via get_w3c(); anything else keeps VOProv
    :param descriptions: 0 none, 1 activity description, >1 adds usage /
        generation / parameter descriptions
    :param configuration: include parameter/configuration records when truthy
    :param attributes: NOTE(review): appears unused in this function body -- confirm
    :param show_used: force inclusion of used entities regardless of depth
    :param show_generated: force inclusion of generated entities regardless of depth
    :return: ProvDocument
    """
    # job.jdl.content = {
    #     'description': description,
    #     'parameters': parameters,
    #     'results': results,
    #     'executionduration': execdur,
    #     'quote': quote
    # }
    # parameters[pname] = {
    #     'type': p.get('type'),
    #     'required': p.get('required'),
    #     'default': p.get('default'),
    #     'description': list(p)[0].text,
    # }
    # results[rname] = {
    #     'content_type': r.get('content_type'),
    #     'default': r.get('default'),
    #     'description': list(r)[0].text,
    # }
    # Init
    pdoc = VOProvDocument()
    other_pdocs = []
    w3c = False
    if model == 'W3C':
        w3c = True
    # Load job
    job = uws_classes.Job('', jobid, user, get_attributes=True, get_parameters=True, get_results=True)
    # Load JDL
    job.jdl.read(job.jobname, jobid=job.jobid)
    # Declaring namespaces for various prefixes used in the example
    pdoc.set_default_namespace(VOPROV.uri)
    pdoc.add_namespace('voprov', VOPROV.uri)
    pdoc.add_namespace('prov', PROV.uri)
    pdoc.add_namespace('foaf', 'http://xmlns.com/foaf/0.1/')
    pdoc.add_namespace('uws', 'http://www.ivoa.net/xml/UWS/v1.1#')
    pdoc.add_namespace('opus_user', BASE_URL + '/user/')
    ns_result = 'opus_store'
    pdoc.add_namespace(ns_result, BASE_URL + '/store/?ID=')
    pdoc.add_namespace('opus_job', BASE_URL + '/rest/')
    pdoc.add_namespace('opus_jdl', BASE_URL + '/jdl/')
    pdoc.add_namespace('media-type', 'https://www.w3.org/ns/iana/media-types/')
    ns_jdl = job.jobname
    pdoc.add_namespace(ns_jdl, BASE_URL + '/jdl/' + job.jobname + '/votable#')
    # ns_job = job.jobname + '/' + job.jobid
    # pdoc.add_namespace(ns_job, BASE_URL + '/jdl/' + job.jobname + '/votable#')
    # Activity: one per UWS job, bounded by the job's start/end times
    act_id = 'opus_job:' + job.jobname + '/' + job.jobid
    act = pdoc.activity(act_id, startTime=job.start_time, endTime=job.end_time)
    act.add_attributes({
        'prov:label': job.jobname,  # + '/' + job.jobid,
    })
    # Descriptions
    if descriptions:
        adescid = '#' + job.jobname + "#description"
        adescbundle = pdoc.bundle(adescid)
        setattr(adescbundle, "_label", adescid)
        # ActivityDescription
        adesc = adescbundle.activityDescription('opus_jdl:' + job.jobname, job.jobname)
        adesc.add_attributes({
            'prov:label': job.jobname,
        })
        pdoc.isDescribedBy(act, adesc)  #, other_attributes={
        #     'prov:type': 'voprov:Description',
        # })
        adattrs = {}
        # 'annotation' maps to voprov:description; other JDL keys keep their name.
        for pkey in ['name', 'annotation', 'version', 'type', 'subtype', 'doculink']:
            pvalue = job.jdl.content.get(pkey)
            if pvalue:
                if pkey == 'annotation':
                    adattrs['voprov:description'] = pvalue
                else:
                    adattrs['voprov:' + pkey] = pvalue
        for pkey in ['executionDuration', 'quote']:
            pvalue = job.jdl.content.get(pkey)
            if pvalue:
                adattrs['uws:' + pkey] = pvalue
        adesc.add_attributes(adattrs)
        if descriptions > 1:
            # UsageDescription
            uds = []
            for ename, edict in job.jdl.content.get('used', {}).items():
                ed = ""
                edattrs = {}
                for ekey, evalue in edict.items():
                    if evalue and ekey not in ['default']:
                        if evalue:
                            if ekey == 'content_type':
                                # EntityDescription
                                ed = 'media-type:' + evalue
                                pdoc.entityDescription(ed, evalue, other_attributes={'prov:label': evalue})
                            elif ekey == 'annotation':
                                edattrs['voprov:description'] = evalue
                            else:
                                edattrs['voprov:' + ekey] = evalue
                edattrs['prov:label'] = ename
                uds.append(adescbundle.usageDescription('opus_jdl:' + job.jobname + '#' + ename, adesc, ename))
                uds[-1].add_attributes(edattrs)
                if ed:
                    pdoc.isRelatedTo(ed, uds[-1])
            # GenerationDescription
            gds = []
            for ename, edict in job.jdl.content.get('generated', {}).items():
                ed = ""
                edattrs = {}
                for ekey, evalue in edict.items():
                    if evalue and ekey not in ['default']:
                        if evalue:
                            if ekey == 'content_type':
                                # EntityDescription
                                ed = 'media-type:' + evalue
                                pdoc.entityDescription(ed, evalue, other_attributes={'prov:label': evalue})
                            elif ekey == 'annotation':
                                edattrs['voprov:description'] = evalue
                            else:
                                edattrs['voprov:' + ekey] = evalue
                edattrs['prov:label'] = ename
                gds.append(adescbundle.generationDescription('opus_jdl:' + job.jobname + '#' + ename, adesc, ename))
                gds[-1].add_attributes(edattrs)
                if ed:
                    pdoc.isRelatedTo(ed, gds[-1])
            # Configuration descriptions
            if configuration:
                # ParameterDescription
                pds = []
                for pname, pdict in job.jdl.content.get('parameters', {}).items():
                    pdattrs = {}
                    for pkey, pvalue in pdict.items():
                        if pvalue:
                            if pkey == 'annotation':
                                pdattrs['voprov:description'] = pvalue
                            else:
                                pdattrs['voprov:' + pkey] = pvalue
                    pdattrs['prov:label'] = pname
                    pdname = 'opus_jdl:' + job.jobname + '#' + pname
                    pds.append(adescbundle.parameterDescription(pdname, adesc, pname, pdict.get('type', 'char')))
                    pds[-1].add_attributes(pdattrs)
        # Agent: contact for the job in ActivityDescription
        if agents:
            contact_name = job.jdl.content.get('contact_name')
            contact_email = job.jdl.content.get('contact_email')
            if contact_email and not contact_name:
                contact_name = contact_email
            if contact_name:
                # Is contact name in the server user list?
                contact_id = contact_name
                users_dicts = job.storage.get_users()
                users = [u['name'] for u in users_dicts]
                if contact_id in users:
                    contact_id = 'opus_user:' + contact_id
                contact = pdoc.agent(contact_id)
                contact.add_attributes({
                    'prov:label': contact_name,
                    #'foaf:name': contact_name,
                })
                if contact_email:
                    contact.add_attributes({
                        'foaf:mbox': "<mailto:{}>".format(contact_email)
                    })
                # Link to ActivityDescription
                pdoc.influence(adesc, contact, other_attributes={
                    'prov:role': 'contact'
                })
    # Agent: owner of the job
    if agents:
        owner = pdoc.agent('opus_user:' + job.owner)
        owner.add_attributes({
            'prov:label': job.owner,
            #'foaf:name': job.owner,
        })
        act.wasAssociatedWith(owner, attributes={
            'prov:role': 'owner'
        })
    # Parameters
    if configuration:
        # Add Parameter Collection?
        # all_params = pdoc.collection('opus_job:' + job.jobname + '/' + job.jobid + '/parameters')
        aconfid = '#' + job.jobid + '#configuration'  # + '/' + job.jobid + '/parameters'
        aconfbundle = pdoc.bundle(aconfid)
        setattr(aconfbundle, "_label", aconfid)
        params = []
        for pname, pdict in job.jdl.content.get('parameters', {}).items():
            # Add Parameter
            if pname in job.parameters:
                # the parameter was defined for this activity
                value = job.parameters[pname]['value']
            else:
                # the default value was used
                value = pdict['default']
            # Truncate long values for the human-readable label only.
            str_value = str(value)
            show_value = (str_value[:25] + '...') if len(str_value) > 25 else str_value
            pattrs = {
                'prov:label': pname + " = " + show_value,
            }
            params.append(aconfbundle.parameter('opus_job:' + job.jobname + '/' + job.jobid + '/parameters/' + pname, pname, value))
            params[-1].add_attributes(pattrs)
            # Activity-Parameter relation
            pdoc.wasConfiguredBy(act, params[-1], "Parameter")
            # Link to ParameterDescription
            if descriptions > 1:
                pdname = 'opus_jdl:' + job.jobname + '#' + pname
                pdoc.isDescribedBy(params[-1], pdname)
            else:
                # Add attributes to the parameter directly
                pdattrs = {}
                for pkey, pvalue in pdict.items():
                    if pvalue:
                        if pkey == 'annotation':
                            pdattrs['voprov:description'] = pvalue
                        else:
                            pdattrs['voprov:' + pkey] = pvalue
                params[-1].add_attributes(pdattrs)
            # Member of Collection
            # all_params.hadMember(params[-1])
        # Activity-Collection relation
        # pdoc.influence(act, all_params)
    logger.debug(pdoc._bundles)
    # Used entities
    used_entities = []
    if (depth != 0 and direction == 'BACK') or show_used:
        # Explore used entities for the activity if depth > 0
        e_in = []
        for pname, pdict in job.jdl.content.get('used', {}).items():
            # Look for value and entity record and get pqn (local id)
            value = job.parameters.get(pname, {}).get('value', '')
            label = pname
            entity_id = job.parameters.get(pname, {}).get('entity_id', None)
            logger.debug('Search for entity {} (pname={}, value={})'.format(entity_id, pname, value))
            entity = job.storage.get_entity(entity_id, silent=True)
            if entity:
                # Entity recorded in DB
                used_entities.append(entity_id)
                pqns = [ns_result + ':' + entity_id]
                label = entity['file_name']
                location = entity['access_url']
                logger.debug('Input entity found: {}'.format(entity))
            elif '//' in value:
                # Entity is a file or a URL (not a value or an ID)
                pqns = [value.split('//')[-1]]  # removes file:// if present, or longer path
                used_entities.append(pqns[0])
                location = value
                logger.debug('No record found for input entity {}={}, assuming it is a file or a URL'.format(pname, value))
            else:
                # Entity is a value or an ID
                location = None
                # Multi-valued parameters are split on their declared separator
                # (a comma in the value overrides the default separator).
                if '*' in pdict['multiplicity'] or int(pdict['multiplicity']) > 1:
                    sep = pdict.get('separator', ' ')
                    if ',' in value:
                        sep = ','
                    pqns = value.split(sep)
                else:
                    pqns = [value]
            # For each entity corresponding to this role
            for pqn in pqns:
                # Add Entity
                e_in.append(pdoc.entity(pqn))
                e_in[-1].add_attributes({
                    'prov:label': label,
                    # 'prov:value': value,
                    'prov:location': location,
                    # 'prov:type': pdict['datatype'],
                })
                # Add Used relation
                act.used(e_in[-1], attributes={
                    'prov:role': pname
                })
                # Link to description
                ed = ""
                if 'content_type' in pdict:
                    ed = 'media-type:' + pdict['content_type']
                if descriptions > 1 and ed:
                    pdoc.isDescribedBy(pqn, ed)
            # Explores entity origin if known entity and depth > 1
            if entity and depth != 1 and direction == 'BACK':
                if depth != 1 and entity.get('jobid'):
                    # Recurse into the job that generated this input entity.
                    other_pdocs.append(
                        job2prov(
                            entity['jobid'], job.user,
                            depth=depth-2, direction=direction, members=members, agents=agents, model=model,
                            descriptions=descriptions, configuration=configuration,
                            show_generated=True
                        )
                    )
    # Generated entities (if depth > 0)
    if (depth != 0 and direction == 'FORWARD') or show_generated:
        # Check if internal provenance is given, add as a generated bundle? or directly?
        ipfile = os.path.join(JOBDATA_PATH, jobid, INTERNAL_PROVENANCE_FILENAME)
        ipbundle = None
        if os.path.isfile(ipfile):
            ipdoc = prov.read(ipfile)
            ipid = "#" + job.jobid + "#internal_provenance"
            ipbundle = VOProvBundle(namespaces=ipdoc.namespaces, identifier=ipid)
            setattr(ipbundle, "_label", ipid)
            #inpbundle._identifier = "id:" + job.jobid + "_prov"
            ipbundle.update(ipdoc)
            #inpprov = inpdoc.bundle(job.jobid + "_prov")
            pdoc.add_bundle(ipbundle)
            pdoc.wasGeneratedBy(ipbundle.identifier, act)
            #pdoc.update(inpdoc)
            #for rec in inpdoc.get_records():
            #    if "Activity" in str(rec.get_type()):
            #        inf_act = pdoc.get_record(rec.identifier)[0]
            #        inf_act.wasInformedBy(act)
        # Add job results as entities
        e_out = []
        for rname in job.results:
            # Skip bookkeeping results that are not science products.
            if rname not in ['stdout', 'stderr', 'provjson', 'provxml', 'provsvg']:
                entity_id = job.results[rname]['entity_id']
                # entity_id = job.jobid + '_' + rname
                # if entity_id:
                entity = job.storage.get_entity(entity_id, silent=True)
                if entity:
                    entity_id = entity['entity_id']
                    rqn = ns_result + ':' + entity_id
                    content_type = entity['content_type']
                else:
                    entity_id = rname
                    rqn = ':' + entity_id
                    content_type = job.results[rname]['content_type']
                # Only show result if it is not already used (case of config files sometimes)
                if entity_id not in used_entities:
                    # Add Entity
                    e_out.append(pdoc.entity(rqn))
                    eattrs = {
                        'prov:location': job.results[rname]['url'],
                        'voprov:content_type': content_type,
                    }
                    pdict = {}
                    if entity:
                        eattrs['prov:label'] = entity['file_name']
                        eattrs['voprov:result_name'] = entity['result_name']
                        pdict = job.jdl.content['generated'].get(entity['result_name'], {})
                    if not 'prov:label' in eattrs:
                        eattrs['prov:label'] = rname
                    e_out[-1].add_attributes(eattrs)
                    # Add Generation relation
                    e_out[-1].wasGeneratedBy(act, attributes={
                        'prov:role': rname,
                    })
                    #for e in e_in:
                    #    e_out[-1].wasDerivedFrom(e)
                    #if agent:
                    #    e_out[-1].wasAttributedTo(owner, attributes={
                    #        'prov:role': 'owner',
                    #    })
                    # Add derivation link from internal provenance
                    # NOTE(review): this assumes get_entity returns a dict-like
                    # (not None) on a miss -- confirm, else .get would raise here.
                    if entity.get("from_entity", None) and ipbundle:
                        e_from = ipbundle.get_record(entity["from_entity"])[0]
                        e_out[-1].wasDerivedFrom(e_from)
                        #copy_act = pdoc.activity(act_id + '_copy_to_datastore', other_attributes={"prov:label": "copy_to_datastore"})
                        #copy_act.wasInformedBy(act)
                        #copy_act.used(entity["from_entity"])
                        #e_out[-1].wasGeneratedBy(copy_act)
                    # Add EntityDescription if exists
                    if pdict and descriptions > 1:
                        if 'content_type' in pdict:
                            ed = 'media-type:' + pdict['content_type']
                            pdoc.isDescribedBy(rqn, ed)
                    # Search forward for activities that used this entity
                    if entity and depth != 1 and direction == 'FORWARD':
                        used_query = job.storage.session.query(job.storage.Used).filter_by(entity_id=entity_id)
                        used_rows = used_query.all()
                        for row in used_rows:
                            other_pdocs.append(
                                job2prov(
                                    row.jobid, job.user,
                                    depth=depth-2, direction=direction, members=members, agents=agents, model=model,
                                    descriptions=descriptions, configuration=configuration,
                                    show_used=True
                                )
                            )
    # Merge all prov documents
    for opdoc in other_pdocs:
        pdoc.update(opdoc)
    pdoc = pdoc.unified()
    # Filter similar relations
    pdoc = pdoc.unified_relations()
    if w3c:
        pdoc = pdoc.get_w3c()
    return pdoc
def unified_relations(bundle):
    """Deduplicate records in *bundle*, recursing into sub-bundles.

    Relations are fingerprinted by their string form, other records by their
    own hash; for each duplicate group only the last occurrence is kept.
    Mutates *bundle* in place and returns it.
    """
    if bundle.is_document():
        for key in bundle._bundles:
            sub = bundle._bundles[key]
            # Some bundles lack a _label attribute; give them an empty one.
            if not hasattr(sub, "_label"):
                setattr(sub, "_label", "")
            bundle._bundles[key] = unified_relations(sub)
    fingerprints = [
        hash(str(rec)) if rec.is_relation() else hash(rec)
        for rec in bundle._records
    ]
    for fp in list(set(fingerprints)):
        # Drop earlier occurrences until a single one (the last) remains.
        while fingerprints.count(fp) > 1:
            first = fingerprints.index(fp)
            bundle._records.pop(first)
            fingerprints.pop(first)
    return bundle
def prov2json(prov_doc, fname):
    """
    Write ProvDocument as a JSON file
    :param prov_doc: ProvDocument to serialize
    :param fname: destination file name
    :return: None
    """
    prov_doc.serialize(fname, format='json')
def prov2xml(prov_doc, fname):
    """
    Write ProvDocument as an XML file
    :param prov_doc: ProvDocument to serialize
    :param fname: destination file name
    :return: None
    """
    prov_doc.serialize(fname, format='xml')
def prov2dot(prov_doc, attributes=True, direction='BT'):
    """
    Convert ProvDocument to dot graphical format
    :param prov_doc: ProvDocument to render
    :param attributes: show element and relation attributes in the graph
    :param direction: graph layout direction (e.g. 'BT' bottom-to-top)
    :return: pydotplus Dot graph
    """
    dot = prov_to_dot(prov_doc, use_labels=True, show_element_attributes=attributes, show_relation_attributes=attributes, direction=direction)
    return dot
def prov2svg(prov_doc, fname):
    """
    Render ProvDocument as an SVG file
    :param prov_doc: ProvDocument to render
    :param fname: destination file name
    :return: None
    """
    try:
        dot = prov2dot(prov_doc)
        svg_content = dot.create(format="svg")
    except InvocationException:
        # Graphviz is unavailable: fall back to a small placeholder icon.
        # Bug fix: the fallback must be bytes (the file is opened in "wb"
        # mode, so writing a str raised TypeError) and the XML declaration
        # must be the very first bytes of the file to be valid SVG.
        svg_content = (
            b'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n'
            b'<svg xmlns="http://www.w3.org/2000/svg" version="1.0"\n'
            b'     width="38" height="32" viewBox="0 0 39.875 33.6667">\n'
            b'<path style="stroke: none; fill: #323296;" d="M 10,0 L 30.5,0 39.875,17.5 30.5,33.6667 10,33.6667 L 0,17.5 L 10,0 z"/>\n'
            b'</svg>\n'
        )
    with open(fname, "wb") as f:
        f.write(svg_content)
def prov2svg_content(prov_doc, attributes=True, direction='BT'):
    """
    Convert ProvDocument to dot graphical format then svg
    :param prov_doc: ProvDocument to render
    :param attributes: show element and relation attributes in the graph
    :param direction: graph layout direction (e.g. 'BT' bottom-to-top)
    :return: SVG content as bytes
    """
    try:
        dot = prov2dot(prov_doc, attributes=attributes, direction=direction)
        svg_content = dot.create(format="svg")
    except InvocationException:
        # Graphviz is unavailable: fall back to a small placeholder icon.
        # Bug fix: return bytes like dot.create() does (the original returned
        # a str here, giving callers an inconsistent type), and keep the XML
        # declaration first so the content is valid SVG.
        svg_content = (
            b'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n'
            b'<svg xmlns="http://www.w3.org/2000/svg" version="1.0"\n'
            b'     width="38" height="32" viewBox="0 0 39.875 33.6667">\n'
            b'<path style="stroke: none; fill: #323296;" d="M 10,0 L 30.5,0 39.875,17.5 30.5,33.6667 10,33.6667 L 0,17.5 L 10,0 z"/>\n'
            b'</svg>\n'
        )
    return svg_content
def prov2png_content(prov_doc, attributes=True, direction='BT'):
    """
    Convert ProvDocument to dot graphical format then png
    :param prov_doc: ProvDocument to render
    :param attributes: show element and relation attributes in the graph
    :param direction: graph layout direction (e.g. 'BT' bottom-to-top)
    :return: PNG content as bytes (empty bytes if Graphviz is unavailable)
    """
    try:
        dot = prov2dot(prov_doc, attributes=attributes, direction=direction)
        png_content = dot.create(format="png")
    except InvocationException:
        # Bug fix: return bytes to match dot.create() (the original returned
        # the str "" here, giving callers an inconsistent type).
        png_content = b""
    return png_content
| 42.171053 | 142 | 0.526499 |
84e23ebf04bd79873abc9d5988a2bb631ace9bec | 4,571 | py | Python | implementations/VAE/utils.py | STomoya/animeface | 37b3cd26097d7874559d4c152e41e5712b7a1a42 | [
"MIT"
] | 61 | 2020-06-06T08:25:09.000Z | 2022-03-28T13:30:10.000Z | implementations/VAE/utils.py | OrigamiXx/animeface | 8724006df99ba7ef369e837d8294350ea733611b | [
"MIT"
] | 13 | 2020-07-02T02:41:14.000Z | 2021-05-09T14:24:58.000Z | implementations/VAE/utils.py | OrigamiXx/animeface | 8724006df99ba7ef369e837d8294350ea733611b | [
"MIT"
] | 8 | 2020-10-03T18:51:16.000Z | 2022-02-05T18:18:01.000Z |
import functools
from utils.argument import add_args
import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda.amp import autocast, GradScaler
from torchvision.utils import save_image
from dataset import AnimeFace, DanbooruPortrait
from utils import Status, save_args, add_args
from nnutils import sample_nnoise, get_device
from .model import VAE, init_weight
# Sum-reduced MSE so the reconstruction term is on the same (summed) scale
# as the KL divergence term computed in KL_divergence.
recons = nn.MSELoss(reduction='sum')
def KL_divergence(mu, logvar):
    """KL(q(z|x) || N(0, I)) for a diagonal Gaussian, summed over all elements."""
    elementwise = 1 + logvar - mu.pow(2) - logvar.exp()
    return -0.5 * torch.sum(elementwise)
def train(
    dataset, max_iter, test_sampler,
    model, optimizer,
    device, amp, save=1000
):
    """Train the VAE for max_iter batches, periodically saving samples,
    reconstructions and model weights every `save` batches.

    NOTE(review): `amp` enables mixed precision via autocast/GradScaler.
    """
    status = Status(max_iter)
    scaler = GradScaler() if amp else None
    while status.batches_done < max_iter:
        for src in dataset:
            optimizer.zero_grad()
            src = src.to(device)
            with autocast(amp):
                # Forward pass: reconstruction plus latent statistics.
                dst, _, mu, logvar = model(src)
                # loss = summed reconstruction error + KL regularizer
                recons_loss = recons(dst, src)
                kld = KL_divergence(mu, logvar)
                loss = recons_loss + kld
            if scaler is not None:
                scaler.scale(loss).backward()
                scaler.step(optimizer)
            else:
                loss.backward()
                optimizer.step()
            # save: decoded random samples, reconstruction grid and weights
            if status.batches_done % save == 0:
                model.eval()
                with torch.no_grad():
                    images = model.decoder(test_sampler())
                model.train()
                save_image(
                    images, f'implementations/VAE/result/{status.batches_done}.jpg',
                    nrow=4, normalize=True, value_range=(-1, 1))
                recons_images = _image_grid(src, dst)
                save_image(
                    recons_images, f'implementations/VAE/result/recons_{status.batches_done}.jpg',
                    nrow=6, normalize=True, value_range=(-1, 1))
                torch.save(model.state_dict(), f'implementations/VAE/result/model_{status.batches_done}.pt')
            # updates (NaN losses are logged as 0 to keep the plot finite)
            loss_dict = dict(
                loss=loss.item() if not torch.isnan(loss).any() else 0
            )
            status.update(**loss_dict)
            if scaler is not None:
                scaler.update()
            if status.batches_done == max_iter:
                break
    status.plot_loss()
def _image_grid(src, dst, num_images=6):
srcs = src.chunk(src.size(0), dim=0)
dsts = dst.chunk(dst.size(0), dim=0)
images = []
for index, (src, dst) in enumerate(zip(srcs, dsts)):
images.extend([src, dst])
if index == num_images - 1:
break
return torch.cat(images, dim=0)
def main(parser):
    """Parse CLI args, build the dataset, VAE and optimizer, then train.

    NOTE(review): the help strings below contain typos ('dwonsample',
    'faltten'); they are user-facing text and left unchanged here.
    """
    parser = add_args(parser,
        dict(
            image_channels  = [3, 'number of channels in input images'],
            z_dim           = [256, 'dimension of extracted feature vector z'],
            channels        = [32, 'channel width multiplier'],
            max_channels    = [1024, 'maximum channels'],
            enc_target_resl = [4, 'resolution to dwonsample to before faltten'],
            disable_bias    = [False, 'do not use bias'],
            norm_name       = ['bn', 'normalization layer name'],
            act_name        = ['relu', 'activation function name'],
            lr              = [0.0002, 'learning rate'],
            beta1 = [0.9, 'beta1'],
            beta2 = [0.999, 'beta2'],
            test_images = [16, 'number of images for evaluation']))
    args = parser.parse_args()
    save_args(args)
    use_bias = not args.disable_bias
    betas = (args.beta1, args.beta2)
    amp = not args.disable_amp
    device = get_device(not args.disable_gpu)
    # dataset
    dataset = AnimeFace.asloader(
        args.batch_size, (args.image_size, args.min_year),
        pin_memory=not args.disable_gpu)
    # Fixed-noise sampler used for periodic evaluation images during training.
    test_sampler = functools.partial(
        sample_nnoise, size=(args.test_images, args.z_dim), device=device
    )
    if args.max_iters < 0:
        args.max_iters = len(dataset) * args.default_epochs
    model = VAE(
        args.image_size, args.z_dim, args.image_channels,
        args.channels, args.max_channels, args.enc_target_resl,
        use_bias, args.norm_name, args.act_name
    )
    # model.apply(init_weight)
    model.to(device)
    # optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=betas)
    train(
        dataset, args.max_iters, test_sampler,
        model, optimizer,
        device, amp, 1000
    )
| 32.190141 | 108 | 0.574491 |
070ce44832b267d9a562ce775e0c5c72e2b9de8b | 2,559 | py | Python | src/utils/model.py | hahmadraz/SqueezeDet-MultiHead | db8a9c19c633271eb1c90447558e14f4cd98f670 | [
"MIT"
] | 17 | 2020-04-20T06:16:07.000Z | 2022-02-28T13:04:04.000Z | src/utils/model.py | ELanning/SqueezeDet-PyTorch | b0d80daa0147c968d43f3c38bc416394419c4bcc | [
"MIT"
] | 8 | 2020-06-15T08:55:32.000Z | 2022-03-12T00:24:35.000Z | src/utils/model.py | ELanning/SqueezeDet-PyTorch | b0d80daa0147c968d43f3c38bc416394419c4bcc | [
"MIT"
] | 9 | 2020-06-09T07:17:07.000Z | 2022-03-04T05:42:02.000Z | import torch
import torch.nn as nn
def load_model(model, model_path):
    """Load a training checkpoint into *model*, tolerating layout drift.

    Keys prefixed 'module.' (from nn.DataParallel) are stripped; parameters
    whose shape differs or that are missing on either side are skipped and
    reported instead of raising.
    """
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    print('loaded model {}, epoch {}'.format(model_path, checkpoint['epoch']))
    state_dict_ = checkpoint['state_dict']
    state_dict = {}
    # Strip the 7-char 'module.' prefix added by DataParallel (but keep
    # genuine 'module_list...' layer names intact).
    for k in state_dict_:
        if k.startswith('module') and not k.startswith('module_list'):
            state_dict[k[7:]] = state_dict_[k]
        else:
            state_dict[k] = state_dict_[k]
    model_state_dict = model.state_dict()
    # check loaded parameters and created model parameters
    success_loaded = True
    for layer in state_dict:
        if layer in model_state_dict:
            if state_dict[layer].shape != model_state_dict[layer].shape:
                # Shape mismatch: keep the model's own weights for this layer.
                success_loaded = False
                print('Skip loading param {}, required shape{}, loaded shape{}.'.format(
                    layer, model_state_dict[layer].shape, state_dict[layer].shape))
                state_dict[layer] = model_state_dict[layer]
        else:
            success_loaded = False
            print('Drop param {} in pre-trained model.'.format(layer))
    for layer in model_state_dict:
        if layer not in state_dict:
            # Layer exists in the model but not the checkpoint: keep as-is.
            success_loaded = False
            print('Param {} not found in pre-trained model.'.format(layer))
            state_dict[layer] = model_state_dict[layer]
    model.load_state_dict(state_dict, strict=False)
    print('Model successfully loaded.' if success_loaded else
          'The model does not fully load the pre-trained weight.')
    return model
def load_official_model(model, model_path):
    """
    load official models from https://pytorch.org/docs/stable/torchvision/models.html
    :param model: target model whose backbone lives under a 'base.' attribute
    :param model_path: path to the downloaded torchvision .pth file
    :return: model with the converted weights loaded via load_model
    """
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    layers = list(state_dict.keys())
    # Re-key every layer under 'base.' so names line up with this model.
    for layer in layers:
        new_layer = 'base.' + layer
        state_dict[new_layer] = state_dict.pop(layer)
    # Wrap in this project's checkpoint format and persist alongside the
    # original file ('*_converted.pth') before loading.
    checkpoint = {'epoch': 0,
                  'state_dict': state_dict}
    converted_model_path = model_path.replace('.pth', '_converted.pth')
    torch.save(checkpoint, converted_model_path)
    return load_model(model, converted_model_path)
def save_model(model, path, epoch):
    """Serialize the model weights plus the epoch counter to *path*."""
    # Unwrap DataParallel so checkpoints load without the 'module.' prefix.
    wrapped = isinstance(model, torch.nn.DataParallel)
    state_dict = model.module.state_dict() if wrapped else model.state_dict()
    torch.save({'epoch': epoch, 'state_dict': state_dict}, path)
| 35.541667 | 88 | 0.651817 |
923666a69090bd364e7650a41cfb6b6978dd0b98 | 12,546 | py | Python | models/vnect_model.py | cDenius/VNect-tensorflow | 9bc2102359bd52e05be4fcce67670ebf1f9f5fac | [
"Apache-2.0"
] | null | null | null | models/vnect_model.py | cDenius/VNect-tensorflow | 9bc2102359bd52e05be4fcce67670ebf1f9f5fac | [
"Apache-2.0"
] | null | null | null | models/vnect_model.py | cDenius/VNect-tensorflow | 9bc2102359bd52e05be4fcce67670ebf1f9f5fac | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import tensorflow.contrib as tc
import pickle
import numpy as np
class VNect():
def __init__(self, input_size):
self.is_training = False
self.input_holder = tf.placeholder(dtype=tf.float32,
shape=(None, input_size, input_size, 3))
self._create_network()
def _create_network(self):
# Conv
self.conv1 = tc.layers.conv2d(self.input_holder, kernel_size=7, num_outputs=64, stride=2, scope='conv1')
self.pool1 = tc.layers.max_pool2d(self.conv1, kernel_size=3, padding='same', scope='pool1')
# Residual block 2a
self.res2a_branch2a = tc.layers.conv2d(self.pool1, kernel_size=1, num_outputs=64, scope='res2a_branch2a')
self.res2a_branch2b = tc.layers.conv2d(self.res2a_branch2a, kernel_size=3, num_outputs=64, scope='res2a_branch2b')
self.res2a_branch2c = tc.layers.conv2d(self.res2a_branch2b, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2a_branch2c')
self.res2a_branch1 = tc.layers.conv2d(self.pool1, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2a_branch1')
self.res2a = tf.add(self.res2a_branch2c, self.res2a_branch1, name='res2a_add')
self.res2a = tf.nn.relu(self.res2a, name='res2a')
# Residual block 2b
self.res2b_branch2a = tc.layers.conv2d(self.res2a, kernel_size=1, num_outputs=64, scope='res2b_branch2a')
self.res2b_branch2b = tc.layers.conv2d(self.res2b_branch2a, kernel_size=3, num_outputs=64, scope='res2b_branch2b')
self.res2b_branch2c = tc.layers.conv2d(self.res2b_branch2b, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2b_branch2c')
self.res2b = tf.add(self.res2b_branch2c, self.res2a, name='res2b_add')
self.res2b = tf.nn.relu(self.res2b, name='res2b')
# Residual block 2c
self.res2c_branch2a = tc.layers.conv2d(self.res2b, kernel_size=1, num_outputs=64, scope='res2c_branch2a')
self.res2c_branch2b = tc.layers.conv2d(self.res2b_branch2a, kernel_size=3, num_outputs=64, scope='res2c_branch2b')
self.res2c_branch2c = tc.layers.conv2d(self.res2b_branch2b, kernel_size=1, num_outputs=256, activation_fn=None, scope='res2c_branch2c')
self.res2c = tf.add(self.res2c_branch2c, self.res2b, name='res2c_add')
self.res2c = tf.nn.relu(self.res2b, name='res2c')
# Residual block 3a
self.res3a_branch2a = tc.layers.conv2d(self.res2c, kernel_size=1, num_outputs=128, stride=2, scope='res3a_branch2a')
self.res3a_branch2b = tc.layers.conv2d(self.res3a_branch2a, kernel_size=3, num_outputs=128, scope='res3a_branch2b')
self.res3a_branch2c = tc.layers.conv2d(self.res3a_branch2b, kernel_size=1, num_outputs=512, activation_fn=None,scope='res3a_branch2c')
self.res3a_branch1 = tc.layers.conv2d(self.res2c, kernel_size=1, num_outputs=512, activation_fn=None, stride=2, scope='res3a_branch1')
self.res3a = tf.add(self.res3a_branch2c, self.res3a_branch1, name='res3a_add')
self.res3a = tf.nn.relu(self.res3a, name='res3a')
# Residual block 3b
self.res3b_branch2a = tc.layers.conv2d(self.res3a, kernel_size=1, num_outputs=128, scope='res3b_branch2a')
self.res3b_branch2b = tc.layers.conv2d(self.res3b_branch2a, kernel_size=3, num_outputs=128,scope='res3b_branch2b')
self.res3b_branch2c = tc.layers.conv2d(self.res3b_branch2b, kernel_size=1, num_outputs=512, activation_fn=None,scope='res3b_branch2c')
self.res3b = tf.add(self.res3b_branch2c, self.res3a, name='res3b_add')
self.res3b = tf.nn.relu(self.res3b, name='res3b')
# Residual block 3c
self.res3c_branch2a = tc.layers.conv2d(self.res3b, kernel_size=1, num_outputs=128, scope='res3c_branch2a')
self.res3c_branch2b = tc.layers.conv2d(self.res3c_branch2a, kernel_size=3, num_outputs=128,scope='res3c_branch2b')
self.res3c_branch2c = tc.layers.conv2d(self.res3c_branch2b, kernel_size=1, num_outputs=512, activation_fn=None,scope='res3c_branch2c')
self.res3c = tf.add(self.res3c_branch2c, self.res3b, name='res3c_add')
self.res3c = tf.nn.relu(self.res3c, name='res3c')
# Residual block 3d
self.res3d_branch2a = tc.layers.conv2d(self.res3c, kernel_size=1, num_outputs=128, scope='res3d_branch2a')
self.res3d_branch2b = tc.layers.conv2d(self.res3d_branch2a, kernel_size=3, num_outputs=128,scope='res3d_branch2b')
self.res3d_branch2c = tc.layers.conv2d(self.res3d_branch2b, kernel_size=1, num_outputs=512, activation_fn=None,scope='res3d_branch2c')
self.res3d = tf.add(self.res3d_branch2c, self.res3b, name='res3d_add')
self.res3d = tf.nn.relu(self.res3d, name='res3d')
# Residual block 4a
self.res4a_branch2a = tc.layers.conv2d(self.res3d, kernel_size=1, num_outputs=256, stride=2, scope='res4a_branch2a')
self.res4a_branch2b = tc.layers.conv2d(self.res4a_branch2a, kernel_size=3, num_outputs=256,scope='res4a_branch2b')
self.res4a_branch2c = tc.layers.conv2d(self.res4a_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None,scope='res4a_branch2c')
self.res4a_branch1 = tc.layers.conv2d(self.res3d, kernel_size=1, num_outputs=1024, activation_fn=None, stride=2, scope='res4a_branch1')
self.res4a = tf.add(self.res4a_branch2c, self.res4a_branch1, name='res4a_add')
self.res4a = tf.nn.relu(self.res4a, name='res4a')
# Residual block 4b
self.res4b_branch2a = tc.layers.conv2d(self.res4a, kernel_size=1, num_outputs=256, scope='res4b_branch2a')
self.res4b_branch2b = tc.layers.conv2d(self.res4b_branch2a, kernel_size=3, num_outputs=256, scope='res4b_branch2b')
self.res4b_branch2c = tc.layers.conv2d(self.res4b_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4b_branch2c')
self.res4b = tf.add(self.res4b_branch2c, self.res4a, name='res4b_add')
self.res4b = tf.nn.relu(self.res4b, name='res4b')
# Residual block 4c
self.res4c_branch2a = tc.layers.conv2d(self.res4b, kernel_size=1, num_outputs=256, scope='res4c_branch2a')
self.res4c_branch2b = tc.layers.conv2d(self.res4c_branch2a, kernel_size=3, num_outputs=256, scope='res4c_branch2b')
self.res4c_branch2c = tc.layers.conv2d(self.res4c_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4c_branch2c')
self.res4c = tf.add(self.res4c_branch2c, self.res4b, name='res4c_add')
self.res4c = tf.nn.relu(self.res4c, name='res4c')
# Residual block 4d
self.res4d_branch2a = tc.layers.conv2d(self.res4c, kernel_size=1, num_outputs=256, scope='res4d_branch2a')
self.res4d_branch2b = tc.layers.conv2d(self.res4d_branch2a, kernel_size=3, num_outputs=256, scope='res4d_branch2b')
self.res4d_branch2c = tc.layers.conv2d(self.res4d_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4d_branch2c')
self.res4d = tf.add(self.res4d_branch2c, self.res4c, name='res4d_add')
self.res4d = tf.nn.relu(self.res4d, name='res4d')
# Residual block 4e
self.res4e_branch2a = tc.layers.conv2d(self.res4d, kernel_size=1, num_outputs=256, scope='res4e_branch2a')
self.res4e_branch2b = tc.layers.conv2d(self.res4e_branch2a, kernel_size=3, num_outputs=256, scope='res4e_branch2b')
self.res4e_branch2c = tc.layers.conv2d(self.res4e_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4e_branch2c')
self.res4e = tf.add(self.res4e_branch2c, self.res4d, name='res4e_add')
self.res4e = tf.nn.relu(self.res4e, name='res4e')
# Residual block 4f
self.res4f_branch2a = tc.layers.conv2d(self.res4e, kernel_size=1, num_outputs=256, scope='res4f_branch2a')
self.res4f_branch2b = tc.layers.conv2d(self.res4f_branch2a, kernel_size=3, num_outputs=256, scope='res4f_branch2b')
self.res4f_branch2c = tc.layers.conv2d(self.res4f_branch2b, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res4f_branch2c')
self.res4f = tf.add(self.res4f_branch2c, self.res4e, name='res4f_add')
self.res4f = tf.nn.relu(self.res4f, name='res4f')
# Residual block 5a
self.res5a_branch2a_new = tc.layers.conv2d(self.res4f, kernel_size=1, num_outputs=512, scope='res5a_branch2a_new')
self.res5a_branch2b_new = tc.layers.conv2d(self.res5a_branch2a_new, kernel_size=3, num_outputs=512, scope='res5a_branch2b_new')
self.res5a_branch2c_new = tc.layers.conv2d(self.res5a_branch2b_new, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res5a_branch2c_new')
self.res5a_branch1_new = tc.layers.conv2d(self.res4f, kernel_size=1, num_outputs=1024, activation_fn=None, scope='res5a_branch1_new')
self.res5a = tf.add(self.res5a_branch2c_new, self.res5a_branch1_new, name='res5a_add')
self.res5a = tf.nn.relu(self.res5a, name='res5a')
# Residual block 5b
self.res5b_branch2a_new = tc.layers.conv2d(self.res5a, kernel_size=1, num_outputs=256, scope='res5b_branch2a_new')
self.res5b_branch2b_new = tc.layers.conv2d(self.res5b_branch2a_new, kernel_size=3, num_outputs=128, scope='res5b_branch2b_new')
self.res5b_branch2c_new = tc.layers.conv2d(self.res5b_branch2b_new, kernel_size=1, num_outputs=256, scope='res5b_branch2c_new')
# Transpose Conv
self.res5c_branch1a = tf.layers.conv2d_transpose(self.res5b_branch2c_new, kernel_size=4, filters=63, activation=None, strides=2, padding='same', use_bias=False, name='res5c_branch1a')
self.res5c_branch2a = tf.layers.conv2d_transpose(self.res5b_branch2c_new, kernel_size=4, filters=128, activation=None, strides=2, padding='same', use_bias=False, name='res5c_branch2a')
self.bn5c_branch2a = tc.layers.batch_norm(self.res5c_branch2a, scale=True, is_training=self.is_training, scope='bn5c_branch2a')
self.bn5c_branch2a = tf.nn.relu(self.bn5c_branch2a)
self.res5c_delta_x, self.res5c_delta_y, self.res5c_delta_z = tf.split(self.res5c_branch1a, num_or_size_splits=3, axis=3)
self.res5c_branch1a_sqr = tf.multiply(self.res5c_branch1a, self.res5c_branch1a, name='res5c_branch1a_sqr')
self.res5c_delta_x_sqr, self.res5c_delta_y_sqr, self.res5c_delta_z_sqr = tf.split(self.res5c_branch1a_sqr, num_or_size_splits=3, axis=3)
self.res5c_bone_length_sqr = tf.add(tf.add(self.res5c_delta_x_sqr, self.res5c_delta_y_sqr), self.res5c_delta_z_sqr)
self.res5c_bone_length = tf.sqrt(self.res5c_bone_length_sqr)
self.res5c_branch2a_feat = tf.concat([self.bn5c_branch2a, self.res5c_delta_x, self.res5c_delta_y, self.res5c_delta_z, self.res5c_bone_length],
axis=3, name='res5c_branch2a_feat')
self.res5c_branch2b = tc.layers.conv2d(self.res5c_branch2a_feat, kernel_size=3, num_outputs=128, scope='res5c_branch2b')
self.res5c_branch2c = tf.layers.conv2d(self.res5c_branch2b, kernel_size=1, filters=84, activation=None, use_bias=False, name='res5c_branch2c')
self.heapmap, self.x_heatmap, self.y_heatmap, self.z_heatmap = tf.split(self.res5c_branch2c, num_or_size_splits=4, axis=3)
@property
def all_vars(self):
return tf.global_variables()
def load_weights(self, sess, weight_file):
# Read pretrained model file
model_weights = pickle.load(open(weight_file, 'rb'))
# For each layer each var
with tf.variable_scope('', reuse=True):
for variable in tf.global_variables():
var_name = variable.name.split(':')[0]
self._assign_weights_from_dict(var_name, model_weights, sess)
def _assign_weights_from_dict(self, var_name, model_weights, sess):
with tf.variable_scope('', reuse=True):
var_tf = tf.get_variable(var_name)
# print(var_tf)
sess.run(tf.assign(var_tf, model_weights[var_name]))
np.testing.assert_allclose(var_tf.eval(sess), model_weights[var_name])
if __name__ == '__main__':
    # One-shot conversion: build the graph, pull the pickled pre-trained
    # weights into it, and write out a TensorFlow checkpoint.
    weights_file = 'vnect.pkl'
    net = VNect(368)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        net.load_weights(sess, weights_file)
        saver.save(sess, "./vnect_tf")
        print("saved model")
| 66.380952 | 192 | 0.72007 |
74af62e60399a4cb5b23de30d12203ab1ab673e5 | 17,152 | py | Python | nova/tests/api/openstack/compute/contrib/test_createserverext.py | dreamhost/nova | 066a3d4c410056689b5843d9520f43b2b6e7d127 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/contrib/test_createserverext.py | dreamhost/nova | 066a3d4c410056689b5843d9520f43b2b6e7d127 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/contrib/test_createserverext.py | dreamhost/nova | 066a3d4c410056689b5843d9520f43b2b6e7d127 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=5 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from xml.dom import minidom
import webob
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS

# UUID shared with the fakes module so stubbed instance lookups line up.
FAKE_UUID = fakes.FAKE_UUID

# (network uuid, fixed ip) pairs used as request fixtures by the tests.
FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
                 ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')]
# Same network listed twice -- the API must reject this with a 400.
DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
                      ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')]
# Malformed uuid/ip -- also expected to produce a 400.
INVALID_NETWORKS = [('invalid', 'invalid-ip-address')]
def return_security_group_non_existing(context, project_id, group_name):
    """Stub: behave as though *group_name* does not exist for the project."""
    raise exception.SecurityGroupNotFoundForProject(
        security_group_id=group_name, project_id=project_id)
def return_security_group_get_by_name(context, project_id, group_name):
    """Stub: pretend the named security group exists with id 1."""
    return dict(id=1, name=group_name)
def return_security_group_get(context, security_group_id, session):
    """Stub: echo back a minimal security-group record."""
    return dict(id=security_group_id)
def return_instance_add_security_group(context, instance_id,
                                       security_group_id):
    """Stub: no-op replacement for db.instance_add_security_group."""
    return None
class CreateserverextTest(test.TestCase):
    """Tests for the os-create-server-ext API extension.

    compute_api.API.create is stubbed out; each test inspects the keyword
    arguments the extension forwarded to it.
    """

    def setUp(self):
        super(CreateserverextTest, self).setUp()
        # Kwargs captured from the stubbed create() call below, inspected
        # by the individual tests after issuing a request.
        self.security_group = None
        self.injected_files = None
        self.networks = None
        self.user_data = None

        def create(*args, **kwargs):
            # Record the kwargs of interest instead of really creating an
            # instance, then return a single fake instance record plus a
            # None reservation id (matching the real API's return shape).
            if 'security_group' in kwargs:
                self.security_group = kwargs['security_group']
            else:
                self.security_group = None
            if 'injected_files' in kwargs:
                self.injected_files = kwargs['injected_files']
            else:
                self.injected_files = None
            if 'requested_networks' in kwargs:
                self.networks = kwargs['requested_networks']
            else:
                self.networks = None
            if 'user_data' in kwargs:
                self.user_data = kwargs['user_data']
            resv_id = None
            return ([{'id': '1234', 'display_name': 'fakeinstance',
                      'uuid': FAKE_UUID,
                      'user_id': 'fake',
                      'project_id': 'fake',
                      'created_at': "",
                      'updated_at': "",
                      'fixed_ips': [],
                      'progress': 0}], resv_id)
        self.stubs.Set(compute_api.API, 'create', create)
        # Load only the extensions this suite exercises.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Createserverext', 'User_data',
                                    'Security_groups', 'Networks'])

    def _make_stub_method(self, canned_return):
        """Return a stub that ignores its arguments and returns
        *canned_return*."""
        def stub_method(*args, **kwargs):
            return canned_return
        return stub_method
def _create_security_group_request_dict(self, security_groups):
server = {}
server['name'] = 'new-server-test'
server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
server['flavorRef'] = 1
if security_groups is not None:
sg_list = []
for name in security_groups:
sg_list.append({'name': name})
server['security_groups'] = sg_list
return {'server': server}
def _create_networks_request_dict(self, networks):
server = {}
server['name'] = 'new-server-test'
server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
server['flavorRef'] = 1
if networks is not None:
network_list = []
for uuid, fixed_ip in networks:
network_list.append({'uuid': uuid, 'fixed_ip': fixed_ip})
server['networks'] = network_list
return {'server': server}
def _create_user_data_request_dict(self, user_data):
server = {}
server['name'] = 'new-server-test'
server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
server['flavorRef'] = 1
server['user_data'] = user_data
return {'server': server}
    def _get_create_request_json(self, body_dict):
        """Build a POST request to the extension with *body_dict* serialized
        as a JSON body."""
        req = webob.Request.blank('/v2/fake/os-create-server-ext')
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body_dict)
        return req
def _format_xml_request_body(self, body_dict):
server = body_dict['server']
body_parts = []
body_parts.extend([
'<?xml version="1.0" encoding="UTF-8"?>',
'<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.1"',
' name="%s" imageRef="%s" flavorRef="%s">' % (
server['name'], server['imageRef'], server['flavorRef'])])
if 'metadata' in server:
metadata = server['metadata']
body_parts.append('<metadata>')
for item in metadata.iteritems():
body_parts.append('<meta key="%s">%s</meta>' % item)
body_parts.append('</metadata>')
if 'personality' in server:
personalities = server['personality']
body_parts.append('<personality>')
for file in personalities:
item = (file['path'], file['contents'])
body_parts.append('<file path="%s">%s</file>' % item)
body_parts.append('</personality>')
if 'networks' in server:
networks = server['networks']
body_parts.append('<networks>')
for network in networks:
item = (network['uuid'], network['fixed_ip'])
body_parts.append('<network uuid="%s" fixed_ip="%s"></network>'
% item)
body_parts.append('</networks>')
body_parts.append('</server>')
return ''.join(body_parts)
    def _get_create_request_xml(self, body_dict):
        """Build a POST request to the extension with *body_dict* serialized
        as an XML body."""
        req = webob.Request.blank('/v2/fake/os-create-server-ext')
        req.content_type = 'application/xml'
        req.accept = 'application/xml'
        req.method = 'POST'
        req.body = self._format_xml_request_body(body_dict)
        return req
    def _create_instance_with_networks_json(self, networks):
        """POST a JSON create request with *networks*; return the request,
        the WSGI response and the networks captured by the stubbed API."""
        body_dict = self._create_networks_request_dict(networks)
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        return request, response, self.networks

    def _create_instance_with_user_data_json(self, networks):
        # NOTE(review): the parameter actually carries the user_data
        # payload despite being named `networks` -- presumably a
        # copy-paste slip; the behavior is correct.
        body_dict = self._create_user_data_request_dict(networks)
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        return request, response, self.user_data

    def _create_instance_with_networks_xml(self, networks):
        """Same as the JSON variant but with an XML request body."""
        body_dict = self._create_networks_request_dict(networks)
        request = self._get_create_request_xml(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        return request, response, self.networks
    # --- networks argument: accepted shapes -----------------------------

    def test_create_instance_with_no_networks(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(networks=None)
        self.assertEquals(response.status_int, 202)
        self.assertEquals(networks, None)

    def test_create_instance_with_no_networks_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(networks=None)
        self.assertEquals(response.status_int, 202)
        self.assertEquals(networks, None)

    def test_create_instance_with_one_network(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst([FAKE_NETWORKS[0]])
        self.assertEquals(response.status_int, 202)
        self.assertEquals(networks, [FAKE_NETWORKS[0]])

    def test_create_instance_with_one_network_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst([FAKE_NETWORKS[0]])
        self.assertEquals(response.status_int, 202)
        self.assertEquals(networks, [FAKE_NETWORKS[0]])

    def test_create_instance_with_two_networks(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(FAKE_NETWORKS)
        self.assertEquals(response.status_int, 202)
        self.assertEquals(networks, FAKE_NETWORKS)

    def test_create_instance_with_two_networks_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(FAKE_NETWORKS)
        self.assertEquals(response.status_int, 202)
        self.assertEquals(networks, FAKE_NETWORKS)

    # --- networks argument: rejected shapes (expect HTTP 400) -----------

    def test_create_instance_with_duplicate_networks(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(DUPLICATE_NETWORKS)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(networks, None)

    def test_create_instance_with_duplicate_networks_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(DUPLICATE_NETWORKS)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(networks, None)

    def test_create_instance_with_network_no_id(self):
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        # Drop the uuid field entirely -- the API must reject this.
        del body_dict['server']['networks'][0]['uuid']
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEquals(response.status_int, 400)
        self.assertEquals(self.networks, None)

    def test_create_instance_with_network_no_id_xml(self):
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        request = self._get_create_request_xml(body_dict)
        # Strip the uuid attribute out of the serialized XML body.
        uuid = ' uuid="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"'
        request.body = request.body.replace(uuid, '')
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEquals(response.status_int, 400)
        self.assertEquals(self.networks, None)

    def test_create_instance_with_network_invalid_id(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(INVALID_NETWORKS)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(networks, None)

    def test_create_instance_with_network_invalid_id_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(INVALID_NETWORKS)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(networks, None)

    def test_create_instance_with_network_empty_fixed_ip(self):
        networks = [('1', '')]
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(networks)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(networks, None)

    def test_create_instance_with_network_non_string_fixed_ip(self):
        networks = [('1', 12345)]
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(networks)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(networks, None)

    def test_create_instance_with_network_empty_fixed_ip_xml(self):
        networks = [('1', '')]
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(networks)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(networks, None)

    # --- omitting fixed_ip is allowed: the network is passed with None --

    def test_create_instance_with_network_no_fixed_ip(self):
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        del body_dict['server']['networks'][0]['fixed_ip']
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEquals(response.status_int, 202)
        self.assertEquals(self.networks,
                          [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)])

    def test_create_instance_with_network_no_fixed_ip_xml(self):
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        request = self._get_create_request_xml(body_dict)
        request.body = request.body.replace(' fixed_ip="10.0.1.12"', '')
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEquals(response.status_int, 202)
        self.assertEquals(self.networks,
                          [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)])

    # --- user_data: must be base64; passed through unchanged ------------

    def test_create_instance_with_userdata(self):
        user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
        user_data_contents = base64.b64encode(user_data_contents)
        _create_inst = self._create_instance_with_user_data_json
        request, response, user_data = _create_inst(user_data_contents)
        self.assertEquals(response.status_int, 202)
        self.assertEquals(user_data, user_data_contents)

    def test_create_instance_with_userdata_none(self):
        user_data_contents = None
        _create_inst = self._create_instance_with_user_data_json
        request, response, user_data = _create_inst(user_data_contents)
        self.assertEquals(response.status_int, 202)
        self.assertEquals(user_data, user_data_contents)

    def test_create_instance_with_userdata_with_non_b64_content(self):
        user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
        _create_inst = self._create_instance_with_user_data_json
        request, response, user_data = _create_inst(user_data_contents)
        self.assertEquals(response.status_int, 400)
        self.assertEquals(user_data, None)

    # --- security groups -------------------------------------------------

    def test_create_instance_with_security_group_json(self):
        security_groups = ['test', 'test1']
        self.stubs.Set(db, 'security_group_get_by_name',
                       return_security_group_get_by_name)
        self.stubs.Set(db, 'instance_add_security_group',
                       return_instance_add_security_group)
        body_dict = self._create_security_group_request_dict(security_groups)
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEquals(response.status_int, 202)
        self.assertEquals(self.security_group, security_groups)

    def test_get_server_by_id_verify_security_groups_json(self):
        self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
        req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
        req.headers['Content-Type'] = 'application/json'
        response = req.get_response(fakes.wsgi_app(
            init_only=('os-create-server-ext', 'servers')))
        self.assertEquals(response.status_int, 200)
        res_dict = jsonutils.loads(response.body)
        expected_security_group = [{"name": "test"}]
        self.assertEquals(res_dict['server'].get('security_groups'),
                          expected_security_group)

    def test_get_server_by_id_verify_security_groups_xml(self):
        self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
        req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
        req.headers['Accept'] = 'application/xml'
        response = req.get_response(fakes.wsgi_app(
            init_only=('os-create-server-ext', 'servers')))
        self.assertEquals(response.status_int, 200)
        dom = minidom.parseString(response.body)
        server = dom.childNodes[0]
        sec_groups = server.getElementsByTagName('security_groups')[0]
        sec_group = sec_groups.getElementsByTagName('security_group')[0]
        self.assertEqual('test', sec_group.getAttribute("name"))
| 43.755102 | 79 | 0.666744 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.