hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2b503dcae407a91374699d400e6ce2f325764f | 1,940 | py | Python | python/test/function/test_log_softmax.py | sdonatti/nnabla | ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9 | [
"Apache-2.0"
] | 1 | 2020-08-03T12:49:19.000Z | 2020-08-03T12:49:19.000Z | python/test/function/test_log_softmax.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | 1 | 2020-11-09T07:33:29.000Z | 2020-11-09T07:33:29.000Z | python/test/function/test_log_softmax.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
def ref_log_softmax(x, axis):
    """NumPy reference log-softmax along `axis`.

    Subtracting the per-axis max first keeps exp() from overflowing;
    the result equals x - logsumexp(x, axis).
    """
    shifted = x - x.max(axis, keepdims=True)
    return shifted - np.log(np.exp(shifted).sum(axis, keepdims=True))
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("ctx, func_name", list_context('LogSoftmax'))
def test_log_softmax_forward_backward(seed, axis, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
inputs = [rng.randn(2, 3, 4).astype(np.float32)]
function_tester(rng, F.log_softmax, ref_log_softmax, inputs, func_args=[axis],
ctx=ctx, func_name=func_name, atol_b=1e-2)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("ctx, func_name", list_context('LogSoftmax'))
def test_log_softmax_double_backward(seed, axis, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inputs = [rng.randn(2, 3, 4).astype(np.float32)]
backward_function_tester(rng, F.log_softmax, None, inputs, func_args=[axis],
ctx=ctx, func_name=func_name,
atol_b=1e-1, atol_accum=1e-1, dstep=1e-3)
| 40.416667 | 82 | 0.713918 |
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
def ref_log_softmax(x, axis):
x = x - x.max(axis, keepdims=True)
x = x - np.log(np.exp(x).sum(axis, keepdims=True))
return x
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("ctx, func_name", list_context('LogSoftmax'))
def test_log_softmax_forward_backward(seed, axis, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
inputs = [rng.randn(2, 3, 4).astype(np.float32)]
function_tester(rng, F.log_softmax, ref_log_softmax, inputs, func_args=[axis],
ctx=ctx, func_name=func_name, atol_b=1e-2)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2])
@pytest.mark.parametrize("ctx, func_name", list_context('LogSoftmax'))
def test_log_softmax_double_backward(seed, axis, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inputs = [rng.randn(2, 3, 4).astype(np.float32)]
backward_function_tester(rng, F.log_softmax, None, inputs, func_args=[axis],
ctx=ctx, func_name=func_name,
atol_b=1e-1, atol_accum=1e-1, dstep=1e-3)
| true | true |
1c2b509e3a950854ffb86dc4c6e88bf0e9f99e61 | 3,170 | py | Python | kubernetes/client/models/v1beta2_scale_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 1 | 2020-05-08T12:41:04.000Z | 2020-05-08T12:41:04.000Z | kubernetes/client/models/v1beta2_scale_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta2_scale_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 2 | 2021-07-09T08:49:05.000Z | 2021-08-03T18:08:36.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2ScaleSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Model for the Kubernetes v1beta2 Scale subresource spec: holds the
    desired replica count of the scaled object.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'replicas': 'int'
    }

    attribute_map = {
        'replicas': 'replicas'
    }

    def __init__(self, replicas=None):
        """
        V1beta2ScaleSpec - a model defined in Swagger

        :param replicas: desired number of instances for the scaled object.
        """
        self._replicas = None
        self.discriminator = None

        if replicas is not None:
            self.replicas = replicas

    @property
    def replicas(self):
        """
        Gets the replicas of this V1beta2ScaleSpec.
        desired number of instances for the scaled object.

        :return: The replicas of this V1beta2ScaleSpec.
        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """
        Sets the replicas of this V1beta2ScaleSpec.
        desired number of instances for the scaled object.

        :param replicas: The replicas of this V1beta2ScaleSpec.
        :type: int
        """
        self._replicas = replicas

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # six.iteritems was only needed for Python 2; plain dict.items()
        # is equivalent on Python 3 and drops the six dependency here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta2ScaleSpec):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 24.96063 | 105 | 0.547319 |
from pprint import pformat
from six import iteritems
import re
class V1beta2ScaleSpec(object):
swagger_types = {
'replicas': 'int'
}
attribute_map = {
'replicas': 'replicas'
}
def __init__(self, replicas=None):
self._replicas = None
self.discriminator = None
if replicas is not None:
self.replicas = replicas
@property
def replicas(self):
return self._replicas
@replicas.setter
def replicas(self, replicas):
self._replicas = replicas
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1beta2ScaleSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c2b50e3041167a0f64de7ee49838ee9ea968d07 | 1,486 | py | Python | src/item_nesting/nested_item.py | uoshvis/scrapy-examples | ba1b274543436e3856a852c62111090fdd322c60 | [
"MIT"
] | null | null | null | src/item_nesting/nested_item.py | uoshvis/scrapy-examples | ba1b274543436e3856a852c62111090fdd322c60 | [
"MIT"
] | null | null | null | src/item_nesting/nested_item.py | uoshvis/scrapy-examples | ba1b274543436e3856a852c62111090fdd322c60 | [
"MIT"
] | null | null | null | import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.item import Item, Field
class FamilyItem(Item):
    """Top-level scraped item: a family name plus its sons (as dicts)."""
    name = Field()
    sons = Field()
class SonsItem(Item):
    """Second nesting level: a son's name plus his grandsons (as dicts)."""
    name = Field()
    grandsons = Field()
class GrandsonsItem(Item):
    """Innermost nesting level: one grandson's personal attributes."""
    name = Field()
    age = Field()
    weight = Field()
class MySpider(scrapy.Spider):
    """Demo spider showing a three-level nested item
    (family -> sons -> grandsons) built from hard-coded data."""
    name = 'scraper_name'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        """Build and yield one FamilyItem.

        Nested items are wrapped with dict(...) before assignment so the
        exported structure is plain, JSON-serializable data rather than
        Item objects. The `response` argument is unused in this demo.
        """
        gs1 = GrandsonsItem()
        gs1['name'] = 'GS1'
        gs1['age'] = 18
        gs1['weight'] = 50
        gs2 = GrandsonsItem()
        gs2['name'] = 'GS2'
        gs2['age'] = 19
        gs2['weight'] = 51
        s1 = SonsItem()
        s1['name'] = 'S1'
        s1['grandsons'] = [dict(gs1), dict(gs2)]
        jenny = FamilyItem()
        jenny['name'] = 'Jenny'
        jenny['sons'] = [dict(s1)]
        yield jenny
# Output example
# {'name': 'Jenny',
# 'sons': [{'grandsons': [{'age': 18, 'name': 'GS1', 'weight': 50},
# {'age': 19, 'name': 'GS2', 'weight': 51}],
# 'name': 'S1'}]}
if __name__ == '__main__':
    # run scraper
    # Standalone entry point: run the spider in-process with a desktop
    # user agent and a single concurrent request.
    process = CrawlerProcess(settings={
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'CONCURRENT_REQUESTS': 1
    })
    process.crawl(MySpider)
    process.start()
| 23.21875 | 130 | 0.535666 | import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.item import Item, Field
class FamilyItem(Item):
name = Field()
sons = Field()
class SonsItem(Item):
name = Field()
grandsons = Field()
class GrandsonsItem(Item):
name = Field()
age = Field()
weight = Field()
class MySpider(scrapy.Spider):
name = 'scraper_name'
allowed_domains = ['quotes.toscrape.com']
start_urls = ['http://quotes.toscrape.com/']
def parse(self, response):
gs1 = GrandsonsItem()
gs1['name'] = 'GS1'
gs1['age'] = 18
gs1['weight'] = 50
gs2 = GrandsonsItem()
gs2['name'] = 'GS2'
gs2['age'] = 19
gs2['weight'] = 51
s1 = SonsItem()
s1['name'] = 'S1'
s1['grandsons'] = [dict(gs1), dict(gs2)]
jenny = FamilyItem()
jenny['name'] = 'Jenny'
jenny['sons'] = [dict(s1)]
yield jenny
if __name__ == '__main__':
process = CrawlerProcess(settings={
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
'CONCURRENT_REQUESTS': 1
})
process.crawl(MySpider)
process.start()
| true | true |
1c2b52b2edc8446836b1d3e0ad5969f8ab03d62b | 3,922 | py | Python | bin/discovery.py | rog-works/lf3py | e89937f7aa133ed54d85764f06101ab9abf6b960 | [
"CNRI-Python"
] | null | null | null | bin/discovery.py | rog-works/lf3py | e89937f7aa133ed54d85764f06101ab9abf6b960 | [
"CNRI-Python"
] | 48 | 2020-12-19T13:47:26.000Z | 2021-01-07T22:27:56.000Z | bin/discovery.py | rog-works/lf3py | e89937f7aa133ed54d85764f06101ab9abf6b960 | [
"CNRI-Python"
] | null | null | null | from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple
from typing_extensions import Protocol
from lf3py.app.app import App
from lf3py.lang.dict import deep_merge
from lf3py.lang.module import import_module
from lf3py.lang.sequence import first, flatten, last
from lf3py.middleware import Middleware
from lf3py.routing.symbols import IRouter
from lf3py.task.types import Runner
class Generator(Protocol):
    """Structural interface for objects that turn blueprints into an artifact."""

    def generate(self, bps: List[App]) -> Any:
        """Produce an output (e.g. routes dict, OpenAPI schema) from blueprints."""
        raise NotImplementedError()
class Discovery:
    """Imports candidate module files and collects the blueprint (App)
    objects they expose, for feeding into a Generator."""

    def __init__(self, filepaths: List[str]) -> None:
        """
        :param filepaths: source file paths (relative, slash-separated)
        """
        self._bps = self.__discover(list(filepaths))

    def __discover(self, filepaths: List[str]) -> List[App]:
        # Import every module and keep only those that expose a blueprint.
        paths = [self.__to_module_path(filepath) for filepath in filepaths]
        searched = [self.__dirty_resolve_bp(path) for path in paths]
        return [result for result in searched if result]

    def __to_module_path(self, filepath: str) -> str:
        # 'pkg/mod.py' -> 'pkg.mod': drop the extension, slashes to dots.
        return '.'.join('.'.join(filepath.split('.')[:-1]).split('/'))

    def __dirty_resolve_bp(self, path: str) -> Optional[App]:
        # Heuristic: the blueprint is the first module attribute with a
        # bound, callable `locate` method (i.e. an App-like instance).
        modules = import_module(path)
        for module in modules.__dict__.values():
            if hasattr(module, 'locate') and callable(module.locate) and hasattr(module.locate, '__self__'):
                return module
        return None

    def generate(self, generator: 'Generator') -> Any:
        """Run `generator` over the discovered blueprints and return its output."""
        return generator.generate(self._bps)
class RoutesGenerator:
    """Generator that flattens every blueprint's route table into one
    dict mapping 'METHOD /path' specs to runner module paths."""

    def generate(self, bps: List[App]) -> dict:
        per_bp_pairs = [self.__dirty_get_routes_to_tuple(bp) for bp in bps]
        return dict(flatten(per_bp_pairs))

    def __dirty_get_routes_to_tuple(self, bp: App) -> List[Tuple[str, str]]:
        routes = bp.locate(IRouter)._routes  # FIXME dirty get routes
        return list(routes.items())
class OpenApiGenerator:
    """Generator that assembles a partial OpenAPI schema (the 'paths'
    section) from each blueprint's routes, runners and middleware."""

    def generate(self, bps: List[App]) -> dict:
        """Merge the per-blueprint schemas into one dict."""
        schema = {}
        for bp in bps:
            schema = {**schema, **self.__gen_schema_from_bp(bp)}
        return schema

    def __gen_schema_from_bp(self, bp: App) -> dict:
        middleware = bp.locate(Middleware)
        routes = bp.locate(IRouter)._routes  # FIXME dirty get routes
        # All runner module paths share one package; import it once.
        path = '.'.join(first(routes.values()).split('.')[:-1])
        modules = import_module(path)
        schema = {'paths': {}}
        for spec, runner in self.__extract_runners(routes, modules).items():
            schema['paths'] = deep_merge(schema['paths'], self.__gen_api_schema(spec, runner, middleware))
        return schema

    def __extract_runners(self, routes: dict, modules: ModuleType) -> Dict[str, Runner]:
        # Map each route spec to the runner object named by the last
        # segment of its dotted module path.
        extracted = {}
        for spec, module_path in routes.items():
            module_name = last(module_path.split('.'))
            extracted[spec] = modules.__dict__[module_name]
        return extracted

    def __gen_api_schema(self, spec: str, runner: Runner, middleware: Middleware) -> dict:
        api_schema = self.__gen_api_schema_from_middleware(middleware, runner)
        return self.__gen_api_schema_from_runner(spec, runner, api_schema)

    def __gen_api_schema_from_middleware(self, middleware: Middleware, runner: Runner) -> dict:
        # Merge any '__openapi__' metadata attached to the runner's
        # middleware hooks (both attach and catch handlers).
        attaches, caches = middleware._attaches.get(runner, []), middleware._catches.get(runner, [])
        elems = flatten([attaches, caches])
        schema = {}
        for elem in elems:
            if hasattr(elem, '__openapi__'):
                schema = deep_merge(schema, getattr(elem, '__openapi__'))
        return schema

    def __gen_api_schema_from_runner(self, spec: str, runner: Runner, api_schema: dict) -> dict:
        # spec looks like 'GET /users'; every route gets at least a
        # bare 200 response entry.
        method, path = spec.split(' ')
        base_api_schema = {
            'responses': {
                200: {'description': '200 OK'},
            }
        }
        return {
            path: {
                method.lower(): deep_merge(api_schema, base_api_schema)
            }
        }
| 37.352381 | 108 | 0.649414 | from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple
from typing_extensions import Protocol
from lf3py.app.app import App
from lf3py.lang.dict import deep_merge
from lf3py.lang.module import import_module
from lf3py.lang.sequence import first, flatten, last
from lf3py.middleware import Middleware
from lf3py.routing.symbols import IRouter
from lf3py.task.types import Runner
class Generator(Protocol):
def generate(self, bps: List[App]) -> Any:
raise NotImplementedError()
class Discovery:
def __init__(self, filepaths: List[str]) -> None:
self._bps = self.__discover(list(filepaths))
def __discover(self, filepaths: List[str]) -> List[App]:
paths = [self.__to_module_path(filepath) for filepath in filepaths]
searched = [self.__dirty_resolve_bp(path) for path in paths]
return [result for result in searched if result]
def __to_module_path(self, filepath: str) -> str:
return '.'.join('.'.join(filepath.split('.')[:-1]).split('/'))
def __dirty_resolve_bp(self, path: str) -> Optional[App]:
modules = import_module(path)
for module in modules.__dict__.values():
if hasattr(module, 'locate') and callable(module.locate) and hasattr(module.locate, '__self__'):
return module
return None
def generate(self, generator: 'Generator') -> Any:
return generator.generate(self._bps)
class RoutesGenerator:
def generate(self, bps: List[App]) -> dict:
return dict(flatten([self.__dirty_get_routes_to_tuple(bp) for bp in bps]))
def __dirty_get_routes_to_tuple(self, bp: App) -> List[Tuple[str, str]]:
routes = bp.locate(IRouter)._routes
return [(dsn_spec, module_path) for dsn_spec, module_path in routes.items()]
class OpenApiGenerator:
def generate(self, bps: List[App]) -> dict:
schema = {}
for bp in bps:
schema = {**schema, **self.__gen_schema_from_bp(bp)}
return schema
def __gen_schema_from_bp(self, bp: App) -> dict:
middleware = bp.locate(Middleware)
routes = bp.locate(IRouter)._routes
path = '.'.join(first(routes.values()).split('.')[:-1])
modules = import_module(path)
schema = {'paths': {}}
for spec, runner in self.__extract_runners(routes, modules).items():
schema['paths'] = deep_merge(schema['paths'], self.__gen_api_schema(spec, runner, middleware))
return schema
def __extract_runners(self, routes: dict, modules: ModuleType) -> Dict[str, Runner]:
extracted = {}
for spec, module_path in routes.items():
module_name = last(module_path.split('.'))
extracted[spec] = modules.__dict__[module_name]
return extracted
def __gen_api_schema(self, spec: str, runner: Runner, middleware: Middleware) -> dict:
api_schema = self.__gen_api_schema_from_middleware(middleware, runner)
return self.__gen_api_schema_from_runner(spec, runner, api_schema)
def __gen_api_schema_from_middleware(self, middleware: Middleware, runner: Runner) -> dict:
attaches, caches = middleware._attaches.get(runner, []), middleware._catches.get(runner, [])
elems = flatten([attaches, caches])
schema = {}
for elem in elems:
if hasattr(elem, '__openapi__'):
schema = deep_merge(schema, getattr(elem, '__openapi__'))
return schema
def __gen_api_schema_from_runner(self, spec: str, runner: Runner, api_schema: dict) -> dict:
method, path = spec.split(' ')
base_api_schema = {
'responses': {
200: {'description': '200 OK'},
}
}
return {
path: {
method.lower(): deep_merge(api_schema, base_api_schema)
}
}
| true | true |
1c2b540b4943418dd69ae5877da213effcf117cb | 6,901 | py | Python | examples/vae.py | ethanabrooks/dm-haiku | 0c030422f0e3a331b6df5aa8f2fe92576444bd3b | [
"Apache-2.0"
] | null | null | null | examples/vae.py | ethanabrooks/dm-haiku | 0c030422f0e3a331b6df5aa8f2fe92576444bd3b | [
"Apache-2.0"
] | null | null | null | examples/vae.py | ethanabrooks/dm-haiku | 0c030422f0e3a331b6df5aa8f2fe92576444bd3b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variational Autoencoder example on binarized MNIST dataset."""
from typing import Generator, Mapping, Tuple, NamedTuple, Sequence
from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
flags.DEFINE_integer("batch_size", 128, "Size of the batch to train on.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate for the optimizer.")
flags.DEFINE_integer("training_steps", 5000, "Number of training steps to run.")
flags.DEFINE_integer("eval_frequency", 100, "How often to evaluate the model.")
flags.DEFINE_integer("random_seed", 42, "Random seed.")
FLAGS = flags.FLAGS
PRNGKey = jnp.ndarray
Batch = Mapping[str, np.ndarray]
MNIST_IMAGE_SHAPE: Sequence[int] = (28, 28, 1)
def load_dataset(split: str, batch_size: int) -> Generator[Batch, None, None]:
  """Yield batches of binarized MNIST forever (shuffled and prefetched)."""
  ds = tfds.load("binarized_mnist", split=split, shuffle_files=True,
                 read_config=tfds.ReadConfig(shuffle_seed=FLAGS.random_seed))
  # Shuffle buffer of 10 batches; the fixed seed keeps runs reproducible.
  ds = ds.shuffle(buffer_size=10 * batch_size, seed=FLAGS.random_seed)
  ds = ds.batch(batch_size)
  ds = ds.prefetch(buffer_size=5)
  ds = ds.repeat()
  return iter(tfds.as_numpy(ds))
class Encoder(hk.Module):
  """Gaussian encoder: maps an image to the mean/stddev of q(z|x)."""

  def __init__(self, hidden_size: int = 512, latent_size: int = 10):
    super().__init__()
    self._hidden_size = hidden_size
    self._latent_size = latent_size

  def __call__(self, x: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
    # Module creation order (hidden, mean, log-stddev) is preserved so
    # haiku parameter names match the original implementation.
    hidden = jax.nn.relu(hk.Linear(self._hidden_size)(hk.Flatten()(x)))
    mean = hk.Linear(self._latent_size)(hidden)
    log_stddev = hk.Linear(self._latent_size)(hidden)
    return mean, jnp.exp(log_stddev)
class Decoder(hk.Module):
  """Bernoulli decoder: maps a latent code to per-pixel logits."""

  def __init__(
      self,
      hidden_size: int = 512,
      output_shape: Sequence[int] = MNIST_IMAGE_SHAPE,
  ):
    super().__init__()
    self._hidden_size = hidden_size
    self._output_shape = output_shape

  def __call__(self, z: jnp.ndarray) -> jnp.ndarray:
    hidden = jax.nn.relu(hk.Linear(self._hidden_size)(z))
    flat_logits = hk.Linear(np.prod(self._output_shape))(hidden)
    # Restore image shape, keeping the batch dimension first.
    return jnp.reshape(flat_logits, (-1, *self._output_shape))
class VAEOutput(NamedTuple):
  """Result of one VAE forward pass."""
  image: jnp.ndarray   # Bernoulli sample drawn from the decoder distribution
  mean: jnp.ndarray    # posterior mean of q(z|x)
  stddev: jnp.ndarray  # posterior stddev of q(z|x)
  logits: jnp.ndarray  # decoder logits per pixel
class VariationalAutoEncoder(hk.Module):
  """Main VAE model class, uses Encoder & Decoder under the hood."""

  def __init__(
      self,
      hidden_size: int = 512,
      latent_size: int = 10,
      output_shape: Sequence[int] = MNIST_IMAGE_SHAPE,
  ):
    super().__init__()
    self._hidden_size = hidden_size
    self._latent_size = latent_size
    self._output_shape = output_shape

  def __call__(self, x: jnp.ndarray) -> VAEOutput:
    x = x.astype(jnp.float32)
    mean, stddev = Encoder(self._hidden_size, self._latent_size)(x)
    # Reparameterization trick: z = mean + stddev * eps, eps ~ N(0, I).
    z = mean + stddev * jax.random.normal(hk.next_rng_key(), mean.shape)
    logits = Decoder(self._hidden_size, self._output_shape)(z)
    # Sample a binarized image from the decoder's Bernoulli distribution.
    # Note this consumes a second RNG key; the key order matters for
    # reproducibility.
    p = jax.nn.sigmoid(logits)
    image = jax.random.bernoulli(hk.next_rng_key(), p)
    return VAEOutput(image, mean, stddev, logits)
def binary_cross_entropy(x: jnp.ndarray, logits: jnp.ndarray) -> jnp.ndarray:
  """Calculate binary (logistic) cross-entropy from distribution logits.

  Args:
    x: input variable tensor, must be of same shape as logits
    logits: log odds of a Bernoulli distribution, i.e. log(p/(1-p))

  Returns:
    Per-example binary cross-entropy, summed over all non-batch dims.
  """
  if x.shape != logits.shape:
    raise ValueError("inputs x and logits must be of the same shape")

  batch = x.shape[0]
  flat_x = x.reshape(batch, -1)
  flat_logits = logits.reshape(batch, -1)
  # -log p(x | logits) for a Bernoulli: softplus(l) - x*l, via logaddexp.
  return -jnp.sum(flat_x * flat_logits - jnp.logaddexp(0.0, flat_logits), axis=-1)
def kl_gaussian(mean: jnp.ndarray, var: jnp.ndarray) -> jnp.ndarray:
  r"""KL divergence from N(mean, diag(var)) to the standard normal N(0, I).

  Closed form per dimension: 0.5 * (m^2 + v - log(v) - 1), summed over the
  last axis to give one scalar per batch element.

  Args:
    mean: mean vector of the first distribution
    var: diagonal of the covariance matrix of the first distribution

  Returns:
    Per-example KL divergence.
  """
  per_dim = jnp.square(mean) + var - jnp.log(var) - 1.0
  return 0.5 * jnp.sum(per_dim, axis=-1)
def main(_):
  """Train the VAE on binarized MNIST and log validation ELBO periodically."""
  FLAGS.alsologtostderr = True
  model = hk.transform(lambda x: VariationalAutoEncoder()(x))  # pylint: disable=unnecessary-lambda
  optimizer = optax.adam(FLAGS.learning_rate)

  @jax.jit
  def loss_fn(params: hk.Params, rng_key: PRNGKey, batch: Batch) -> jnp.ndarray:
    """ELBO loss: E_p[log(x)] - KL(d||q), where p ~ Be(0.5) and q ~ N(0,1)."""
    outputs: VAEOutput = model.apply(params, rng_key, batch["image"])
    log_likelihood = -binary_cross_entropy(batch["image"], outputs.logits)
    kl = kl_gaussian(outputs.mean, jnp.square(outputs.stddev))
    elbo = log_likelihood - kl
    # Maximizing the ELBO == minimizing its negative batch mean.
    return -jnp.mean(elbo)

  @jax.jit
  def update(
      params: hk.Params,
      rng_key: PRNGKey,
      opt_state: optax.OptState,
      batch: Batch,
  ) -> Tuple[hk.Params, optax.OptState]:
    """Single SGD update step."""
    grads = jax.grad(loss_fn)(params, rng_key, batch)
    updates, new_opt_state = optimizer.update(grads, opt_state)
    new_params = optax.apply_updates(params, updates)
    return new_params, new_opt_state

  rng_seq = hk.PRNGSequence(FLAGS.random_seed)
  # A dummy zero image fixes the parameter shapes at init time.
  params = model.init(next(rng_seq), np.zeros((1, *MNIST_IMAGE_SHAPE)))
  opt_state = optimizer.init(params)
  train_ds = load_dataset(tfds.Split.TRAIN, FLAGS.batch_size)
  valid_ds = load_dataset(tfds.Split.TEST, FLAGS.batch_size)
  for step in range(FLAGS.training_steps):
    params, opt_state = update(params, next(rng_seq), opt_state, next(train_ds))
    if step % FLAGS.eval_frequency == 0:
      val_loss = loss_fn(params, next(rng_seq), next(valid_ds))
      logging.info("STEP: %5d; Validation ELBO: %.3f", step, -val_loss)
if __name__ == "__main__":
app.run(main)
| 32.399061 | 99 | 0.686712 |
from typing import Generator, Mapping, Tuple, NamedTuple, Sequence
from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
flags.DEFINE_integer("batch_size", 128, "Size of the batch to train on.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate for the optimizer.")
flags.DEFINE_integer("training_steps", 5000, "Number of training steps to run.")
flags.DEFINE_integer("eval_frequency", 100, "How often to evaluate the model.")
flags.DEFINE_integer("random_seed", 42, "Random seed.")
FLAGS = flags.FLAGS
PRNGKey = jnp.ndarray
Batch = Mapping[str, np.ndarray]
MNIST_IMAGE_SHAPE: Sequence[int] = (28, 28, 1)
def load_dataset(split: str, batch_size: int) -> Generator[Batch, None, None]:
ds = tfds.load("binarized_mnist", split=split, shuffle_files=True,
read_config=tfds.ReadConfig(shuffle_seed=FLAGS.random_seed))
ds = ds.shuffle(buffer_size=10 * batch_size, seed=FLAGS.random_seed)
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=5)
ds = ds.repeat()
return iter(tfds.as_numpy(ds))
class Encoder(hk.Module):
def __init__(self, hidden_size: int = 512, latent_size: int = 10):
super().__init__()
self._hidden_size = hidden_size
self._latent_size = latent_size
def __call__(self, x: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
x = hk.Flatten()(x)
x = hk.Linear(self._hidden_size)(x)
x = jax.nn.relu(x)
mean = hk.Linear(self._latent_size)(x)
log_stddev = hk.Linear(self._latent_size)(x)
stddev = jnp.exp(log_stddev)
return mean, stddev
class Decoder(hk.Module):
def __init__(
self,
hidden_size: int = 512,
output_shape: Sequence[int] = MNIST_IMAGE_SHAPE,
):
super().__init__()
self._hidden_size = hidden_size
self._output_shape = output_shape
def __call__(self, z: jnp.ndarray) -> jnp.ndarray:
z = hk.Linear(self._hidden_size)(z)
z = jax.nn.relu(z)
logits = hk.Linear(np.prod(self._output_shape))(z)
logits = jnp.reshape(logits, (-1, *self._output_shape))
return logits
class VAEOutput(NamedTuple):
image: jnp.ndarray
mean: jnp.ndarray
stddev: jnp.ndarray
logits: jnp.ndarray
class VariationalAutoEncoder(hk.Module):
def __init__(
self,
hidden_size: int = 512,
latent_size: int = 10,
output_shape: Sequence[int] = MNIST_IMAGE_SHAPE,
):
super().__init__()
self._hidden_size = hidden_size
self._latent_size = latent_size
self._output_shape = output_shape
def __call__(self, x: jnp.ndarray) -> VAEOutput:
x = x.astype(jnp.float32)
mean, stddev = Encoder(self._hidden_size, self._latent_size)(x)
z = mean + stddev * jax.random.normal(hk.next_rng_key(), mean.shape)
logits = Decoder(self._hidden_size, self._output_shape)(z)
p = jax.nn.sigmoid(logits)
image = jax.random.bernoulli(hk.next_rng_key(), p)
return VAEOutput(image, mean, stddev, logits)
def binary_cross_entropy(x: jnp.ndarray, logits: jnp.ndarray) -> jnp.ndarray:
if x.shape != logits.shape:
raise ValueError("inputs x and logits must be of the same shape")
x = jnp.reshape(x, (x.shape[0], -1))
logits = jnp.reshape(logits, (logits.shape[0], -1))
return -jnp.sum(x * logits - jnp.logaddexp(0.0, logits), axis=-1)
def kl_gaussian(mean: jnp.ndarray, var: jnp.ndarray) -> jnp.ndarray:
return 0.5 * jnp.sum(-jnp.log(var) - 1.0 + var + jnp.square(mean), axis=-1)
def main(_):
FLAGS.alsologtostderr = True
model = hk.transform(lambda x: VariationalAutoEncoder()(x))
optimizer = optax.adam(FLAGS.learning_rate)
@jax.jit
def loss_fn(params: hk.Params, rng_key: PRNGKey, batch: Batch) -> jnp.ndarray:
outputs: VAEOutput = model.apply(params, rng_key, batch["image"])
log_likelihood = -binary_cross_entropy(batch["image"], outputs.logits)
kl = kl_gaussian(outputs.mean, jnp.square(outputs.stddev))
elbo = log_likelihood - kl
return -jnp.mean(elbo)
@jax.jit
def update(
params: hk.Params,
rng_key: PRNGKey,
opt_state: optax.OptState,
batch: Batch,
) -> Tuple[hk.Params, optax.OptState]:
grads = jax.grad(loss_fn)(params, rng_key, batch)
updates, new_opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state
rng_seq = hk.PRNGSequence(FLAGS.random_seed)
params = model.init(next(rng_seq), np.zeros((1, *MNIST_IMAGE_SHAPE)))
opt_state = optimizer.init(params)
train_ds = load_dataset(tfds.Split.TRAIN, FLAGS.batch_size)
valid_ds = load_dataset(tfds.Split.TEST, FLAGS.batch_size)
for step in range(FLAGS.training_steps):
params, opt_state = update(params, next(rng_seq), opt_state, next(train_ds))
if step % FLAGS.eval_frequency == 0:
val_loss = loss_fn(params, next(rng_seq), next(valid_ds))
logging.info("STEP: %5d; Validation ELBO: %.3f", step, -val_loss)
if __name__ == "__main__":
app.run(main)
| true | true |
1c2b56f9c54f6473c047b01c41373222dfecefb3 | 20,584 | py | Python | stock_report/pdf.py | pfeiffer-dev/stock-report | da9f3926a8fa5c6bc40febf1afd83c36699f19aa | [
"MIT"
] | null | null | null | stock_report/pdf.py | pfeiffer-dev/stock-report | da9f3926a8fa5c6bc40febf1afd83c36699f19aa | [
"MIT"
] | null | null | null | stock_report/pdf.py | pfeiffer-dev/stock-report | da9f3926a8fa5c6bc40febf1afd83c36699f19aa | [
"MIT"
] | null | null | null | # pdf.py
# stock-report
# Copyright 2022 Kevin Pfeiffer
# MIT License
import os
from fpdf import FPDF
from datetime import date
class PDF:
    def __init__(self, data, name, ticker):
        """
        Inherit PDF class with its default arguments.

        :param data: stock data used by the report pages
        :param name: company name shown in the title
        :param ticker: stock symbol shown under each heading
        """
        self.date = date.today()
        self.data = data
        self.name = name
        self.ticker = ticker
        # Page dimensions in millimetres (A4 portrait).
        self.width = 210
        self.height = 297
# Formats A4 Letter
pdf = FPDF("P", "mm", "A4")
    def new_page(self):
        """
        Creates a new pdf page with default header and footer.
        """
        self.pdf.add_page()
        self.pdf.set_font("Arial", "B", 12)
        # Resolve header/footer images relative to this package, not the CWD.
        this_dir, this_filename = os.path.split(__file__)
        header = os.path.join(this_dir, "resources", "header.png")
        footer = os.path.join(this_dir, "resources", "footer.png")
        self.pdf.image(header, 0, 0, self.width)
        # y=252 mm places the footer near the bottom of the 297 mm page.
        self.pdf.image(footer, 0, 252, self.width)
def create_title(self):
"""
Creates title with stock name and date created.
"""
self.pdf.set_font("Arial", "b", 40)
self.pdf.ln(30)
self.pdf.write(4, f"{self.name}")
self.pdf.ln(10)
self.pdf.set_font("Arial", "", 10)
self.pdf.write(4, f"Created: {self.date}")
self.pdf.ln(5)
def create_heading(self, headline):
"""
Creates heading for a page with ticker.
:param headline: text as a string
"""
self.pdf.set_text_color(0, 0, 0)
self.pdf.set_font("Arial", "b", 20)
self.pdf.ln(30)
self.pdf.write(4, f"{headline}")
self.pdf.ln(5)
self.pdf.set_font("Arial", "", 10)
self.pdf.write(4, f"Symbol: {self.ticker}")
self.pdf.ln(5)
def no_data_available(self):
"""
Creates heading for a page with ticker.
:param headline: text as a string
"""
self.pdf.set_text_color(0, 0, 0)
self.pdf.set_font("Arial", "b", 40)
self.pdf.ln(100)
self.pdf.write(4, "No data available")
def category_key_figures(self, data, category):
"""
Returns data by category.
:param data: data from Alpha Vantage as pandas dataframe
:param category: category by Alpha Vantage as a string
:return: data from a category as a string
"""
category_data = data
category_data = category_data[category].values[0]
return category_data
def category_annual_report(self, data, year ,category):
"""
Returns data by category and year.
:param data: data from Alpha Vantage as pandas dataframe
:param year: takes integer, latest year = 0, second latest year = 1 ...
:param category: category by Alpha Vantage as a string
:return: data from a category and year as a string
"""
end = year + 1
category_annual_report = data
category_annual_report = category_annual_report.iloc[year:end , :]
category_annual_report = category_annual_report[category].values[0]
return category_annual_report
def kff(self, data, key, category, y, key_style="", value_style="", left_position=True, thousands_character=False):
"""
Creates Template for key_figures().
:param data: data from Alpha Vantage as pandas dataframe
:param key: name of key as string
:param category: category by Alpha Vantage as a string
:param y: y position of text as integer
:param key_style: style of key text as string, b: bold, i: italic, u: underline
:param key_style: style of value text as string, b: bold, i: italic, u: underline
:param left_position: takes boolean for left or right position on paper
:param thousands_character: takes boolean for thousands separator
"""
# x position
if left_position:
x_key = 10
x_value = 70
else:
x_key = 115
x_value = 175
# format of key
self.pdf.set_font("Arial", key_style, 12)
self.pdf.set_xy(x_key, y)
self.pdf.cell(0, txt=f"{key}")
# set value
value = self.category_key_figures(data, category)
# thousands character for value
if thousands_character:
try:
value = int(value)
value = f'{value:,}'
except:
value = value
else:
value = value
# format of value
self.pdf.set_font("Arial", value_style, 12)
self.pdf.set_xy(x_value, y)
self.pdf.cell(0, txt=f"{value}")
self.pdf.set_text_color(0, 0, 0)
def arf(self, data, key, category, y, key_style="", value_style=""):
"""
Creates Template for income_statment(), balance_sheet() and cash_flow().
:param data: data from Alpha Vantage as pandas dataframe
:param key: name of key as string
:param category: category by Alpha Vantage as a string
:param y: y position of text as integer
:param key_style: style of key text as string, b: bold, i: italic, u: underline
:param key_style: style of value text as string, b: bold, i: italic, u: underline
"""
# format of key
self.pdf.set_font("Arial", key_style, 12)
self.pdf.set_xy(10, y)
self.pdf.cell(0, txt=f"{key}")
self.pdf.set_font("Arial", value_style, 12)
# format of value
self.pdf.set_xy(95, y)
value_1 = self.category_annual_report(data, 0, category)
try:
value_1 = int(value_1) # for seperation
value_1 = f'{value_1:,}'
except:
value_1 = value_1
self.pdf.cell(0, txt=f"{value_1}")
self.pdf.set_xy(135, y)
value_2 = self.category_annual_report(data, 1, category)
try:
value_2 = int(value_2)
value_2 = f'{value_2:,}'
except:
value_2 = value_2
self.pdf.cell(0, txt=f"{value_2}")
self.pdf.set_xy(175, y)
value_3 = self.category_annual_report(data, 2, category)
try:
value_3 = int(value_3)
value_3 = f'{value_3:,}'
except:
value_3 = value_3
self.pdf.cell(0, txt=f"{value_3}")
def key_figures(self):
"""
Creates key figures on pdf page.
"""
try:
# access data
data_co = self.data.company_overview()
# set elements
self.kff(data_co, "Industry", "Industry", 70)
self.kff(data_co, "Sector", "Sector", 75)
self.kff(data_co, "Country", "Country", 80)
self.kff(data_co, "Exchange", "Exchange", 85)
self.kff(data_co, "Currency", "Currency", 90)
self.kff(data_co, "Fiscal Year End", "FiscalYearEnd", 95)
self.kff(data_co, "Latest Quarter", "LatestQuarter", 100)
self.kff(data_co, "Market Capitalization", "MarketCapitalization", 110, thousands_character=True)
self.kff(data_co, "Shares Outstanding", "SharesOutstanding", 115, thousands_character=True)
self.kff(data_co, "Revenue", "RevenueTTM", 125, thousands_character=True)
self.kff(data_co, "Gross Profit", "GrossProfitTTM", 130, thousands_character=True)
self.kff(data_co, "EBITDA", "EBITDA", 135, thousands_character=True)
self.kff(data_co, "Earnings per Share", "EPS", 145)
self.kff(data_co, "Quarterly Earnings Growth", "QuarterlyEarningsGrowthYOY", 150)
self.kff(data_co, "Revenue per Share", "RevenuePerShareTTM", 155)
self.kff(data_co, "Quarterly Revenue Growth", "QuarterlyRevenueGrowthYOY", 160)
self.kff(data_co, "Return on Assets", "ReturnOnAssetsTTM", 170)
self.kff(data_co, "Return on Equity", "ReturnOnEquityTTM", 175)
self.kff(data_co, "Profit Margin", "ProfitMargin", 185)
self.kff(data_co, "Operating Margin", "OperatingMarginTTM", 190)
self.kff(data_co, "Price to Earnings", "PERatio", 200)
self.kff(data_co, "PE Forward", "ForwardPE", 205)
self.kff(data_co, "Price to Earnings Growth", "PEGRatio", 210)
self.kff(data_co, "Enterprise Value to Revenue", "EVToRevenue", 215)
self.kff(data_co, "Enterprise Value to EBITDA", "EVToEBITDA", 220)
self.kff(data_co, "Price to Sales", "PriceToSalesRatioTTM", 225)
self.kff(data_co, "Price to Book", "PriceToBookRatio", 230)
self.kff(data_co, "Book Value", "BookValue", 235)
self.kff(data_co, "Beta", "Beta", 240)
self.kff(data_co, "52 Week High", "52WeekHigh", 160, left_position=False)
self.kff(data_co, "52 Week Low", "52WeekLow", 165, left_position=False)
self.kff(data_co, "50 Day Moving Average", "50DayMovingAverage", 170, left_position=False)
self.kff(data_co, "200 Day Moving Average", "200DayMovingAverage", 175, left_position=False)
self.kff(data_co, "Analyst Target Price", "AnalystTargetPrice", 185, left_position=False)
self.kff(data_co, "Dividend per Share", "DividendPerShare", 195, left_position=False)
self.kff(data_co, "Dividend Yield", "DividendYield", 200, left_position=False)
self.kff(data_co, "Dividend Date", "DividendDate", 205, left_position=False)
self.kff(data_co, "Ex Dividend Date", "ExDividendDate", 210, left_position=False)
except:
pass
try:
# access data
data_qu = self.data.quote()
self.kff(data_qu, "Price", "05. price", 110, "b", "b", left_position=False)
self.kff(data_qu, "Change", "09. change", 115, left_position=False)
self.kff(data_qu, "Percent Change", "10. change percent", 120, left_position=False)
self.kff(data_qu, "Open", "02. open", 130, left_position=False)
self.kff(data_qu, "High", "03. high", 135, left_position=False)
self.kff(data_qu, "Low", "04. low", 140, left_position=False)
self.kff(data_qu, "Previous Close", "08. previous close", 145, left_position=False)
self.kff(data_qu, "Volume", "06. volume", 150, thousands_character=True, left_position=False)
except:
self.no_data_available()
def income_statement(self):
'''
Creates income statement on pdf page.
'''
try:
# access data
data_in = self.data.income_statement()
# set elements
self.arf(data_in, "", "fiscalDateEnding", 60)
self.arf(data_in, "", "reportedCurrency", 65)
self.arf(data_in, "Revenue", "totalRevenue", 75)
self.arf(data_in, "Cost of Revenue", "costOfRevenue", 80)
self.arf(data_in, "Gross Profit", "grossProfit", 85, "b", "b")
self.arf(data_in, "Operating Expense", "operatingExpenses", 95)
self.arf(data_in, "Selling General and Administrativ", "sellingGeneralAndAdministrative", 100)
self.arf(data_in, "Research Development", "researchAndDevelopment", 105)
self.arf(data_in, "EBITDA", "ebitda", 110, "b", "b")
self.arf(data_in, "Deprecation and Amortiziation", "depreciationAndAmortization", 120)
self.arf(data_in, "Deprecation", "depreciation", 125)
self.arf(data_in, "Operating Income", "operatingIncome", 130, "b", "b")
self.arf(data_in, "Interest Income", "interestIncome", 140)
self.arf(data_in, "Other non Operating Income or Expense", "otherNonOperatingIncome", 145)
self.arf(data_in, "EBIT", "ebit", 150, "b", "b")
self.arf(data_in, "Interest Expense", "interestExpense", 160)
self.arf(data_in, "EBT", "incomeBeforeTax", 165, "b", "b")
self.arf(data_in, "Income Tax Expense", "incomeTaxExpense", 175)
self.arf(data_in, "Net Income from Continuing Operations", "netIncomeFromContinuingOperations", 180)
self.arf(data_in, "Net Income", "netIncome", 185, "b", "b")
self.arf(data_in, "Net Investment Income", "investmentIncomeNet", 205)
self.arf(data_in, "Net Interest Income", "netInterestIncome", 210)
self.arf(data_in, "Non Interest Income", "nonInterestIncome", 215)
self.arf(data_in, "Interest and Dept Expense", "interestAndDebtExpense", 220)
self.arf(data_in, "Comprehensive Income Net of Tax", "comprehensiveIncomeNetOfTax", 225)
self.arf(data_in, "Cost of Goods and Services Sold", "costofGoodsAndServicesSold", 230)
except:
self.no_data_available()
def balance_sheet(self):
'''
Creates balance sheet on pdf page.
'''
try:
# access data
data_bs = self.data.balance_sheet()
# set elements
self.arf(data_bs, "", "fiscalDateEnding", 60)
self.arf(data_bs, "", "reportedCurrency", 65)
self.arf(data_bs, "Total Assets", "totalAssets", 75, "b", "b")
self.arf(data_bs, "Current Assets", "totalCurrentAssets", 80, "b", "b")
self.arf(data_bs, "Cash and Short Term Investments", "cashAndShortTermInvestments", 85)
self.arf(data_bs, "Cash and Cash Equivalents at CaVa", "cashAndCashEquivalentsAtCarryingValue", 90)
self.arf(data_bs, "Short Term Investments", "shortTermInvestments", 95)
self.arf(data_bs, "Current Net Receivable", "currentNetReceivables", 100)
self.arf(data_bs, "Inventory", "inventory", 105)
self.arf(data_bs, "Other Current Assets", "otherCurrentAssets", 110)
self.arf(data_bs, "Non Current Assets", "totalNonCurrentAssets", 115, "b", "b")
self.arf(data_bs, "Property Plant Equipment", "propertyPlantEquipment", 120)
self.arf(data_bs, "Accumulated Depreciation Amortization PPE", "accumulatedDepreciationAmortizationPPE", 125)
self.arf(data_bs, "Intangible Assets", "intangibleAssets", 130)
self.arf(data_bs, "Goodwill", "goodwill", 135)
self.arf(data_bs, "Intangible Assets Excluding Goodwill", "intangibleAssetsExcludingGoodwill", 140)
self.arf(data_bs, "Long Term Investments", "longTermInvestments", 145)
self.arf(data_bs, "Other Non Current Assets", "otherNonCurrrentAssets", 150)
self.arf(data_bs, "Total Liabilities", "totalLiabilities", 160, "b", "b")
self.arf(data_bs, "Current Liabilities", "totalCurrentLiabilities", 165, "b", "b")
self.arf(data_bs, "Current Accounts Payable", "currentAccountsPayable", 170)
self.arf(data_bs, "Short Term Debt", "shortTermDebt", 175)
self.arf(data_bs, "Deferred Revenue", "deferredRevenue", 180)
self.arf(data_bs, "Current Long Term Debt", "currentLongTermDebt", 185)
self.arf(data_bs, "Other Current Liabilities", "otherCurrentLiabilities", 190)
self.arf(data_bs, "Non Current Liabilities", "totalNonCurrentLiabilities", 195, "b", "b")
self.arf(data_bs, "Long Term Debt Non Current", "longTermDebtNoncurrent", 200)
self.arf(data_bs, "Other Non Current Liabilities", "otherNonCurrentLiabilities", 205)
self.arf(data_bs, "Shareholder Equity", "totalShareholderEquity", 215, "b", "b")
self.arf(data_bs, "Common Stock", "commonStock", 220)
self.arf(data_bs, "Treasury Stock", "treasuryStock",225)
self.arf(data_bs, "Retained Earnings", "retainedEarnings", 230)
self.arf(data_bs, "Common Stock Shares Outstanding", "commonStockSharesOutstanding", 235)
except:
self.no_data_available()
def cash_flow(self):
'''
Creates cash flow on pdf page.
'''
try:
# access data
data_cs = self.data.cash_flow()
# set elements
self.arf(data_cs, "", "fiscalDateEnding", 60)
self.arf(data_cs, "", "reportedCurrency", 65)
self.arf(data_cs, "Operating Cashflow", "operatingCashflow", 75, "b", "b")
self.arf(data_cs, "Net Income", "netIncome", 80)
self.arf(data_cs, "Payments for Operating Activities", "paymentsForOperatingActivities", 85)
self.arf(data_cs, "Proceeds from Operating Activities", "proceedsFromOperatingActivities", 90)
self.arf(data_cs, "Depreciation Depletion and Amortization", "depreciationDepletionAndAmortization", 95)
self.arf(data_cs, "Change in Operating Liabilities", "changeInOperatingLiabilities", 100)
self.arf(data_cs, "Change in Operating Assets", "changeInOperatingAssets", 105)
self.arf(data_cs, "Change in Receivables", "changeInReceivables", 110)
self.arf(data_cs, "Change in Inventory", "changeInInventory", 115)
self.arf(data_cs, "Cashflow from Investment", "cashflowFromInvestment", 125, "b", "b")
self.arf(data_cs, "Capital Expenditures", "capitalExpenditures", 130)
self.arf(data_cs, "Cashflow from Financing", "cashflowFromFinancing", 140, "b", "b")
self.arf(data_cs, "Dividend Payout", "dividendPayout", 145)
self.arf(data_cs, "Dividend Payout Common Stock", "dividendPayoutCommonStock", 150)
self.arf(data_cs, "Dividend Payout Preferred Stock", "operatingCashflow", 155)
self.arf(data_cs, "Payments for Repurchase of Common Stock", "paymentsForRepurchaseOfCommonStock", 160)
self.arf(data_cs, "Payments for Repurchase of Equity", "paymentsForRepurchaseOfEquity", 165)
self.arf(data_cs, "Payments for Repurchase of Preferred Stock", "paymentsForRepurchaseOfPreferredStock", 170)
self.arf(data_cs, "Proceeds from Repayments of Short Term D.", "proceedsFromRepaymentsOfShortTermDebt", 175)
self.arf(data_cs, "Proceeds from Issuance of Common Stock", "proceedsFromIssuanceOfCommonStock", 180)
self.arf(data_cs, "Proceeds from Issuance of Long Term Debt", "proceedsFromIssuanceOfLongTermDebtAndCapitalSecuritiesNet", 185)
self.arf(data_cs, "Proceeds from Issuance of Preferred Stock", "proceedsFromIssuanceOfPreferredStock", 190)
self.arf(data_cs, "Proceeds from Repurchase of Equity", "proceedsFromRepurchaseOfEquity", 195)
self.arf(data_cs, "Proceeds from Sale of Treasury Stock", "proceedsFromSaleOfTreasuryStock", 200)
self.arf(data_cs, "Change in Cash and Cash Equivalents", "changeInCashAndCashEquivalents", 210)
self.arf(data_cs, "Change in Exchange Rate", "changeInExchangeRate", 215)
except:
self.no_data_available()
def technical_analysis(self):
"""
Insert plots in pdf
"""
download_folder = os.path.expanduser("~")+"/Downloads/"
self.pdf.image(f"{download_folder}/stock-report_sma.png", 5, 55, self.width - 10)
self.pdf.image(f"{download_folder}/stock-report_bb.png", 5, 150, self.width - 10)
self.new_page()
self.pdf.image(f"{download_folder}/stock-report_macd.png", 5, 55, self.width - 10)
self.pdf.image(f"{download_folder}/stock-report_rsi.png", 5, 150, self.width - 10)
self.new_page()
self.pdf.image(f"{download_folder}/stock-report_dpc.png", 5, 55, self.width - 10)
self.pdf.image(f"{download_folder}/stock-report_md.png", 5, 150, self.width - 10)
os.remove(f"{download_folder}/stock-report_sma.png")
os.remove(f"{download_folder}/stock-report_bb.png")
os.remove(f"{download_folder}/stock-report_macd.png")
os.remove(f"{download_folder}/stock-report_rsi.png")
os.remove(f"{download_folder}/stock-report_dpc.png")
os.remove(f"{download_folder}/stock-report_md.png") | 46.888383 | 139 | 0.598815 |
import os
from fpdf import FPDF
from datetime import date
class PDF:
def __init__(self, data, name, ticker):
self.date = date.today()
self.data = data
self.name = name
self.ticker = ticker
self.width = 210
self.height = 297
pdf = FPDF("P", "mm", "A4")
def new_page(self):
self.pdf.add_page()
self.pdf.set_font("Arial", "B", 12)
this_dir, this_filename = os.path.split(__file__)
header = os.path.join(this_dir, "resources", "header.png")
footer = os.path.join(this_dir, "resources", "footer.png")
self.pdf.image(header, 0, 0, self.width)
self.pdf.image(footer, 0, 252, self.width)
def create_title(self):
self.pdf.set_font("Arial", "b", 40)
self.pdf.ln(30)
self.pdf.write(4, f"{self.name}")
self.pdf.ln(10)
self.pdf.set_font("Arial", "", 10)
self.pdf.write(4, f"Created: {self.date}")
self.pdf.ln(5)
def create_heading(self, headline):
self.pdf.set_text_color(0, 0, 0)
self.pdf.set_font("Arial", "b", 20)
self.pdf.ln(30)
self.pdf.write(4, f"{headline}")
self.pdf.ln(5)
self.pdf.set_font("Arial", "", 10)
self.pdf.write(4, f"Symbol: {self.ticker}")
self.pdf.ln(5)
def no_data_available(self):
self.pdf.set_text_color(0, 0, 0)
self.pdf.set_font("Arial", "b", 40)
self.pdf.ln(100)
self.pdf.write(4, "No data available")
def category_key_figures(self, data, category):
category_data = data
category_data = category_data[category].values[0]
return category_data
def category_annual_report(self, data, year ,category):
end = year + 1
category_annual_report = data
category_annual_report = category_annual_report.iloc[year:end , :]
category_annual_report = category_annual_report[category].values[0]
return category_annual_report
def kff(self, data, key, category, y, key_style="", value_style="", left_position=True, thousands_character=False):
if left_position:
x_key = 10
x_value = 70
else:
x_key = 115
x_value = 175
self.pdf.set_font("Arial", key_style, 12)
self.pdf.set_xy(x_key, y)
self.pdf.cell(0, txt=f"{key}")
value = self.category_key_figures(data, category)
if thousands_character:
try:
value = int(value)
value = f'{value:,}'
except:
value = value
else:
value = value
self.pdf.set_font("Arial", value_style, 12)
self.pdf.set_xy(x_value, y)
self.pdf.cell(0, txt=f"{value}")
self.pdf.set_text_color(0, 0, 0)
def arf(self, data, key, category, y, key_style="", value_style=""):
self.pdf.set_font("Arial", key_style, 12)
self.pdf.set_xy(10, y)
self.pdf.cell(0, txt=f"{key}")
self.pdf.set_font("Arial", value_style, 12)
self.pdf.set_xy(95, y)
value_1 = self.category_annual_report(data, 0, category)
try:
value_1 = int(value_1)
value_1 = f'{value_1:,}'
except:
value_1 = value_1
self.pdf.cell(0, txt=f"{value_1}")
self.pdf.set_xy(135, y)
value_2 = self.category_annual_report(data, 1, category)
try:
value_2 = int(value_2)
value_2 = f'{value_2:,}'
except:
value_2 = value_2
self.pdf.cell(0, txt=f"{value_2}")
self.pdf.set_xy(175, y)
value_3 = self.category_annual_report(data, 2, category)
try:
value_3 = int(value_3)
value_3 = f'{value_3:,}'
except:
value_3 = value_3
self.pdf.cell(0, txt=f"{value_3}")
def key_figures(self):
try:
data_co = self.data.company_overview()
self.kff(data_co, "Industry", "Industry", 70)
self.kff(data_co, "Sector", "Sector", 75)
self.kff(data_co, "Country", "Country", 80)
self.kff(data_co, "Exchange", "Exchange", 85)
self.kff(data_co, "Currency", "Currency", 90)
self.kff(data_co, "Fiscal Year End", "FiscalYearEnd", 95)
self.kff(data_co, "Latest Quarter", "LatestQuarter", 100)
self.kff(data_co, "Market Capitalization", "MarketCapitalization", 110, thousands_character=True)
self.kff(data_co, "Shares Outstanding", "SharesOutstanding", 115, thousands_character=True)
self.kff(data_co, "Revenue", "RevenueTTM", 125, thousands_character=True)
self.kff(data_co, "Gross Profit", "GrossProfitTTM", 130, thousands_character=True)
self.kff(data_co, "EBITDA", "EBITDA", 135, thousands_character=True)
self.kff(data_co, "Earnings per Share", "EPS", 145)
self.kff(data_co, "Quarterly Earnings Growth", "QuarterlyEarningsGrowthYOY", 150)
self.kff(data_co, "Revenue per Share", "RevenuePerShareTTM", 155)
self.kff(data_co, "Quarterly Revenue Growth", "QuarterlyRevenueGrowthYOY", 160)
self.kff(data_co, "Return on Assets", "ReturnOnAssetsTTM", 170)
self.kff(data_co, "Return on Equity", "ReturnOnEquityTTM", 175)
self.kff(data_co, "Profit Margin", "ProfitMargin", 185)
self.kff(data_co, "Operating Margin", "OperatingMarginTTM", 190)
self.kff(data_co, "Price to Earnings", "PERatio", 200)
self.kff(data_co, "PE Forward", "ForwardPE", 205)
self.kff(data_co, "Price to Earnings Growth", "PEGRatio", 210)
self.kff(data_co, "Enterprise Value to Revenue", "EVToRevenue", 215)
self.kff(data_co, "Enterprise Value to EBITDA", "EVToEBITDA", 220)
self.kff(data_co, "Price to Sales", "PriceToSalesRatioTTM", 225)
self.kff(data_co, "Price to Book", "PriceToBookRatio", 230)
self.kff(data_co, "Book Value", "BookValue", 235)
self.kff(data_co, "Beta", "Beta", 240)
self.kff(data_co, "52 Week High", "52WeekHigh", 160, left_position=False)
self.kff(data_co, "52 Week Low", "52WeekLow", 165, left_position=False)
self.kff(data_co, "50 Day Moving Average", "50DayMovingAverage", 170, left_position=False)
self.kff(data_co, "200 Day Moving Average", "200DayMovingAverage", 175, left_position=False)
self.kff(data_co, "Analyst Target Price", "AnalystTargetPrice", 185, left_position=False)
self.kff(data_co, "Dividend per Share", "DividendPerShare", 195, left_position=False)
self.kff(data_co, "Dividend Yield", "DividendYield", 200, left_position=False)
self.kff(data_co, "Dividend Date", "DividendDate", 205, left_position=False)
self.kff(data_co, "Ex Dividend Date", "ExDividendDate", 210, left_position=False)
except:
pass
try:
data_qu = self.data.quote()
self.kff(data_qu, "Price", "05. price", 110, "b", "b", left_position=False)
self.kff(data_qu, "Change", "09. change", 115, left_position=False)
self.kff(data_qu, "Percent Change", "10. change percent", 120, left_position=False)
self.kff(data_qu, "Open", "02. open", 130, left_position=False)
self.kff(data_qu, "High", "03. high", 135, left_position=False)
self.kff(data_qu, "Low", "04. low", 140, left_position=False)
self.kff(data_qu, "Previous Close", "08. previous close", 145, left_position=False)
self.kff(data_qu, "Volume", "06. volume", 150, thousands_character=True, left_position=False)
except:
self.no_data_available()
def income_statement(self):
try:
data_in = self.data.income_statement()
self.arf(data_in, "", "fiscalDateEnding", 60)
self.arf(data_in, "", "reportedCurrency", 65)
self.arf(data_in, "Revenue", "totalRevenue", 75)
self.arf(data_in, "Cost of Revenue", "costOfRevenue", 80)
self.arf(data_in, "Gross Profit", "grossProfit", 85, "b", "b")
self.arf(data_in, "Operating Expense", "operatingExpenses", 95)
self.arf(data_in, "Selling General and Administrativ", "sellingGeneralAndAdministrative", 100)
self.arf(data_in, "Research Development", "researchAndDevelopment", 105)
self.arf(data_in, "EBITDA", "ebitda", 110, "b", "b")
self.arf(data_in, "Deprecation and Amortiziation", "depreciationAndAmortization", 120)
self.arf(data_in, "Deprecation", "depreciation", 125)
self.arf(data_in, "Operating Income", "operatingIncome", 130, "b", "b")
self.arf(data_in, "Interest Income", "interestIncome", 140)
self.arf(data_in, "Other non Operating Income or Expense", "otherNonOperatingIncome", 145)
self.arf(data_in, "EBIT", "ebit", 150, "b", "b")
self.arf(data_in, "Interest Expense", "interestExpense", 160)
self.arf(data_in, "EBT", "incomeBeforeTax", 165, "b", "b")
self.arf(data_in, "Income Tax Expense", "incomeTaxExpense", 175)
self.arf(data_in, "Net Income from Continuing Operations", "netIncomeFromContinuingOperations", 180)
self.arf(data_in, "Net Income", "netIncome", 185, "b", "b")
self.arf(data_in, "Net Investment Income", "investmentIncomeNet", 205)
self.arf(data_in, "Net Interest Income", "netInterestIncome", 210)
self.arf(data_in, "Non Interest Income", "nonInterestIncome", 215)
self.arf(data_in, "Interest and Dept Expense", "interestAndDebtExpense", 220)
self.arf(data_in, "Comprehensive Income Net of Tax", "comprehensiveIncomeNetOfTax", 225)
self.arf(data_in, "Cost of Goods and Services Sold", "costofGoodsAndServicesSold", 230)
except:
self.no_data_available()
def balance_sheet(self):
try:
data_bs = self.data.balance_sheet()
self.arf(data_bs, "", "fiscalDateEnding", 60)
self.arf(data_bs, "", "reportedCurrency", 65)
self.arf(data_bs, "Total Assets", "totalAssets", 75, "b", "b")
self.arf(data_bs, "Current Assets", "totalCurrentAssets", 80, "b", "b")
self.arf(data_bs, "Cash and Short Term Investments", "cashAndShortTermInvestments", 85)
self.arf(data_bs, "Cash and Cash Equivalents at CaVa", "cashAndCashEquivalentsAtCarryingValue", 90)
self.arf(data_bs, "Short Term Investments", "shortTermInvestments", 95)
self.arf(data_bs, "Current Net Receivable", "currentNetReceivables", 100)
self.arf(data_bs, "Inventory", "inventory", 105)
self.arf(data_bs, "Other Current Assets", "otherCurrentAssets", 110)
self.arf(data_bs, "Non Current Assets", "totalNonCurrentAssets", 115, "b", "b")
self.arf(data_bs, "Property Plant Equipment", "propertyPlantEquipment", 120)
self.arf(data_bs, "Accumulated Depreciation Amortization PPE", "accumulatedDepreciationAmortizationPPE", 125)
self.arf(data_bs, "Intangible Assets", "intangibleAssets", 130)
self.arf(data_bs, "Goodwill", "goodwill", 135)
self.arf(data_bs, "Intangible Assets Excluding Goodwill", "intangibleAssetsExcludingGoodwill", 140)
self.arf(data_bs, "Long Term Investments", "longTermInvestments", 145)
self.arf(data_bs, "Other Non Current Assets", "otherNonCurrrentAssets", 150)
self.arf(data_bs, "Total Liabilities", "totalLiabilities", 160, "b", "b")
self.arf(data_bs, "Current Liabilities", "totalCurrentLiabilities", 165, "b", "b")
self.arf(data_bs, "Current Accounts Payable", "currentAccountsPayable", 170)
self.arf(data_bs, "Short Term Debt", "shortTermDebt", 175)
self.arf(data_bs, "Deferred Revenue", "deferredRevenue", 180)
self.arf(data_bs, "Current Long Term Debt", "currentLongTermDebt", 185)
self.arf(data_bs, "Other Current Liabilities", "otherCurrentLiabilities", 190)
self.arf(data_bs, "Non Current Liabilities", "totalNonCurrentLiabilities", 195, "b", "b")
self.arf(data_bs, "Long Term Debt Non Current", "longTermDebtNoncurrent", 200)
self.arf(data_bs, "Other Non Current Liabilities", "otherNonCurrentLiabilities", 205)
self.arf(data_bs, "Shareholder Equity", "totalShareholderEquity", 215, "b", "b")
self.arf(data_bs, "Common Stock", "commonStock", 220)
self.arf(data_bs, "Treasury Stock", "treasuryStock",225)
self.arf(data_bs, "Retained Earnings", "retainedEarnings", 230)
self.arf(data_bs, "Common Stock Shares Outstanding", "commonStockSharesOutstanding", 235)
except:
self.no_data_available()
def cash_flow(self):
try:
data_cs = self.data.cash_flow()
self.arf(data_cs, "", "fiscalDateEnding", 60)
self.arf(data_cs, "", "reportedCurrency", 65)
self.arf(data_cs, "Operating Cashflow", "operatingCashflow", 75, "b", "b")
self.arf(data_cs, "Net Income", "netIncome", 80)
self.arf(data_cs, "Payments for Operating Activities", "paymentsForOperatingActivities", 85)
self.arf(data_cs, "Proceeds from Operating Activities", "proceedsFromOperatingActivities", 90)
self.arf(data_cs, "Depreciation Depletion and Amortization", "depreciationDepletionAndAmortization", 95)
self.arf(data_cs, "Change in Operating Liabilities", "changeInOperatingLiabilities", 100)
self.arf(data_cs, "Change in Operating Assets", "changeInOperatingAssets", 105)
self.arf(data_cs, "Change in Receivables", "changeInReceivables", 110)
self.arf(data_cs, "Change in Inventory", "changeInInventory", 115)
self.arf(data_cs, "Cashflow from Investment", "cashflowFromInvestment", 125, "b", "b")
self.arf(data_cs, "Capital Expenditures", "capitalExpenditures", 130)
self.arf(data_cs, "Cashflow from Financing", "cashflowFromFinancing", 140, "b", "b")
self.arf(data_cs, "Dividend Payout", "dividendPayout", 145)
self.arf(data_cs, "Dividend Payout Common Stock", "dividendPayoutCommonStock", 150)
self.arf(data_cs, "Dividend Payout Preferred Stock", "operatingCashflow", 155)
self.arf(data_cs, "Payments for Repurchase of Common Stock", "paymentsForRepurchaseOfCommonStock", 160)
self.arf(data_cs, "Payments for Repurchase of Equity", "paymentsForRepurchaseOfEquity", 165)
self.arf(data_cs, "Payments for Repurchase of Preferred Stock", "paymentsForRepurchaseOfPreferredStock", 170)
self.arf(data_cs, "Proceeds from Repayments of Short Term D.", "proceedsFromRepaymentsOfShortTermDebt", 175)
self.arf(data_cs, "Proceeds from Issuance of Common Stock", "proceedsFromIssuanceOfCommonStock", 180)
self.arf(data_cs, "Proceeds from Issuance of Long Term Debt", "proceedsFromIssuanceOfLongTermDebtAndCapitalSecuritiesNet", 185)
self.arf(data_cs, "Proceeds from Issuance of Preferred Stock", "proceedsFromIssuanceOfPreferredStock", 190)
self.arf(data_cs, "Proceeds from Repurchase of Equity", "proceedsFromRepurchaseOfEquity", 195)
self.arf(data_cs, "Proceeds from Sale of Treasury Stock", "proceedsFromSaleOfTreasuryStock", 200)
self.arf(data_cs, "Change in Cash and Cash Equivalents", "changeInCashAndCashEquivalents", 210)
self.arf(data_cs, "Change in Exchange Rate", "changeInExchangeRate", 215)
except:
self.no_data_available()
def technical_analysis(self):
download_folder = os.path.expanduser("~")+"/Downloads/"
self.pdf.image(f"{download_folder}/stock-report_sma.png", 5, 55, self.width - 10)
self.pdf.image(f"{download_folder}/stock-report_bb.png", 5, 150, self.width - 10)
self.new_page()
self.pdf.image(f"{download_folder}/stock-report_macd.png", 5, 55, self.width - 10)
self.pdf.image(f"{download_folder}/stock-report_rsi.png", 5, 150, self.width - 10)
self.new_page()
self.pdf.image(f"{download_folder}/stock-report_dpc.png", 5, 55, self.width - 10)
self.pdf.image(f"{download_folder}/stock-report_md.png", 5, 150, self.width - 10)
os.remove(f"{download_folder}/stock-report_sma.png")
os.remove(f"{download_folder}/stock-report_bb.png")
os.remove(f"{download_folder}/stock-report_macd.png")
os.remove(f"{download_folder}/stock-report_rsi.png")
os.remove(f"{download_folder}/stock-report_dpc.png")
os.remove(f"{download_folder}/stock-report_md.png") | true | true |
1c2b5707eacc35166817d17fd215e17cbfc0edcd | 1,651 | py | Python | bobenv.py | GT-melee/initial-trial | 88799120788130805927c7139c477aee06b435e1 | [
"MIT"
] | null | null | null | bobenv.py | GT-melee/initial-trial | 88799120788130805927c7139c477aee06b435e1 | [
"MIT"
] | null | null | null | bobenv.py | GT-melee/initial-trial | 88799120788130805927c7139c477aee06b435e1 | [
"MIT"
] | null | null | null | import math
import gym
from gym_minigrid.envs import EmptyEnv, MiniGridEnv, Grid, Goal
import numpy as np
from gym_minigrid.wrappers import RGBImgPartialObsWrapper, ImgObsWrapper
class _BobEnv(MiniGridEnv):
    """
    Empty grid environment with a randomly placed goal and sparse reward.

    Differences from the stock MiniGrid empty env: the sparse reward is
    scaled by the grid size, and the action space is restricted to the
    first three actions (turn left, turn right, move forward).
    """

    def __init__(self,
                 size,
                 ):
        """
        :param size: width and height of the (square) grid
        """
        self.size = size
        self.agent_start_pos = (1, 1)
        self.agent_start_dir = 0

        super().__init__(
            grid_size=size,
            max_steps=4*size*size,
            # Set this to True for maximum speed
            see_through_walls=True
        )
        # Only left / right / forward are needed to reach the goal.
        self.action_space = gym.spaces.Discrete(3)

    def step(self, action):
        """Steps the wrapped env and scales the sparse reward by grid size."""
        obs, rew, done, info = super(_BobEnv, self).step(action)
        return obs, self.size * rew, done, info

    def _gen_grid(self, width, height):
        """Builds walls, places the goal at a random cell, then the agent."""
        # Create an empty grid
        self.grid = Grid(width, height)

        # Generate the surrounding walls
        self.grid.wall_rect(0, 0, width, height)

        # Place a goal square at a random interior cell; for grids too
        # small to sample from, fall back to a fixed position.
        pos = np.random.randint(2, height-2+1, (2,)) if 2 < height - 2 else (3, 3)
        self.put_obj(Goal(), pos[0], pos[1])

        # Place the agent
        if self.agent_start_pos is not None:
            self.agent_pos = self.agent_start_pos
            self.agent_dir = self.agent_start_dir
        else:
            self.place_agent()

        self.mission = "get to the green goal square"
def BobEnv(size):
    """Builds a _BobEnv of the given size wrapped to expose RGB partial-observation images."""
    env = _BobEnv(size)
    partial_obs_env = RGBImgPartialObsWrapper(env)
    return ImgObsWrapper(partial_obs_env)
def GetBobEnvClass(size):
    """Returns a zero-argument factory that builds BobEnv instances of the given size."""
    def _make_env():
        return BobEnv(size)
    return _make_env
import gym
from gym_minigrid.envs import EmptyEnv, MiniGridEnv, Grid, Goal
import numpy as np
from gym_minigrid.wrappers import RGBImgPartialObsWrapper, ImgObsWrapper
class _BobEnv(MiniGridEnv):
def __init__(self,
size,
):
self.size = size
self.agent_start_pos = (1,1)
self.agent_start_dir = 0
super().__init__(
grid_size=size,
max_steps=4*size*size,
see_through_walls=True
)
self.action_space = gym.spaces.Discrete(3)
def step(self, action):
obs, rew, done, info = super(_BobEnv, self).step(action)
return obs, self.size * rew, done, info
def _gen_grid(self, width, height):
self.grid = Grid(width, height)
self.grid.wall_rect(0, 0, width, height)
pos = np.random.randint(2,height-2+1,(2,)) if 2 < height - 2 else (3,3)
self.put_obj(Goal(), pos[0], pos[1])
if self.agent_start_pos is not None:
self.agent_pos = self.agent_start_pos
self.agent_dir = self.agent_start_dir
else:
self.place_agent()
self.mission = "get to the green goal square"
def BobEnv(size):
return ImgObsWrapper(RGBImgPartialObsWrapper(_BobEnv(size)))
def GetBobEnvClass(size):
def temp():
return BobEnv(size)
return temp | true | true |
1c2b596f621af73e6b9b729fb2b3b74a24f2a32f | 983 | py | Python | aliyun-python-sdk-cloudmarketing/aliyunsdkcloudmarketing/request/v20180910/RequestUploadFileRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cloudmarketing/aliyunsdkcloudmarketing/request/v20180910/RequestUploadFileRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-cloudmarketing/aliyunsdkcloudmarketing/request/v20180910/RequestUploadFileRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class RequestUploadFileRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cloudmarketing', '2018-09-10', 'RequestUploadFile') | 40.958333 | 80 | 0.775178 |
from aliyunsdkcore.request import RpcRequest
class RequestUploadFileRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cloudmarketing', '2018-09-10', 'RequestUploadFile') | true | true |
1c2b59918a6ccaefb2b585c1f6d876aa8abfb389 | 556 | py | Python | collect_monitor.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | collect_monitor.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | collect_monitor.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | #!/usr/bin/env python
"""
Author: Friedrich Schotte
Date created: 2019-02-02
Date last modified: 2019-02-03
"""
from redirect import redirect
redirect("collect_monitor")
from CA import camonitor
camonitor("NIH:TIMING.registers.ch7_state.count")
camonitor("NIH:TIMING.registers.image_number.count")
camonitor("NIH:TIMING.registers.xdet_count.count")
camonitor("NIH:TIMING.registers.xdet_trig_count.count")
camonitor("NIH:TIMING.registers.xdet_acq_count.count")
camonitor("NIH:TIMING.registers.acquiring.count")
from time import sleep
while True: sleep(0.1)
| 30.888889 | 55 | 0.802158 |
from redirect import redirect
redirect("collect_monitor")
from CA import camonitor
camonitor("NIH:TIMING.registers.ch7_state.count")
camonitor("NIH:TIMING.registers.image_number.count")
camonitor("NIH:TIMING.registers.xdet_count.count")
camonitor("NIH:TIMING.registers.xdet_trig_count.count")
camonitor("NIH:TIMING.registers.xdet_acq_count.count")
camonitor("NIH:TIMING.registers.acquiring.count")
from time import sleep
while True: sleep(0.1)
| true | true |
1c2b5adc03c2891478a803b74f77ab8dd34fc1e3 | 3,257 | py | Python | airflow/providers/ftp/sensors/ftp.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | airflow/providers/ftp/sensors/ftp.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | 1 | 2019-05-14T14:32:40.000Z | 2019-05-14T14:32:40.000Z | airflow/providers/ftp/sensors/ftp.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ftplib
import re
from airflow.providers.ftp.hooks.ftp import FTPHook, FTPSHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class FTPSensor(BaseSensorOperator):
"""
Waits for a file or directory to be present on FTP.
:param path: Remote file or directory path
:type path: str
:param fail_on_transient_errors: Fail on all errors,
including 4xx transient errors. Default True.
:type fail_on_transient_errors: bool
:param ftp_conn_id: The connection to run the sensor against
:type ftp_conn_id: str
"""
template_fields = ('path',)
"""Errors that are transient in nature, and where action can be retried"""
transient_errors = [421, 425, 426, 434, 450, 451, 452]
error_code_pattern = re.compile(r"([\d]+)")
@apply_defaults
def __init__(
self,
path,
ftp_conn_id='ftp_default',
fail_on_transient_errors=True,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.path = path
self.ftp_conn_id = ftp_conn_id
self.fail_on_transient_errors = fail_on_transient_errors
def _create_hook(self):
"""Return connection hook."""
return FTPHook(ftp_conn_id=self.ftp_conn_id)
def _get_error_code(self, e):
"""Extract error code from ftp exception"""
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except ValueError:
return e
def poke(self, context):
with self._create_hook() as hook:
self.log.info('Poking for %s', self.path)
try:
hook.get_mod_time(self.path)
except ftplib.error_perm as e:
self.log.info('Ftp error encountered: %s', str(e))
error_code = self._get_error_code(e)
if ((error_code != 550) and
(self.fail_on_transient_errors or
(error_code not in self.transient_errors))):
raise e
return False
return True
class FTPSSensor(FTPSensor):
"""Waits for a file or directory to be present on FTP over SSL."""
def _create_hook(self):
"""Return connection hook."""
return FTPSHook(ftp_conn_id=self.ftp_conn_id)
| 33.57732 | 78 | 0.650906 |
import ftplib
import re
from airflow.providers.ftp.hooks.ftp import FTPHook, FTPSHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class FTPSensor(BaseSensorOperator):
template_fields = ('path',)
transient_errors = [421, 425, 426, 434, 450, 451, 452]
error_code_pattern = re.compile(r"([\d]+)")
@apply_defaults
def __init__(
self,
path,
ftp_conn_id='ftp_default',
fail_on_transient_errors=True,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.path = path
self.ftp_conn_id = ftp_conn_id
self.fail_on_transient_errors = fail_on_transient_errors
def _create_hook(self):
return FTPHook(ftp_conn_id=self.ftp_conn_id)
def _get_error_code(self, e):
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except ValueError:
return e
def poke(self, context):
with self._create_hook() as hook:
self.log.info('Poking for %s', self.path)
try:
hook.get_mod_time(self.path)
except ftplib.error_perm as e:
self.log.info('Ftp error encountered: %s', str(e))
error_code = self._get_error_code(e)
if ((error_code != 550) and
(self.fail_on_transient_errors or
(error_code not in self.transient_errors))):
raise e
return False
return True
class FTPSSensor(FTPSensor):
def _create_hook(self):
return FTPSHook(ftp_conn_id=self.ftp_conn_id)
| true | true |
1c2b5add923f0efb8e9b26f4f4bf3fd50e09fa50 | 25,746 | py | Python | mmdet/models/roi_heads/keypoint_roi_head.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/keypoint_roi_head.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/keypoint_roi_head.py | VGrondin/CBNetV2_mask_remote | b27246af5081d5395db3c3105d32226de05fcd13 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
from torch.nn import functional as F
from typing import Any, List, Tuple, Union
from detectron2.layers import cat
from mmdet.core import bbox2result, bbox2roi
from ..builder import HEADS, build_head, build_roi_extractor
from .standard_roi_head import StandardRoIHead
_TOTAL_SKIPPED = 0
def _keypoints_to_heatmap(
keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space.
Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the
closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the
continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"):
d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
Arguments:
keypoints: tensor of keypoint locations in of shape (N, K, 3).
rois: Nx4 tensor of rois in xyxy format
heatmap_size: integer side length of square heatmap.
Returns:
heatmaps: A tensor of shape (N, K) containing an integer spatial label
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
valid: A tensor of shape (N, K) containing whether each keypoint is in
the roi or not.
"""
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
"""
Extract predicted keypoint locations from heatmaps.
Args:
maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
each ROI and each keypoint.
rois (Tensor): (#ROIs, 4). The box of each ROI.
Returns:
Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
(x, y, logit, score) for each keypoint.
When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
"""
# The decorator use of torch.no_grad() was not supported by torchscript.
# https://github.com/pytorch/pytorch/issues/44768
maps = maps.detach()
rois = rois.detach()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_rois, num_keypoints = maps.shape[:2]
xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)
width_corrections = widths / widths_ceil
height_corrections = heights / heights_ceil
keypoints_idx = torch.arange(num_keypoints, device=maps.device)
for i in range(num_rois):
outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
roi_map = F.interpolate(
maps[[i]], size=outsize, mode="bicubic", align_corners=False
).squeeze(
0
) # #keypoints x H x W
# softmax over the spatial region
max_score, _ = roi_map.view(num_keypoints, -1).max(1)
max_score = max_score.view(num_keypoints, 1, 1)
tmp_full_resolution = (roi_map - max_score).exp_()
tmp_pool_resolution = (maps[i] - max_score).exp_()
# Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
# so that the scores of objects of different absolute sizes will be more comparable
roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)
w = roi_map.shape[2]
pos = roi_map.view(num_keypoints, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
assert (
roi_map_scores[keypoints_idx, y_int, x_int]
== roi_map_scores.view(num_keypoints, -1).max(1)[0]
).all()
x = (x_int.float() + 0.5) * width_corrections[i]
y = (y_int.float() + 0.5) * height_corrections[i]
xy_preds[i, :, 0] = x + offset_x[i]
xy_preds[i, :, 1] = y + offset_y[i]
xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]
return xy_preds
@HEADS.register_module()
class KeypointRoIHead(StandardRoIHead):
"""Simplest base roi head including one bbox head and one mask head."""
def __init__(self, output_heatmaps=False, keypoint_decoder=None, **kwargs):
super().__init__(**kwargs)
self.output_heatmaps = output_heatmaps
if keypoint_decoder:
self.keypoint_decoder = build_head(keypoint_decoder)
else:
assert output_heatmaps is True
self.keypoint_decoder = None
# def init_keypoint_head(self, keypoint_roi_extractor, keypoint_head):
self.with_keypoint = True
self.share_roi_extractor = False
keypoint_roi_extractor = dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32])
self.keypoint_roi_extractor = build_roi_extractor(keypoint_roi_extractor)
# if keypoint_roi_extractor is not None:
# self.keypoint_roi_extractor = build_roi_extractor(
# keypoint_roi_extractor)
# self.share_roi_extractor = False
# else:
# self.share_roi_extractor = True
# self.keypoint_roi_extractor = self.bbox_roi_extractor
keypoint_head=dict(
type='KeypointRCNNHead',
num_convs=8,
in_channels=256,
features_size=[256, 256, 256, 256],
conv_out_channels=512,
num_keypoints=5,
loss_keypoint=dict(type='MSELoss', loss_weight=5.0))
self.keypoint_head = build_head(keypoint_head)
def init_weights(self, pretrained):
super().init_weights(pretrained)
if self.with_keypoint and self.keypoint_head:
self.keypoint_head.init_weights()
def forward_dummy(self, x, proposals):
outs = super().forward_dummy(x, proposals)
# keypoints head
if self.with_keypoint:
pass
return outs
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_keypoints=None,
gt_masks=None,
heatmaps=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposals (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
sampling_results = []
bbox_results = {'bbox_feats': []}
if self.with_bbox or self.with_mask or self.with_keypoint:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
# if self.with_mask:
# mask_results = self._mask_forward_train(x, sampling_results,
# bbox_results['bbox_feats'],
# gt_masks, img_metas)
# # TODO: Support empty tensor input. #2280
# if mask_results['loss_mask'] is not None:
# losses.update(mask_results['loss_mask'])
if self.with_keypoint:
keypoint_results = self._keypoint_forward_train(
x, sampling_results, bbox_results['bbox_feats'], gt_keypoints,
heatmaps, img_metas, gt_bboxes)
if keypoint_results['loss_keypoint'] is not None:
# losses.update(keypoint_results['loss_keypoint'])
losses.update(loss_keypoint=keypoint_results['loss_keypoint'].unsqueeze(0))
return losses
def _keypoint_forward_train(self, x, sampling_results, bbox_feats,
gt_keypoints, heatmaps, img_metas, gt_bboxes):
pos_rois_all = []
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
pos_rois_all.append(pos_rois)
# if pos_rois.shape[0] == 0:
# return dict(loss_keypoint=None)
keypoint_results = self._keypoint_forward_2(x, pos_rois)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
if pos_inds.shape[0] == 0:
return dict(loss_keypoint=None)
keypoint_results = self._keypoint_forward_2(
x, pos_inds=pos_inds, bbox_feats=bbox_feats)
#
num_gt_instances = []
num_props = []
heatmaps = []
valid = []
for im_in_batch, res in enumerate(sampling_results):
num_gt_instances.append(len(gt_keypoints[im_in_batch]))
num_props.append(res.pos_bboxes.shape[0])
keypoints = gt_keypoints[im_in_batch]
heatmaps_per_image, valid_per_image = _keypoints_to_heatmap(
keypoints.reshape(-1,3),
res.pos_bboxes,
# gt_bboxes[im_in_batch][instances_per_image].unsqueeze(0),
56
)
# heatmaps_per_image : a tensor of shape (N, K) containing an integer spatial label
# in the range [0, heatmap_size**2 - 1] for each keypoint in the input
heatmaps.append(heatmaps_per_image.view(-1))
# valid_per_image : a tensor of shape (N, K) containing whether
# each keypoint is in the roi or not.
valid.append(valid_per_image.view(-1))
# DEBUG
# heatmaps_gt_56x56 = torch.zeros(1, 5, 56, 56)
# # create heatmap using gt (might need to inverse / and mod)
# heatmaps_gt_56x56[0, 0, int(heatmaps_per_image[0][0]/56), int(heatmaps_per_image[0][0]%56) ] = 1 # 56*X + Y = heatmaps_per_image[0][0]
# heatmaps_gt_56x56[0, 1, int(heatmaps_per_image[0][1]/56), int(heatmaps_per_image[0][1]%56) ] = 1 # 56*X + Y = heatmaps_per_image[0][0]
# heatmaps_gt_56x56[0, 2, int(heatmaps_per_image[0][2]/56), int(heatmaps_per_image[0][2]%56) ] = 1 # 56*X + Y = heatmaps_per_image[0][0]
# heatmaps_gt_56x56[0, 3, int(heatmaps_per_image[0][3]/56), int(heatmaps_per_image[0][3]%56) ] = 1 # 56*X + Y = heatmaps_per_image[0][0]
# heatmaps_gt_56x56[0, 4, int(heatmaps_per_image[0][4]/56), int(heatmaps_per_image[0][4]%56) ] = 1 # 56*X + Y = heatmaps_per_image[0][0]
# gt_from_heatmaps = heatmaps_to_keypoints(heatmaps_gt_56x56, gt_bboxes[im_in_batch][instances_per_image].cpu().clone().unsqueeze(0))
# print(gt_from_heatmaps[0,:,:2])
# print(gt_keypoints[im_in_batch][instances_per_image])
if len(heatmaps):
keypoint_targets = cat(heatmaps, dim=0)
# heatmaps_gt = cat(heatmaps_gt, dim=1)
valid_all = cat(valid, dim=0).to(dtype=torch.uint8)
valid = torch.nonzero(valid_all).squeeze(1)
# torch.mean (in binary_cross_entropy_with_logits) doesn't
# accept empty tensors, so handle it separately
if len(heatmaps) == 0 or valid.numel() == 0:
global _TOTAL_SKIPPED
_TOTAL_SKIPPED += 1
keypoint_results.update(loss_keypoint=keypoint_results['heatmaps'].sum() * 0, keypoint_targets=gt_keypoints)
return keypoint_results
N, K, H, W = keypoint_results['heatmaps'].shape
pred_keypoint_logits = keypoint_results['heatmaps'].view(N * K, H * W)
valid_preds = []
idx_prop = 0 # starts at 1 because 0modX would increment it anyways
idx_kp = 0 # starts at one for modulo
idx_gt = 0
idx_kp_tot = 0
for _, val in enumerate(valid_all):
if idx_gt < len(num_props) - 1:
if idx_kp == (num_props[idx_gt] * num_gt_instances[idx_gt] * K):
idx_gt += 1
idx_kp = 0
# print(idx_prop)
# idx_prop -= 1 # modulo 0 will add 1
# get
# next proposal
if idx_kp%(K*num_gt_instances[idx_gt]) == 0:
idx_prop += 1
if val > 0:
valid_preds.append((idx_prop-1)*K + idx_kp%K)
idx_kp += 1
idx_kp_tot += 1
if pred_keypoint_logits.shape[0] < ((idx_prop-1)*K + idx_kp_tot%K-1):
print('out of bound from valid ' + str(pred_keypoint_logits.shape[0]) + ' < ' + str((idx_prop-1)*K + idx_kp_tot%K-1))
print('Number of proposals = ' + str(pred_keypoint_logits.shape[0]) + ', idx_prop = ' + str((idx_prop-1)*K))
print('Number of heatmaps = ' + str(len(valid_all)) + ', idx_kp = ' + str(idx_kp_tot))
loss_keypoint = F.cross_entropy(
pred_keypoint_logits[valid_preds], keypoint_targets[valid], reduction="sum"
)
# loss_keypoint = keypoint_results['heatmaps'].sum() * 0
# If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch
# if normalizer is None:
normalizer = valid.numel()
loss_keypoint /= normalizer
# loss_keypoint = self.keypoint_head.loss(keypoint_results['heatmaps'],
# heatmap, 0)
keypoint_results.update(
loss_keypoint=loss_keypoint, keypoint_targets=gt_keypoints)
return keypoint_results
def _keypoint_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
keypoint_pred = self.keypoint_head(x)
keypoint_results = dict(heatmaps=keypoint_pred)
return keypoint_results
def _keypoint_forward_2(self, x, rois=None, pos_inds=None, bbox_feats=None):
"""Keypoint head forward function used in both training and testing."""
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
keypoints_feats = self.keypoint_roi_extractor(
x[:self.keypoint_roi_extractor.num_inputs], rois)
if self.with_shared_head:
keypoints_feats = self.shared_head(keypoints_feats)
else:
assert bbox_feats is not None
keypoints_feats = bbox_feats[pos_inds]
keypoint_pred = self.keypoint_head(keypoints_feats)
keypoint_results = dict(heatmaps=keypoint_pred)
return keypoint_results
def simple_test_keypoints(self,
x,
img_metas,
proposals=None,
rcnn_test_cfg=None,
rescale=False):
"""Test only keypoints without augmentation."""
assert self.keypoint_decoder is not None
scale_factor = img_metas[0]['scale_factor']
proposals[:,1] = proposals[:,1] * scale_factor[0]
proposals[:,2] = proposals[:,2] * scale_factor[1]
proposals[:,3] = proposals[:,3] * scale_factor[0]
proposals[:,4] = proposals[:,4] * scale_factor[1]
keypoint_results = self._keypoint_forward_2(x, rois=proposals)
# Convert heatmaps to keypoints
pred_keypoint_logits = keypoint_results['heatmaps']
pred_from_heatmaps = torch.zeros(pred_keypoint_logits.shape[0], pred_keypoint_logits.shape[1], 4)
for i in range(pred_keypoint_logits.shape[0]):
# create heatmap using gt (might need to inverse / and mod)
prop_boxes = torch.zeros(1,4)
prop_boxes[0] = proposals[i,1:] #* 0.3125
pred_from_heatmaps[i, :] = heatmaps_to_keypoints(pred_keypoint_logits[i].unsqueeze(0), proposals[i,1:].unsqueeze(0))
# Upscale keypoints to the original size
pred_from_heatmaps[i, :, 0] /= scale_factor[0]
pred_from_heatmaps[i, :, 1] /= scale_factor[1]
# print(pred_from_heatmaps[i,:,:2])
# pred = heatmaps_to_keypoints(pred_keypoint_logits, proposals[:,1:])
# pred = self.keypoint_decoder(res)
keypoint_results['keypoints'] = pred_from_heatmaps.cpu().numpy()
# Upscale keypoints to the original size
# pred[:, :, 0] /= scale_factor[0]
# pred[:, :, 1] /= scale_factor[1]
if self.output_heatmaps:
keypoint_results['heatmaps'] = keypoint_results['heatmaps'].cpu(
).numpy()
else:
keypoint_results.pop('heatmaps')
return keypoint_results
async def async_test_keypoints(self,
x,
img_metas,
proposals=None,
rcnn_test_cfg=None,
rescale=False):
"""Test only keypoints without augmentation."""
assert self.keypoint_decoder is not None
keypoint_results = self._keypoint_forward(x)
scale_factor = img_metas[0]['scale_factor']
# Convert heatmaps to keypoints
res = keypoint_results['heatmaps']
pred = self.keypoint_decoder(res)
keypoint_results['keypoints'] = pred.cpu().numpy()
# Upscale keypoints to the original size
pred[:, :, 0] /= scale_factor[0]
pred[:, :, 1] /= scale_factor[1]
if self.output_heatmaps:
keypoint_results['heatmaps'] = keypoint_results['heatmaps'].cpu(
).numpy()
else:
keypoint_results.pop('heatmaps')
return keypoint_results
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Async test without augmentation."""
if self.with_bbox:
det_bboxes, det_labels = await self.async_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
else:
bbox_results = np.zeros((1, 0, 5))
if not self.with_mask:
segm_results = None
else:
segm_results = await self.async_test_mask(
x,
img_metas,
det_bboxes,
det_labels,
rescale=rescale,
mask_test_cfg=self.test_cfg.get('mask'))
result = {'bbox': bbox_results, 'mask': segm_results}
if self.with_keypoint:
if self.keypoint_decoder is not None:
kpts_results = self.async_test_keypoints(
x, img_metas, rescale=rescale)
result.update(kpts_results)
else:
kpts_results = None
return result
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation."""
# assert self.with_bbox, 'Bbox head must be implemented.'
if self.with_bbox:
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
else:
bbox_results = np.zeros((1, 0, 5))
if self.with_mask:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
else:
segm_results = None
result = {'bbox': bbox_results, 'mask': segm_results}
if self.with_keypoint:
if self.with_bbox:
kpts_results = self.simple_test_keypoints(
x, img_metas, bbox2roi(det_bboxes), rescale=rescale)
# need to rescale keypoints
# else:
# kpts_results = self.simple_test_keypoints(x, img_metas,
# rescale=rescale)
# if self.keypoint_decoder is not None:
# kpts_results = self.simple_test_keypoints(
# x, img_metas, rescale=rescale)
result.update(kpts_results)
else:
kpts_results = None
return result
def aug_test(self, x, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
# recompute feats to save memory
det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
proposal_list,
self.test_cfg)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
bbox_results = bbox2result(_det_bboxes, det_labels,
self.bbox_head.num_classes)
# det_bboxes always keep the original scale
if self.with_mask:
segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
det_labels)
return bbox_results, segm_results
else:
return bbox_results
| 41.592892 | 151 | 0.567273 | import numpy as np
import torch
from torch.nn import functional as F
from typing import Any, List, Tuple, Union
from detectron2.layers import cat
from mmdet.core import bbox2result, bbox2roi
from ..builder import HEADS, build_head, build_roi_extractor
from .standard_roi_head import StandardRoIHead
_TOTAL_SKIPPED = 0
def _keypoints_to_heatmap(
keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
maps = maps.detach()
rois = rois.detach()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
widths_ceil = widths.ceil()
heights_ceil = heights.ceil()
num_rois, num_keypoints = maps.shape[:2]
xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)
width_corrections = widths / widths_ceil
height_corrections = heights / heights_ceil
keypoints_idx = torch.arange(num_keypoints, device=maps.device)
for i in range(num_rois):
outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
roi_map = F.interpolate(
maps[[i]], size=outsize, mode="bicubic", align_corners=False
).squeeze(
0
) max_score, _ = roi_map.view(num_keypoints, -1).max(1)
max_score = max_score.view(num_keypoints, 1, 1)
tmp_full_resolution = (roi_map - max_score).exp_()
tmp_pool_resolution = (maps[i] - max_score).exp_()
roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True)
w = roi_map.shape[2]
pos = roi_map.view(num_keypoints, -1).argmax(1)
x_int = pos % w
y_int = (pos - x_int) // w
assert (
roi_map_scores[keypoints_idx, y_int, x_int]
== roi_map_scores.view(num_keypoints, -1).max(1)[0]
).all()
x = (x_int.float() + 0.5) * width_corrections[i]
y = (y_int.float() + 0.5) * height_corrections[i]
xy_preds[i, :, 0] = x + offset_x[i]
xy_preds[i, :, 1] = y + offset_y[i]
xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]
return xy_preds
@HEADS.register_module()
class KeypointRoIHead(StandardRoIHead):
    """RoI head with an additional Mask-R-CNN style keypoint branch.

    On top of the standard bbox/mask branches, this head extracts per-RoI
    features with a dedicated RoIAlign extractor, predicts one heatmap per
    keypoint, and trains it with a cross-entropy loss over the spatial
    position of each visible ground-truth keypoint (the Detectron2
    keypoint-loss formulation).

    Args:
        output_heatmaps (bool): If True, raw heatmaps are kept in the test
            results. Must be True when no ``keypoint_decoder`` is given.
        keypoint_decoder (dict | None): Config of a head that decodes
            heatmaps into keypoint coordinates, built via ``build_head``.
    """

    def __init__(self, output_heatmaps=False, keypoint_decoder=None, **kwargs):
        super().__init__(**kwargs)
        self.output_heatmaps = output_heatmaps
        if keypoint_decoder:
            self.keypoint_decoder = build_head(keypoint_decoder)
        else:
            # Without a decoder the only useful output is the raw heatmaps.
            assert output_heatmaps is True
            self.keypoint_decoder = None
        self.with_keypoint = True
        self.share_roi_extractor = False
        # Dedicated 14x14 RoIAlign extractor for the keypoint branch.
        keypoint_roi_extractor = dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32])
        self.keypoint_roi_extractor = build_roi_extractor(keypoint_roi_extractor)
        keypoint_head = dict(
            type='KeypointRCNNHead',
            num_convs=8,
            in_channels=256,
            features_size=[256, 256, 256, 256],
            conv_out_channels=512,
            num_keypoints=5,
            loss_keypoint=dict(type='MSELoss', loss_weight=5.0))
        self.keypoint_head = build_head(keypoint_head)

    def init_weights(self, pretrained):
        """Initialize bbox/mask weights, then the keypoint head."""
        super().init_weights(pretrained)
        if self.with_keypoint and self.keypoint_head:
            self.keypoint_head.init_weights()

    def forward_dummy(self, x, proposals):
        """Dummy forward for FLOPs computation (keypoint branch skipped)."""
        outs = super().forward_dummy(x, proposals)
        if self.with_keypoint:
            pass
        return outs

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_keypoints=None,
                      gt_masks=None,
                      heatmaps=None):
        """Run the training forward pass and collect bbox + keypoint losses.

        Arguments follow ``StandardRoIHead.forward_train`` with the addition
        of ``gt_keypoints`` (per-image keypoint tensors) and optional
        precomputed ``heatmaps``.

        Returns:
            dict: losses from the bbox branch plus ``loss_keypoint``.
        """
        sampling_results = []
        bbox_results = {'bbox_feats': []}
        if self.with_bbox or self.with_mask or self.with_keypoint:
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            for i in range(num_imgs):
                assign_result = self.bbox_assigner.assign(
                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
                    gt_labels[i])
                sampling_result = self.bbox_sampler.sample(
                    assign_result,
                    proposal_list[i],
                    gt_bboxes[i],
                    gt_labels[i],
                    feats=[lvl_feat[i][None] for lvl_feat in x])
                sampling_results.append(sampling_result)
        losses = dict()
        if self.with_bbox:
            bbox_results = self._bbox_forward_train(x, sampling_results,
                                                    gt_bboxes, gt_labels,
                                                    img_metas)
            losses.update(bbox_results['loss_bbox'])
        keypoint_results = self._keypoint_forward_train(
            x, sampling_results, bbox_results['bbox_feats'], gt_keypoints,
            heatmaps, img_metas, gt_bboxes)
        if keypoint_results['loss_keypoint'] is not None:
            losses.update(
                loss_keypoint=keypoint_results['loss_keypoint'].unsqueeze(0))
        return losses

    def _keypoint_forward_train(self, x, sampling_results, bbox_feats,
                                gt_keypoints, heatmaps, img_metas, gt_bboxes):
        """Compute keypoint heatmaps for positive RoIs and their CE loss.

        Returns a dict with ``heatmaps``, ``loss_keypoint`` (or None when no
        positive RoIs exist in shared-extractor mode) and
        ``keypoint_targets``.
        """
        if not self.share_roi_extractor:
            pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
            keypoint_results = self._keypoint_forward_2(x, pos_rois)
        else:
            pos_inds = []
            device = bbox_feats.device
            for res in sampling_results:
                pos_inds.append(
                    torch.ones(
                        res.pos_bboxes.shape[0],
                        device=device,
                        dtype=torch.uint8))
                pos_inds.append(
                    torch.zeros(
                        res.neg_bboxes.shape[0],
                        device=device,
                        dtype=torch.uint8))
            pos_inds = torch.cat(pos_inds)
            if pos_inds.shape[0] == 0:
                return dict(loss_keypoint=None)
            keypoint_results = self._keypoint_forward_2(
                x, pos_inds=pos_inds, bbox_feats=bbox_feats)
        num_gt_instances = []
        num_props = []
        # NOTE(review): this rebinding shadows the `heatmaps` argument; the
        # targets are always rebuilt from gt_keypoints below.
        heatmaps = []
        valid = []
        for im_in_batch, res in enumerate(sampling_results):
            num_gt_instances.append(len(gt_keypoints[im_in_batch]))
            num_props.append(res.pos_bboxes.shape[0])
            keypoints = gt_keypoints[im_in_batch]
            # Discretize each (x, y, vis) keypoint into a 56x56 heatmap bin
            # per positive proposal.
            heatmaps_per_image, valid_per_image = _keypoints_to_heatmap(
                keypoints.reshape(-1, 3),
                res.pos_bboxes,
                56
            )
            heatmaps.append(heatmaps_per_image.view(-1))
            valid.append(valid_per_image.view(-1))
        # Flatten targets/validity over the whole batch.  (These two
        # assignments were truncated in the corrupted original, which left
        # an orphaned `dim=0).to(...)` fragment and undefined names.)
        keypoint_targets = torch.cat(heatmaps, dim=0)
        valid_all = torch.cat(valid, dim=0).to(dtype=torch.uint8)
        valid = torch.nonzero(valid_all).squeeze(1)
        # torch.mean (in binary_cross_entropy_with_logits) does not
        # accept empty tensors, so handle it separately
        if len(heatmaps) == 0 or valid.numel() == 0:
            global _TOTAL_SKIPPED
            _TOTAL_SKIPPED += 1
            # Zero loss that still carries the graph so backward() works.
            keypoint_results.update(
                loss_keypoint=keypoint_results['heatmaps'].sum() * 0,
                keypoint_targets=gt_keypoints)
            return keypoint_results
        N, K, H, W = keypoint_results['heatmaps'].shape
        pred_keypoint_logits = keypoint_results['heatmaps'].view(N * K, H * W)
        # Walk the flattened validity mask and map every valid target back
        # to the flat (proposal, keypoint) prediction index.  The counters
        # track: idx_prop - current proposal, idx_kp - position within the
        # current image's (proposals x instances x K) span, idx_gt - image.
        valid_preds = []
        idx_prop = 0
        idx_kp = 0
        idx_gt = 0
        idx_kp_tot = 0
        for _, val in enumerate(valid_all):
            if idx_gt < len(num_props) - 1:
                if idx_kp == (num_props[idx_gt] * num_gt_instances[idx_gt] * K):
                    idx_gt += 1
                    idx_kp = 0
            if idx_kp % (K * num_gt_instances[idx_gt]) == 0:
                idx_prop += 1
            if val > 0:
                valid_preds.append((idx_prop - 1) * K + idx_kp % K)
            idx_kp += 1
            idx_kp_tot += 1
        # Debug guard for index-mapping overruns.
        if pred_keypoint_logits.shape[0] < ((idx_prop-1)*K + idx_kp_tot%K-1):
            print('out of bound from valid ' + str(pred_keypoint_logits.shape[0]) + ' < ' + str((idx_prop-1)*K + idx_kp_tot%K-1))
            print('Number of proposals = ' + str(pred_keypoint_logits.shape[0]) + ', idx_prop = ' + str((idx_prop-1)*K))
            print('Number of heatmaps = ' + str(len(valid_all)) + ', idx_kp = ' + str(idx_kp_tot))
        # Cross-entropy over the H*W spatial positions of each heatmap,
        # restricted to visible keypoints.
        loss_keypoint = F.cross_entropy(
            pred_keypoint_logits[valid_preds], keypoint_targets[valid], reduction="sum"
        )
        # If a normalizer isn't specified, normalize by the number of
        # visible keypoints in the minibatch.
        normalizer = valid.numel()
        loss_keypoint /= normalizer
        keypoint_results.update(
            loss_keypoint=loss_keypoint, keypoint_targets=gt_keypoints)
        return keypoint_results

    def _keypoint_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
        """Apply the keypoint head directly to ``x`` (no RoI extraction)."""
        keypoint_pred = self.keypoint_head(x)
        keypoint_results = dict(heatmaps=keypoint_pred)
        return keypoint_results

    def _keypoint_forward_2(self, x, rois=None, pos_inds=None, bbox_feats=None):
        """Extract RoI features (or reuse bbox feats) and predict heatmaps.

        Exactly one of ``rois`` or (``pos_inds`` + ``bbox_feats``) must be
        supplied.
        """
        assert ((rois is not None) ^
                (pos_inds is not None and bbox_feats is not None))
        if rois is not None:
            keypoints_feats = self.keypoint_roi_extractor(
                x[:self.keypoint_roi_extractor.num_inputs], rois)
            if self.with_shared_head:
                keypoints_feats = self.shared_head(keypoints_feats)
        else:
            assert bbox_feats is not None
            keypoints_feats = bbox_feats[pos_inds]
        keypoint_pred = self.keypoint_head(keypoints_feats)
        keypoint_results = dict(heatmaps=keypoint_pred)
        return keypoint_results

    def simple_test_keypoints(self,
                              x,
                              img_metas,
                              proposals=None,
                              rcnn_test_cfg=None,
                              rescale=False):
        """Predict heatmaps for detection boxes and decode keypoints."""
        assert self.keypoint_decoder is not None
        scale_factor = img_metas[0]['scale_factor']
        # Boxes arrive at original-image scale; bring them to network-input
        # scale before RoI extraction (x coords use the width scale factor,
        # y coords the height one).
        proposals[:, 1] = proposals[:, 1] * scale_factor[0]
        proposals[:, 2] = proposals[:, 2] * scale_factor[1]
        proposals[:, 3] = proposals[:, 3] * scale_factor[0]
        proposals[:, 4] = proposals[:, 4] * scale_factor[1]
        keypoint_results = self._keypoint_forward_2(x, rois=proposals)
        pred_keypoint_logits = keypoint_results['heatmaps']
        # Per keypoint: (x, y, logit, probability score).
        pred_from_heatmaps = torch.zeros(
            pred_keypoint_logits.shape[0], pred_keypoint_logits.shape[1], 4)
        for i in range(pred_keypoint_logits.shape[0]):
            pred_from_heatmaps[i, :] = heatmaps_to_keypoints(
                pred_keypoint_logits[i].unsqueeze(0),
                proposals[i, 1:].unsqueeze(0))
            # Map decoded coordinates back to the original image scale.
            pred_from_heatmaps[i, :, 0] /= scale_factor[0]
            pred_from_heatmaps[i, :, 1] /= scale_factor[1]
        keypoint_results['keypoints'] = pred_from_heatmaps.cpu().numpy()
        if self.output_heatmaps:
            keypoint_results['heatmaps'] = keypoint_results['heatmaps'].cpu(
            ).numpy()
        else:
            keypoint_results.pop('heatmaps')
        return keypoint_results

    async def async_test_keypoints(self,
                                   x,
                                   img_metas,
                                   proposals=None,
                                   rcnn_test_cfg=None,
                                   rescale=False):
        """Async test: decode keypoints with the configured decoder."""
        assert self.keypoint_decoder is not None
        keypoint_results = self._keypoint_forward(x)
        scale_factor = img_metas[0]['scale_factor']
        res = keypoint_results['heatmaps']
        pred = self.keypoint_decoder(res)
        # Rescale to the original image size *before* exporting to numpy.
        # The original scaled the tensor after `.cpu().numpy()`, which
        # silently dropped the rescaling whenever `pred` lived on the GPU.
        pred[:, :, 0] /= scale_factor[0]
        pred[:, :, 1] /= scale_factor[1]
        keypoint_results['keypoints'] = pred.cpu().numpy()
        if self.output_heatmaps:
            keypoint_results['heatmaps'] = keypoint_results['heatmaps'].cpu(
            ).numpy()
        else:
            keypoint_results.pop('heatmaps')
        return keypoint_results

    async def async_simple_test(self,
                                x,
                                proposal_list,
                                img_metas,
                                proposals=None,
                                rescale=False):
        """Async test without augmentation; returns bbox/mask/keypoints."""
        if self.with_bbox:
            det_bboxes, det_labels = await self.async_test_bboxes(
                x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
            bbox_results = bbox2result(det_bboxes, det_labels,
                                       self.bbox_head.num_classes)
        else:
            bbox_results = np.zeros((1, 0, 5))
        if not self.with_mask:
            segm_results = None
        else:
            segm_results = await self.async_test_mask(
                x,
                img_metas,
                det_bboxes,
                det_labels,
                rescale=rescale,
                mask_test_cfg=self.test_cfg.get('mask'))
        result = {'bbox': bbox_results, 'mask': segm_results}
        if self.with_keypoint:
            if self.keypoint_decoder is not None:
                # `async_test_keypoints` is a coroutine and must be awaited;
                # the original stored the coroutine object in the results.
                kpts_results = await self.async_test_keypoints(
                    x, img_metas, rescale=rescale)
                result.update(kpts_results)
            else:
                kpts_results = None
        return result

    def simple_test(self,
                    x,
                    proposal_list,
                    img_metas,
                    proposals=None,
                    rescale=False):
        """Test without augmentation; returns dict with bbox/mask/keypoints."""
        if self.with_bbox:
            det_bboxes, det_labels = self.simple_test_bboxes(
                x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
            bbox_results = bbox2result(det_bboxes, det_labels,
                                       self.bbox_head.num_classes)
        else:
            bbox_results = np.zeros((1, 0, 5))
        if self.with_mask:
            segm_results = self.simple_test_mask(
                x, img_metas, det_bboxes, det_labels, rescale=rescale)
        else:
            segm_results = None
        result = {'bbox': bbox_results, 'mask': segm_results}
        if self.with_keypoint:
            if self.with_bbox:
                # NOTE(review): bbox2roi usually expects a list of per-image
                # tensors; verify det_bboxes' type matches what
                # simple_test_bboxes returns here.
                kpts_results = self.simple_test_keypoints(
                    x, img_metas, bbox2roi(det_bboxes), rescale=rescale)
                result.update(kpts_results)
            else:
                kpts_results = None
        return result

    def aug_test(self, x, proposal_list, img_metas, rescale=False):
        """Test with augmentations (bbox/mask only; no keypoint branch)."""
        det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
                                                      proposal_list,
                                                      self.test_cfg)
        if rescale:
            _det_bboxes = det_bboxes
        else:
            # Boxes are in the augmented-image frame; rescale a copy back
            # to the original image size.
            _det_bboxes = det_bboxes.clone()
            _det_bboxes[:, :4] *= det_bboxes.new_tensor(
                img_metas[0][0]['scale_factor'])
        bbox_results = bbox2result(_det_bboxes, det_labels,
                                   self.bbox_head.num_classes)
        if self.with_mask:
            segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
                                              det_labels)
            return bbox_results, segm_results
        else:
            return bbox_results
| true | true |
1c2b5b7db40236f6841cb409ec51c9284c7bc93a | 38,122 | py | Python | models/rexnetv1.py | www516717402/TinyNeuralNetwork | 23e7931b4377462fad94a9ab0651b6d9a346252d | [
"MIT"
] | 241 | 2021-11-02T06:59:37.000Z | 2022-03-31T03:20:42.000Z | models/rexnetv1.py | kingkie/TinyNeuralNetwork | 9b4313bbe6fb46d602681b69799e4725eef4d71b | [
"MIT"
] | 48 | 2021-11-03T11:55:06.000Z | 2022-03-29T10:46:07.000Z | models/rexnetv1.py | kingkie/TinyNeuralNetwork | 9b4313bbe6fb46d602681b69799e4725eef4d71b | [
"MIT"
] | 41 | 2021-11-02T07:50:43.000Z | 2022-03-29T03:47:45.000Z |
import torch
import torch.nn
import torch.functional
import torch.nn.functional
class rexnetv1(torch.nn.Module):
def __init__(self):
super().__init__()
self.features_0 = torch.nn.modules.conv.Conv2d(3, 32, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), bias=False)
self.features_1 = torch.nn.modules.batchnorm.BatchNorm2d(32)
self.features_3_out_0 = torch.nn.modules.conv.Conv2d(32, 32, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=32, bias=False)
self.features_3_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(32)
self.features_3_out_2 = torch.nn.modules.activation.ReLU6()
self.features_3_out_3 = torch.nn.modules.conv.Conv2d(32, 16, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_3_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(16)
self.features_4_out_0 = torch.nn.modules.conv.Conv2d(16, 96, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_4_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(96)
self.features_4_out_3 = torch.nn.modules.conv.Conv2d(96, 96, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=96, bias=False)
self.features_4_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(96)
self.features_4_out_5 = torch.nn.modules.activation.ReLU6()
self.features_4_out_6 = torch.nn.modules.conv.Conv2d(96, 27, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_4_out_7 = torch.nn.modules.batchnorm.BatchNorm2d(27)
self.features_5_out_0 = torch.nn.modules.conv.Conv2d(27, 162, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_5_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(162)
self.features_5_out_3 = torch.nn.modules.conv.Conv2d(162, 162, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=162, bias=False)
self.features_5_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(162)
self.features_5_out_5 = torch.nn.modules.activation.ReLU6()
self.features_5_out_6 = torch.nn.modules.conv.Conv2d(162, 38, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_5_out_7 = torch.nn.modules.batchnorm.BatchNorm2d(38)
self.features_6_out_0 = torch.nn.modules.conv.Conv2d(38, 228, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_6_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(228)
self.features_6_out_3 = torch.nn.modules.conv.Conv2d(228, 228, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=228, bias=False)
self.features_6_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(228)
self.features_6_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_6_out_5_fc_0 = torch.nn.modules.conv.Conv2d(228, 19, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_6_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(19)
self.features_6_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_6_out_5_fc_3 = torch.nn.modules.conv.Conv2d(19, 228, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_6_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_6_out_6 = torch.nn.modules.activation.ReLU6()
self.features_6_out_7 = torch.nn.modules.conv.Conv2d(228, 50, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_6_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(50)
self.features_7_out_0 = torch.nn.modules.conv.Conv2d(50, 300, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_7_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(300)
self.features_7_out_3 = torch.nn.modules.conv.Conv2d(300, 300, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=300, bias=False)
self.features_7_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(300)
self.features_7_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_7_out_5_fc_0 = torch.nn.modules.conv.Conv2d(300, 25, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_7_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(25)
self.features_7_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_7_out_5_fc_3 = torch.nn.modules.conv.Conv2d(25, 300, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_7_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_7_out_6 = torch.nn.modules.activation.ReLU6()
self.features_7_out_7 = torch.nn.modules.conv.Conv2d(300, 61, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_7_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(61)
self.features_8_out_0 = torch.nn.modules.conv.Conv2d(61, 366, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_8_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(366)
self.features_8_out_3 = torch.nn.modules.conv.Conv2d(366, 366, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=366, bias=False)
self.features_8_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(366)
self.features_8_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_8_out_5_fc_0 = torch.nn.modules.conv.Conv2d(366, 30, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_8_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(30)
self.features_8_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_8_out_5_fc_3 = torch.nn.modules.conv.Conv2d(30, 366, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_8_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_8_out_6 = torch.nn.modules.activation.ReLU6()
self.features_8_out_7 = torch.nn.modules.conv.Conv2d(366, 72, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_8_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(72)
self.features_9_out_0 = torch.nn.modules.conv.Conv2d(72, 432, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_9_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(432)
self.features_9_out_3 = torch.nn.modules.conv.Conv2d(432, 432, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=432, bias=False)
self.features_9_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(432)
self.features_9_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_9_out_5_fc_0 = torch.nn.modules.conv.Conv2d(432, 36, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_9_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(36)
self.features_9_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_9_out_5_fc_3 = torch.nn.modules.conv.Conv2d(36, 432, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_9_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_9_out_6 = torch.nn.modules.activation.ReLU6()
self.features_9_out_7 = torch.nn.modules.conv.Conv2d(432, 84, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_9_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(84)
self.features_10_out_0 = torch.nn.modules.conv.Conv2d(84, 504, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_10_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(504)
self.features_10_out_3 = torch.nn.modules.conv.Conv2d(504, 504, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=504, bias=False)
self.features_10_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(504)
self.features_10_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_10_out_5_fc_0 = torch.nn.modules.conv.Conv2d(504, 42, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_10_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(42)
self.features_10_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_10_out_5_fc_3 = torch.nn.modules.conv.Conv2d(42, 504, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_10_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_10_out_6 = torch.nn.modules.activation.ReLU6()
self.features_10_out_7 = torch.nn.modules.conv.Conv2d(504, 95, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_10_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(95)
self.features_11_out_0 = torch.nn.modules.conv.Conv2d(95, 570, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_11_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(570)
self.features_11_out_3 = torch.nn.modules.conv.Conv2d(570, 570, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=570, bias=False)
self.features_11_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(570)
self.features_11_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_11_out_5_fc_0 = torch.nn.modules.conv.Conv2d(570, 47, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_11_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(47)
self.features_11_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_11_out_5_fc_3 = torch.nn.modules.conv.Conv2d(47, 570, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_11_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_11_out_6 = torch.nn.modules.activation.ReLU6()
self.features_11_out_7 = torch.nn.modules.conv.Conv2d(570, 106, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_11_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(106)
self.features_12_out_0 = torch.nn.modules.conv.Conv2d(106, 636, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_12_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(636)
self.features_12_out_3 = torch.nn.modules.conv.Conv2d(636, 636, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=636, bias=False)
self.features_12_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(636)
self.features_12_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_12_out_5_fc_0 = torch.nn.modules.conv.Conv2d(636, 53, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_12_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(53)
self.features_12_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_12_out_5_fc_3 = torch.nn.modules.conv.Conv2d(53, 636, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_12_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_12_out_6 = torch.nn.modules.activation.ReLU6()
self.features_12_out_7 = torch.nn.modules.conv.Conv2d(636, 117, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_12_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(117)
self.features_13_out_0 = torch.nn.modules.conv.Conv2d(117, 702, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_13_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(702)
self.features_13_out_3 = torch.nn.modules.conv.Conv2d(702, 702, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=702, bias=False)
self.features_13_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(702)
self.features_13_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_13_out_5_fc_0 = torch.nn.modules.conv.Conv2d(702, 58, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_13_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(58)
self.features_13_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_13_out_5_fc_3 = torch.nn.modules.conv.Conv2d(58, 702, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_13_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_13_out_6 = torch.nn.modules.activation.ReLU6()
self.features_13_out_7 = torch.nn.modules.conv.Conv2d(702, 128, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_13_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(128)
self.features_14_out_0 = torch.nn.modules.conv.Conv2d(128, 768, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_14_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(768)
self.features_14_out_3 = torch.nn.modules.conv.Conv2d(768, 768, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=768, bias=False)
self.features_14_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(768)
self.features_14_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_14_out_5_fc_0 = torch.nn.modules.conv.Conv2d(768, 64, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_14_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(64)
self.features_14_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_14_out_5_fc_3 = torch.nn.modules.conv.Conv2d(64, 768, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_14_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_14_out_6 = torch.nn.modules.activation.ReLU6()
self.features_14_out_7 = torch.nn.modules.conv.Conv2d(768, 140, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_14_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(140)
self.features_15_out_0 = torch.nn.modules.conv.Conv2d(140, 840, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_15_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(840)
self.features_15_out_3 = torch.nn.modules.conv.Conv2d(840, 840, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=840, bias=False)
self.features_15_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(840)
self.features_15_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_15_out_5_fc_0 = torch.nn.modules.conv.Conv2d(840, 70, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_15_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(70)
self.features_15_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_15_out_5_fc_3 = torch.nn.modules.conv.Conv2d(70, 840, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_15_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_15_out_6 = torch.nn.modules.activation.ReLU6()
self.features_15_out_7 = torch.nn.modules.conv.Conv2d(840, 151, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_15_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(151)
self.features_16_out_0 = torch.nn.modules.conv.Conv2d(151, 906, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_16_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(906)
self.features_16_out_3 = torch.nn.modules.conv.Conv2d(906, 906, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=906, bias=False)
self.features_16_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(906)
self.features_16_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_16_out_5_fc_0 = torch.nn.modules.conv.Conv2d(906, 75, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_16_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(75)
self.features_16_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_16_out_5_fc_3 = torch.nn.modules.conv.Conv2d(75, 906, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_16_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_16_out_6 = torch.nn.modules.activation.ReLU6()
self.features_16_out_7 = torch.nn.modules.conv.Conv2d(906, 162, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_16_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(162)
self.features_17_out_0 = torch.nn.modules.conv.Conv2d(162, 972, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_17_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(972)
self.features_17_out_3 = torch.nn.modules.conv.Conv2d(972, 972, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=972, bias=False)
self.features_17_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(972)
self.features_17_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_17_out_5_fc_0 = torch.nn.modules.conv.Conv2d(972, 81, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_17_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(81)
self.features_17_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_17_out_5_fc_3 = torch.nn.modules.conv.Conv2d(81, 972, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_17_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_17_out_6 = torch.nn.modules.activation.ReLU6()
self.features_17_out_7 = torch.nn.modules.conv.Conv2d(972, 174, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_17_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(174)
self.features_18_out_0 = torch.nn.modules.conv.Conv2d(174, 1044, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_18_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(1044)
self.features_18_out_3 = torch.nn.modules.conv.Conv2d(1044, 1044, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=1044, bias=False)
self.features_18_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(1044)
self.features_18_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.features_18_out_5_fc_0 = torch.nn.modules.conv.Conv2d(1044, 87, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_18_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(87)
self.features_18_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
self.features_18_out_5_fc_3 = torch.nn.modules.conv.Conv2d(87, 1044, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
self.features_18_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
self.features_18_out_6 = torch.nn.modules.activation.ReLU6()
self.features_18_out_7 = torch.nn.modules.conv.Conv2d(1044, 185, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_18_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(185)
self.features_19 = torch.nn.modules.conv.Conv2d(185, 1280, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
self.features_20 = torch.nn.modules.batchnorm.BatchNorm2d(1280)
self.features_22 = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
self.output_0 = torch.nn.modules.dropout.Dropout(p=0.2)
self.output_1 = torch.nn.modules.conv.Conv2d(1280, 1000, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
    def forward(self, input_1):
        """Run the converted ReXNetV1 network on a batch of images.

        input_1: 4-D image tensor; the stem conv takes 3 input channels
            (the ``__main__`` smoke test feeds shape (2, 3, 224, 224)).
        Returns: the classifier output flattened to 2-D, one row per
            sample (1000 values each, from the final 1x1 conv).

        Recurring patterns in this generated code:
        * ``t.mul_(t.sigmoid())`` computes x * sigmoid(x) (SiLU/"swish")
          in place on the BatchNorm output.
        * ``x[:, 0:C].__iadd__(prev)`` adds the previous block's output
          into the first C channels of ``x`` *in place* -- the slice is a
          view, so ``x`` itself is mutated even though downstream code
          keeps using the ``x`` name (a channel-wise partial residual).
        * ``t.__mul__(gate)`` rescales features by the squeeze-and-excite
          gate (broadcast over H, W); this one is NOT in place.
        """
        # Stem: conv -> BN -> in-place SiLU.
        features_0 = self.features_0(input_1)
        features_1 = self.features_1(features_0)
        sigmoid_1 = features_1.sigmoid()
        mul_1 = features_1.mul_(sigmoid_1)
        # Block 3: depthwise 3x3 -> BN -> ReLU6 -> 1x1 project -> BN.
        features_3_out_0 = self.features_3_out_0(mul_1)
        features_3_out_1 = self.features_3_out_1(features_3_out_0)
        features_3_out_2 = self.features_3_out_2(features_3_out_1)
        features_3_out_3 = self.features_3_out_3(features_3_out_2)
        features_3_out_4 = self.features_3_out_4(features_3_out_3)
        # Block 4: 1x1 expand -> SiLU -> stride-2 depthwise -> ReLU6 -> project.
        features_4_out_0 = self.features_4_out_0(features_3_out_4)
        features_4_out_1 = self.features_4_out_1(features_4_out_0)
        sigmoid_2 = features_4_out_1.sigmoid()
        mul_2 = features_4_out_1.mul_(sigmoid_2)
        features_4_out_3 = self.features_4_out_3(mul_2)
        features_4_out_4 = self.features_4_out_4(features_4_out_3)
        features_4_out_5 = self.features_4_out_5(features_4_out_4)
        features_4_out_6 = self.features_4_out_6(features_4_out_5)
        features_4_out_7 = self.features_4_out_7(features_4_out_6)
        # Block 5: same shape as block 4 output, so a partial residual follows.
        features_5_out_0 = self.features_5_out_0(features_4_out_7)
        features_5_out_1 = self.features_5_out_1(features_5_out_0)
        sigmoid_3 = features_5_out_1.sigmoid()
        mul_3 = features_5_out_1.mul_(sigmoid_3)
        features_5_out_3 = self.features_5_out_3(mul_3)
        features_5_out_4 = self.features_5_out_4(features_5_out_3)
        features_5_out_5 = self.features_5_out_5(features_5_out_4)
        features_5_out_6 = self.features_5_out_6(features_5_out_5)
        features_5_out_7 = self.features_5_out_7(features_5_out_6)
        # In-place residual into the first 27 channels of features_5_out_7.
        getitem_1 = features_5_out_7[:, 0:27]
        add_1 = getitem_1.__iadd__(features_4_out_7)
        # Block 6 (stride 2, with SE gate).
        features_6_out_0 = self.features_6_out_0(features_5_out_7)
        features_6_out_1 = self.features_6_out_1(features_6_out_0)
        sigmoid_4 = features_6_out_1.sigmoid()
        mul_4 = features_6_out_1.mul_(sigmoid_4)
        features_6_out_3 = self.features_6_out_3(mul_4)
        features_6_out_4 = self.features_6_out_4(features_6_out_3)
        # SE gate: global avg-pool -> 1x1 fc -> BN -> ReLU -> 1x1 fc -> sigmoid.
        features_6_out_5_avg_pool = self.features_6_out_5_avg_pool(features_6_out_4)
        features_6_out_5_fc_0 = self.features_6_out_5_fc_0(features_6_out_5_avg_pool)
        features_6_out_5_fc_1 = self.features_6_out_5_fc_1(features_6_out_5_fc_0)
        features_6_out_5_fc_2 = self.features_6_out_5_fc_2(features_6_out_5_fc_1)
        features_6_out_5_fc_3 = self.features_6_out_5_fc_3(features_6_out_5_fc_2)
        features_6_out_5_fc_4 = self.features_6_out_5_fc_4(features_6_out_5_fc_3)
        mul_5 = features_6_out_4.__mul__(features_6_out_5_fc_4)
        features_6_out_6 = self.features_6_out_6(mul_5)
        features_6_out_7 = self.features_6_out_7(features_6_out_6)
        features_6_out_8 = self.features_6_out_8(features_6_out_7)
        # Block 7 (with SE gate).
        features_7_out_0 = self.features_7_out_0(features_6_out_8)
        features_7_out_1 = self.features_7_out_1(features_7_out_0)
        sigmoid_5 = features_7_out_1.sigmoid()
        mul_6 = features_7_out_1.mul_(sigmoid_5)
        features_7_out_3 = self.features_7_out_3(mul_6)
        features_7_out_4 = self.features_7_out_4(features_7_out_3)
        features_7_out_5_avg_pool = self.features_7_out_5_avg_pool(features_7_out_4)
        features_7_out_5_fc_0 = self.features_7_out_5_fc_0(features_7_out_5_avg_pool)
        features_7_out_5_fc_1 = self.features_7_out_5_fc_1(features_7_out_5_fc_0)
        features_7_out_5_fc_2 = self.features_7_out_5_fc_2(features_7_out_5_fc_1)
        features_7_out_5_fc_3 = self.features_7_out_5_fc_3(features_7_out_5_fc_2)
        features_7_out_5_fc_4 = self.features_7_out_5_fc_4(features_7_out_5_fc_3)
        mul_7 = features_7_out_4.__mul__(features_7_out_5_fc_4)
        features_7_out_6 = self.features_7_out_6(mul_7)
        features_7_out_7 = self.features_7_out_7(features_7_out_6)
        features_7_out_8 = self.features_7_out_8(features_7_out_7)
        # In-place residual into the first 50 channels.
        getitem_2 = features_7_out_8[:, 0:50]
        add_2 = getitem_2.__iadd__(features_6_out_8)
        # Block 8 (stride 2, with SE gate).
        features_8_out_0 = self.features_8_out_0(features_7_out_8)
        features_8_out_1 = self.features_8_out_1(features_8_out_0)
        sigmoid_6 = features_8_out_1.sigmoid()
        mul_8 = features_8_out_1.mul_(sigmoid_6)
        features_8_out_3 = self.features_8_out_3(mul_8)
        features_8_out_4 = self.features_8_out_4(features_8_out_3)
        features_8_out_5_avg_pool = self.features_8_out_5_avg_pool(features_8_out_4)
        features_8_out_5_fc_0 = self.features_8_out_5_fc_0(features_8_out_5_avg_pool)
        features_8_out_5_fc_1 = self.features_8_out_5_fc_1(features_8_out_5_fc_0)
        features_8_out_5_fc_2 = self.features_8_out_5_fc_2(features_8_out_5_fc_1)
        features_8_out_5_fc_3 = self.features_8_out_5_fc_3(features_8_out_5_fc_2)
        features_8_out_5_fc_4 = self.features_8_out_5_fc_4(features_8_out_5_fc_3)
        mul_9 = features_8_out_4.__mul__(features_8_out_5_fc_4)
        features_8_out_6 = self.features_8_out_6(mul_9)
        features_8_out_7 = self.features_8_out_7(features_8_out_6)
        features_8_out_8 = self.features_8_out_8(features_8_out_7)
        # Block 9 (with SE gate).
        features_9_out_0 = self.features_9_out_0(features_8_out_8)
        features_9_out_1 = self.features_9_out_1(features_9_out_0)
        sigmoid_7 = features_9_out_1.sigmoid()
        mul_10 = features_9_out_1.mul_(sigmoid_7)
        features_9_out_3 = self.features_9_out_3(mul_10)
        features_9_out_4 = self.features_9_out_4(features_9_out_3)
        features_9_out_5_avg_pool = self.features_9_out_5_avg_pool(features_9_out_4)
        features_9_out_5_fc_0 = self.features_9_out_5_fc_0(features_9_out_5_avg_pool)
        features_9_out_5_fc_1 = self.features_9_out_5_fc_1(features_9_out_5_fc_0)
        features_9_out_5_fc_2 = self.features_9_out_5_fc_2(features_9_out_5_fc_1)
        features_9_out_5_fc_3 = self.features_9_out_5_fc_3(features_9_out_5_fc_2)
        features_9_out_5_fc_4 = self.features_9_out_5_fc_4(features_9_out_5_fc_3)
        mul_11 = features_9_out_4.__mul__(features_9_out_5_fc_4)
        features_9_out_6 = self.features_9_out_6(mul_11)
        features_9_out_7 = self.features_9_out_7(features_9_out_6)
        features_9_out_8 = self.features_9_out_8(features_9_out_7)
        # In-place residual into the first 72 channels.
        getitem_3 = features_9_out_8[:, 0:72]
        add_3 = getitem_3.__iadd__(features_8_out_8)
        # Block 10 (with SE gate).
        features_10_out_0 = self.features_10_out_0(features_9_out_8)
        features_10_out_1 = self.features_10_out_1(features_10_out_0)
        sigmoid_8 = features_10_out_1.sigmoid()
        mul_12 = features_10_out_1.mul_(sigmoid_8)
        features_10_out_3 = self.features_10_out_3(mul_12)
        features_10_out_4 = self.features_10_out_4(features_10_out_3)
        features_10_out_5_avg_pool = self.features_10_out_5_avg_pool(features_10_out_4)
        features_10_out_5_fc_0 = self.features_10_out_5_fc_0(features_10_out_5_avg_pool)
        features_10_out_5_fc_1 = self.features_10_out_5_fc_1(features_10_out_5_fc_0)
        features_10_out_5_fc_2 = self.features_10_out_5_fc_2(features_10_out_5_fc_1)
        features_10_out_5_fc_3 = self.features_10_out_5_fc_3(features_10_out_5_fc_2)
        features_10_out_5_fc_4 = self.features_10_out_5_fc_4(features_10_out_5_fc_3)
        mul_13 = features_10_out_4.__mul__(features_10_out_5_fc_4)
        features_10_out_6 = self.features_10_out_6(mul_13)
        features_10_out_7 = self.features_10_out_7(features_10_out_6)
        features_10_out_8 = self.features_10_out_8(features_10_out_7)
        # In-place residual into the first 84 channels.
        getitem_4 = features_10_out_8[:, 0:84]
        add_4 = getitem_4.__iadd__(features_9_out_8)
        # Block 11 (with SE gate).
        features_11_out_0 = self.features_11_out_0(features_10_out_8)
        features_11_out_1 = self.features_11_out_1(features_11_out_0)
        sigmoid_9 = features_11_out_1.sigmoid()
        mul_14 = features_11_out_1.mul_(sigmoid_9)
        features_11_out_3 = self.features_11_out_3(mul_14)
        features_11_out_4 = self.features_11_out_4(features_11_out_3)
        features_11_out_5_avg_pool = self.features_11_out_5_avg_pool(features_11_out_4)
        features_11_out_5_fc_0 = self.features_11_out_5_fc_0(features_11_out_5_avg_pool)
        features_11_out_5_fc_1 = self.features_11_out_5_fc_1(features_11_out_5_fc_0)
        features_11_out_5_fc_2 = self.features_11_out_5_fc_2(features_11_out_5_fc_1)
        features_11_out_5_fc_3 = self.features_11_out_5_fc_3(features_11_out_5_fc_2)
        features_11_out_5_fc_4 = self.features_11_out_5_fc_4(features_11_out_5_fc_3)
        mul_15 = features_11_out_4.__mul__(features_11_out_5_fc_4)
        features_11_out_6 = self.features_11_out_6(mul_15)
        features_11_out_7 = self.features_11_out_7(features_11_out_6)
        features_11_out_8 = self.features_11_out_8(features_11_out_7)
        # In-place residual into the first 95 channels.
        getitem_5 = features_11_out_8[:, 0:95]
        add_5 = getitem_5.__iadd__(features_10_out_8)
        # Block 12 (with SE gate).
        features_12_out_0 = self.features_12_out_0(features_11_out_8)
        features_12_out_1 = self.features_12_out_1(features_12_out_0)
        sigmoid_10 = features_12_out_1.sigmoid()
        mul_16 = features_12_out_1.mul_(sigmoid_10)
        features_12_out_3 = self.features_12_out_3(mul_16)
        features_12_out_4 = self.features_12_out_4(features_12_out_3)
        features_12_out_5_avg_pool = self.features_12_out_5_avg_pool(features_12_out_4)
        features_12_out_5_fc_0 = self.features_12_out_5_fc_0(features_12_out_5_avg_pool)
        features_12_out_5_fc_1 = self.features_12_out_5_fc_1(features_12_out_5_fc_0)
        features_12_out_5_fc_2 = self.features_12_out_5_fc_2(features_12_out_5_fc_1)
        features_12_out_5_fc_3 = self.features_12_out_5_fc_3(features_12_out_5_fc_2)
        features_12_out_5_fc_4 = self.features_12_out_5_fc_4(features_12_out_5_fc_3)
        mul_17 = features_12_out_4.__mul__(features_12_out_5_fc_4)
        features_12_out_6 = self.features_12_out_6(mul_17)
        features_12_out_7 = self.features_12_out_7(features_12_out_6)
        features_12_out_8 = self.features_12_out_8(features_12_out_7)
        # In-place residual into the first 106 channels.
        getitem_6 = features_12_out_8[:, 0:106]
        add_6 = getitem_6.__iadd__(features_11_out_8)
        # Block 13 (with SE gate).
        features_13_out_0 = self.features_13_out_0(features_12_out_8)
        features_13_out_1 = self.features_13_out_1(features_13_out_0)
        sigmoid_11 = features_13_out_1.sigmoid()
        mul_18 = features_13_out_1.mul_(sigmoid_11)
        features_13_out_3 = self.features_13_out_3(mul_18)
        features_13_out_4 = self.features_13_out_4(features_13_out_3)
        features_13_out_5_avg_pool = self.features_13_out_5_avg_pool(features_13_out_4)
        features_13_out_5_fc_0 = self.features_13_out_5_fc_0(features_13_out_5_avg_pool)
        features_13_out_5_fc_1 = self.features_13_out_5_fc_1(features_13_out_5_fc_0)
        features_13_out_5_fc_2 = self.features_13_out_5_fc_2(features_13_out_5_fc_1)
        features_13_out_5_fc_3 = self.features_13_out_5_fc_3(features_13_out_5_fc_2)
        features_13_out_5_fc_4 = self.features_13_out_5_fc_4(features_13_out_5_fc_3)
        mul_19 = features_13_out_4.__mul__(features_13_out_5_fc_4)
        features_13_out_6 = self.features_13_out_6(mul_19)
        features_13_out_7 = self.features_13_out_7(features_13_out_6)
        features_13_out_8 = self.features_13_out_8(features_13_out_7)
        # In-place residual into the first 117 channels.
        getitem_7 = features_13_out_8[:, 0:117]
        add_7 = getitem_7.__iadd__(features_12_out_8)
        # Block 14 (stride 2, with SE gate).
        features_14_out_0 = self.features_14_out_0(features_13_out_8)
        features_14_out_1 = self.features_14_out_1(features_14_out_0)
        sigmoid_12 = features_14_out_1.sigmoid()
        mul_20 = features_14_out_1.mul_(sigmoid_12)
        features_14_out_3 = self.features_14_out_3(mul_20)
        features_14_out_4 = self.features_14_out_4(features_14_out_3)
        features_14_out_5_avg_pool = self.features_14_out_5_avg_pool(features_14_out_4)
        features_14_out_5_fc_0 = self.features_14_out_5_fc_0(features_14_out_5_avg_pool)
        features_14_out_5_fc_1 = self.features_14_out_5_fc_1(features_14_out_5_fc_0)
        features_14_out_5_fc_2 = self.features_14_out_5_fc_2(features_14_out_5_fc_1)
        features_14_out_5_fc_3 = self.features_14_out_5_fc_3(features_14_out_5_fc_2)
        features_14_out_5_fc_4 = self.features_14_out_5_fc_4(features_14_out_5_fc_3)
        mul_21 = features_14_out_4.__mul__(features_14_out_5_fc_4)
        features_14_out_6 = self.features_14_out_6(mul_21)
        features_14_out_7 = self.features_14_out_7(features_14_out_6)
        features_14_out_8 = self.features_14_out_8(features_14_out_7)
        # Block 15 (with SE gate).
        features_15_out_0 = self.features_15_out_0(features_14_out_8)
        features_15_out_1 = self.features_15_out_1(features_15_out_0)
        sigmoid_13 = features_15_out_1.sigmoid()
        mul_22 = features_15_out_1.mul_(sigmoid_13)
        features_15_out_3 = self.features_15_out_3(mul_22)
        features_15_out_4 = self.features_15_out_4(features_15_out_3)
        features_15_out_5_avg_pool = self.features_15_out_5_avg_pool(features_15_out_4)
        features_15_out_5_fc_0 = self.features_15_out_5_fc_0(features_15_out_5_avg_pool)
        features_15_out_5_fc_1 = self.features_15_out_5_fc_1(features_15_out_5_fc_0)
        features_15_out_5_fc_2 = self.features_15_out_5_fc_2(features_15_out_5_fc_1)
        features_15_out_5_fc_3 = self.features_15_out_5_fc_3(features_15_out_5_fc_2)
        features_15_out_5_fc_4 = self.features_15_out_5_fc_4(features_15_out_5_fc_3)
        mul_23 = features_15_out_4.__mul__(features_15_out_5_fc_4)
        features_15_out_6 = self.features_15_out_6(mul_23)
        features_15_out_7 = self.features_15_out_7(features_15_out_6)
        features_15_out_8 = self.features_15_out_8(features_15_out_7)
        # In-place residual into the first 140 channels.
        getitem_8 = features_15_out_8[:, 0:140]
        add_8 = getitem_8.__iadd__(features_14_out_8)
        # Block 16 (with SE gate).
        features_16_out_0 = self.features_16_out_0(features_15_out_8)
        features_16_out_1 = self.features_16_out_1(features_16_out_0)
        sigmoid_14 = features_16_out_1.sigmoid()
        mul_24 = features_16_out_1.mul_(sigmoid_14)
        features_16_out_3 = self.features_16_out_3(mul_24)
        features_16_out_4 = self.features_16_out_4(features_16_out_3)
        features_16_out_5_avg_pool = self.features_16_out_5_avg_pool(features_16_out_4)
        features_16_out_5_fc_0 = self.features_16_out_5_fc_0(features_16_out_5_avg_pool)
        features_16_out_5_fc_1 = self.features_16_out_5_fc_1(features_16_out_5_fc_0)
        features_16_out_5_fc_2 = self.features_16_out_5_fc_2(features_16_out_5_fc_1)
        features_16_out_5_fc_3 = self.features_16_out_5_fc_3(features_16_out_5_fc_2)
        features_16_out_5_fc_4 = self.features_16_out_5_fc_4(features_16_out_5_fc_3)
        mul_25 = features_16_out_4.__mul__(features_16_out_5_fc_4)
        features_16_out_6 = self.features_16_out_6(mul_25)
        features_16_out_7 = self.features_16_out_7(features_16_out_6)
        features_16_out_8 = self.features_16_out_8(features_16_out_7)
        # In-place residual into the first 151 channels.
        getitem_9 = features_16_out_8[:, 0:151]
        add_9 = getitem_9.__iadd__(features_15_out_8)
        # Block 17 (with SE gate).
        features_17_out_0 = self.features_17_out_0(features_16_out_8)
        features_17_out_1 = self.features_17_out_1(features_17_out_0)
        sigmoid_15 = features_17_out_1.sigmoid()
        mul_26 = features_17_out_1.mul_(sigmoid_15)
        features_17_out_3 = self.features_17_out_3(mul_26)
        features_17_out_4 = self.features_17_out_4(features_17_out_3)
        features_17_out_5_avg_pool = self.features_17_out_5_avg_pool(features_17_out_4)
        features_17_out_5_fc_0 = self.features_17_out_5_fc_0(features_17_out_5_avg_pool)
        features_17_out_5_fc_1 = self.features_17_out_5_fc_1(features_17_out_5_fc_0)
        features_17_out_5_fc_2 = self.features_17_out_5_fc_2(features_17_out_5_fc_1)
        features_17_out_5_fc_3 = self.features_17_out_5_fc_3(features_17_out_5_fc_2)
        features_17_out_5_fc_4 = self.features_17_out_5_fc_4(features_17_out_5_fc_3)
        mul_27 = features_17_out_4.__mul__(features_17_out_5_fc_4)
        features_17_out_6 = self.features_17_out_6(mul_27)
        features_17_out_7 = self.features_17_out_7(features_17_out_6)
        features_17_out_8 = self.features_17_out_8(features_17_out_7)
        # In-place residual into the first 162 channels.
        getitem_10 = features_17_out_8[:, 0:162]
        add_10 = getitem_10.__iadd__(features_16_out_8)
        # Block 18 (with SE gate).
        features_18_out_0 = self.features_18_out_0(features_17_out_8)
        features_18_out_1 = self.features_18_out_1(features_18_out_0)
        sigmoid_16 = features_18_out_1.sigmoid()
        mul_28 = features_18_out_1.mul_(sigmoid_16)
        features_18_out_3 = self.features_18_out_3(mul_28)
        features_18_out_4 = self.features_18_out_4(features_18_out_3)
        features_18_out_5_avg_pool = self.features_18_out_5_avg_pool(features_18_out_4)
        features_18_out_5_fc_0 = self.features_18_out_5_fc_0(features_18_out_5_avg_pool)
        features_18_out_5_fc_1 = self.features_18_out_5_fc_1(features_18_out_5_fc_0)
        features_18_out_5_fc_2 = self.features_18_out_5_fc_2(features_18_out_5_fc_1)
        features_18_out_5_fc_3 = self.features_18_out_5_fc_3(features_18_out_5_fc_2)
        features_18_out_5_fc_4 = self.features_18_out_5_fc_4(features_18_out_5_fc_3)
        mul_29 = features_18_out_4.__mul__(features_18_out_5_fc_4)
        features_18_out_6 = self.features_18_out_6(mul_29)
        features_18_out_7 = self.features_18_out_7(features_18_out_6)
        features_18_out_8 = self.features_18_out_8(features_18_out_7)
        # In-place residual into the first 174 channels.
        getitem_11 = features_18_out_8[:, 0:174]
        add_11 = getitem_11.__iadd__(features_17_out_8)
        # Head: 1x1 conv to 1280 -> BN -> in-place SiLU -> global avg-pool.
        features_19 = self.features_19(features_18_out_8)
        features_20 = self.features_20(features_19)
        sigmoid_17 = features_20.sigmoid()
        mul_30 = features_20.mul_(sigmoid_17)
        features_22 = self.features_22(mul_30)
        # Classifier: dropout -> 1x1 conv (1280 -> 1000) -> flatten to (N, 1000).
        output_0 = self.output_0(features_22)
        output_1 = self.output_1(output_0)
        flatten_1 = output_1.flatten(1)
        return flatten_1
if __name__ == "__main__":
    # Smoke test: build the network, switch to inference mode on CPU,
    # run it once on a dummy all-ones batch and print the raw output.
    net = rexnetv1()
    net.eval()
    net.cpu()
    sample_batch = torch.ones((2, 3, 224, 224), dtype=torch.float32)
    result = net(sample_batch)
    print(result)
| 78.118852 | 154 | 0.730444 |
import torch
import torch.nn
import torch.functional
import torch.nn.functional
class rexnetv1(torch.nn.Module):
    def __init__(self):
        """Declare every layer of the converted ReXNetV1 backbone.

        This code is machine-generated from a traced model: each traced
        module becomes a flat, numbered attribute (``features_<block>_out_<i>``)
        and the exact channel counts are baked in.  Recurring layer groups:

        * inverted-residual block: 1x1 expand conv -> BN -> (SiLU applied
          in ``forward``) -> depthwise 3x3 conv (``groups`` == channels)
          -> BN -> ReLU6 -> 1x1 projection conv -> BN;
        * squeeze-and-excite branch (``*_out_5_*``): AdaptiveAvgPool2d(1)
          -> 1x1 fc conv -> BN -> ReLU -> 1x1 fc conv -> Sigmoid.
        """
        super().__init__()
        # Stem: 3 -> 32 stride-2 conv + BN (SiLU applied in forward).
        self.features_0 = torch.nn.modules.conv.Conv2d(3, 32, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), bias=False)
        self.features_1 = torch.nn.modules.batchnorm.BatchNorm2d(32)
        # Block 3: depthwise 3x3 on 32ch, project to 16.
        self.features_3_out_0 = torch.nn.modules.conv.Conv2d(32, 32, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=32, bias=False)
        self.features_3_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(32)
        self.features_3_out_2 = torch.nn.modules.activation.ReLU6()
        self.features_3_out_3 = torch.nn.modules.conv.Conv2d(32, 16, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_3_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(16)
        # Block 4: expand 16 -> 96, stride-2 depthwise, project to 27.
        self.features_4_out_0 = torch.nn.modules.conv.Conv2d(16, 96, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_4_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(96)
        self.features_4_out_3 = torch.nn.modules.conv.Conv2d(96, 96, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=96, bias=False)
        self.features_4_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(96)
        self.features_4_out_5 = torch.nn.modules.activation.ReLU6()
        self.features_4_out_6 = torch.nn.modules.conv.Conv2d(96, 27, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_4_out_7 = torch.nn.modules.batchnorm.BatchNorm2d(27)
        # Block 5: expand 27 -> 162, project to 38.
        self.features_5_out_0 = torch.nn.modules.conv.Conv2d(27, 162, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_5_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(162)
        self.features_5_out_3 = torch.nn.modules.conv.Conv2d(162, 162, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=162, bias=False)
        self.features_5_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(162)
        self.features_5_out_5 = torch.nn.modules.activation.ReLU6()
        self.features_5_out_6 = torch.nn.modules.conv.Conv2d(162, 38, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_5_out_7 = torch.nn.modules.batchnorm.BatchNorm2d(38)
        # Block 6: expand 38 -> 228, stride-2 depthwise, SE (228->19->228), project to 50.
        self.features_6_out_0 = torch.nn.modules.conv.Conv2d(38, 228, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_6_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(228)
        self.features_6_out_3 = torch.nn.modules.conv.Conv2d(228, 228, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=228, bias=False)
        self.features_6_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(228)
        self.features_6_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_6_out_5_fc_0 = torch.nn.modules.conv.Conv2d(228, 19, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_6_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(19)
        self.features_6_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_6_out_5_fc_3 = torch.nn.modules.conv.Conv2d(19, 228, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_6_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_6_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_6_out_7 = torch.nn.modules.conv.Conv2d(228, 50, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_6_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(50)
        # Block 7: expand 50 -> 300, SE (300->25->300), project to 61.
        self.features_7_out_0 = torch.nn.modules.conv.Conv2d(50, 300, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_7_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(300)
        self.features_7_out_3 = torch.nn.modules.conv.Conv2d(300, 300, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=300, bias=False)
        self.features_7_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(300)
        self.features_7_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_7_out_5_fc_0 = torch.nn.modules.conv.Conv2d(300, 25, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_7_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(25)
        self.features_7_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_7_out_5_fc_3 = torch.nn.modules.conv.Conv2d(25, 300, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_7_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_7_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_7_out_7 = torch.nn.modules.conv.Conv2d(300, 61, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_7_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(61)
        # Block 8: expand 61 -> 366, stride-2 depthwise, SE (366->30->366), project to 72.
        self.features_8_out_0 = torch.nn.modules.conv.Conv2d(61, 366, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_8_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(366)
        self.features_8_out_3 = torch.nn.modules.conv.Conv2d(366, 366, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=366, bias=False)
        self.features_8_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(366)
        self.features_8_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_8_out_5_fc_0 = torch.nn.modules.conv.Conv2d(366, 30, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_8_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(30)
        self.features_8_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_8_out_5_fc_3 = torch.nn.modules.conv.Conv2d(30, 366, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_8_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_8_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_8_out_7 = torch.nn.modules.conv.Conv2d(366, 72, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_8_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(72)
        # Block 9: expand 72 -> 432, SE (432->36->432), project to 84.
        self.features_9_out_0 = torch.nn.modules.conv.Conv2d(72, 432, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_9_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(432)
        self.features_9_out_3 = torch.nn.modules.conv.Conv2d(432, 432, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=432, bias=False)
        self.features_9_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(432)
        self.features_9_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_9_out_5_fc_0 = torch.nn.modules.conv.Conv2d(432, 36, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_9_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(36)
        self.features_9_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_9_out_5_fc_3 = torch.nn.modules.conv.Conv2d(36, 432, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_9_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_9_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_9_out_7 = torch.nn.modules.conv.Conv2d(432, 84, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_9_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(84)
        # Block 10: expand 84 -> 504, SE (504->42->504), project to 95.
        self.features_10_out_0 = torch.nn.modules.conv.Conv2d(84, 504, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_10_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(504)
        self.features_10_out_3 = torch.nn.modules.conv.Conv2d(504, 504, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=504, bias=False)
        self.features_10_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(504)
        self.features_10_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_10_out_5_fc_0 = torch.nn.modules.conv.Conv2d(504, 42, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_10_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(42)
        self.features_10_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_10_out_5_fc_3 = torch.nn.modules.conv.Conv2d(42, 504, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_10_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_10_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_10_out_7 = torch.nn.modules.conv.Conv2d(504, 95, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_10_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(95)
        # Block 11: expand 95 -> 570, SE (570->47->570), project to 106.
        self.features_11_out_0 = torch.nn.modules.conv.Conv2d(95, 570, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_11_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(570)
        self.features_11_out_3 = torch.nn.modules.conv.Conv2d(570, 570, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=570, bias=False)
        self.features_11_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(570)
        self.features_11_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_11_out_5_fc_0 = torch.nn.modules.conv.Conv2d(570, 47, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_11_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(47)
        self.features_11_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_11_out_5_fc_3 = torch.nn.modules.conv.Conv2d(47, 570, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_11_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_11_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_11_out_7 = torch.nn.modules.conv.Conv2d(570, 106, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_11_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(106)
        # Block 12: expand 106 -> 636, SE (636->53->636), project to 117.
        self.features_12_out_0 = torch.nn.modules.conv.Conv2d(106, 636, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_12_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(636)
        self.features_12_out_3 = torch.nn.modules.conv.Conv2d(636, 636, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=636, bias=False)
        self.features_12_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(636)
        self.features_12_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_12_out_5_fc_0 = torch.nn.modules.conv.Conv2d(636, 53, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_12_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(53)
        self.features_12_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_12_out_5_fc_3 = torch.nn.modules.conv.Conv2d(53, 636, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_12_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_12_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_12_out_7 = torch.nn.modules.conv.Conv2d(636, 117, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_12_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(117)
        # Block 13: expand 117 -> 702, SE (702->58->702), project to 128.
        self.features_13_out_0 = torch.nn.modules.conv.Conv2d(117, 702, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_13_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(702)
        self.features_13_out_3 = torch.nn.modules.conv.Conv2d(702, 702, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=702, bias=False)
        self.features_13_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(702)
        self.features_13_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_13_out_5_fc_0 = torch.nn.modules.conv.Conv2d(702, 58, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_13_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(58)
        self.features_13_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_13_out_5_fc_3 = torch.nn.modules.conv.Conv2d(58, 702, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_13_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_13_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_13_out_7 = torch.nn.modules.conv.Conv2d(702, 128, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_13_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(128)
        # Block 14: expand 128 -> 768, stride-2 depthwise, SE (768->64->768), project to 140.
        self.features_14_out_0 = torch.nn.modules.conv.Conv2d(128, 768, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_14_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(768)
        self.features_14_out_3 = torch.nn.modules.conv.Conv2d(768, 768, (3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), groups=768, bias=False)
        self.features_14_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(768)
        self.features_14_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_14_out_5_fc_0 = torch.nn.modules.conv.Conv2d(768, 64, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_14_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(64)
        self.features_14_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_14_out_5_fc_3 = torch.nn.modules.conv.Conv2d(64, 768, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_14_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_14_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_14_out_7 = torch.nn.modules.conv.Conv2d(768, 140, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_14_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(140)
        # Block 15: expand 140 -> 840, SE (840->70->840), project to 151.
        self.features_15_out_0 = torch.nn.modules.conv.Conv2d(140, 840, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_15_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(840)
        self.features_15_out_3 = torch.nn.modules.conv.Conv2d(840, 840, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=840, bias=False)
        self.features_15_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(840)
        self.features_15_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_15_out_5_fc_0 = torch.nn.modules.conv.Conv2d(840, 70, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_15_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(70)
        self.features_15_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_15_out_5_fc_3 = torch.nn.modules.conv.Conv2d(70, 840, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_15_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_15_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_15_out_7 = torch.nn.modules.conv.Conv2d(840, 151, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_15_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(151)
        # Block 16: expand 151 -> 906, SE (906->75->906), project to 162.
        self.features_16_out_0 = torch.nn.modules.conv.Conv2d(151, 906, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_16_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(906)
        self.features_16_out_3 = torch.nn.modules.conv.Conv2d(906, 906, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=906, bias=False)
        self.features_16_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(906)
        self.features_16_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_16_out_5_fc_0 = torch.nn.modules.conv.Conv2d(906, 75, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_16_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(75)
        self.features_16_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_16_out_5_fc_3 = torch.nn.modules.conv.Conv2d(75, 906, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_16_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_16_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_16_out_7 = torch.nn.modules.conv.Conv2d(906, 162, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_16_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(162)
        # Block 17: expand 162 -> 972, SE (972->81->972), project to 174.
        self.features_17_out_0 = torch.nn.modules.conv.Conv2d(162, 972, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_17_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(972)
        self.features_17_out_3 = torch.nn.modules.conv.Conv2d(972, 972, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=972, bias=False)
        self.features_17_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(972)
        self.features_17_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_17_out_5_fc_0 = torch.nn.modules.conv.Conv2d(972, 81, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_17_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(81)
        self.features_17_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_17_out_5_fc_3 = torch.nn.modules.conv.Conv2d(81, 972, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_17_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_17_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_17_out_7 = torch.nn.modules.conv.Conv2d(972, 174, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_17_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(174)
        # Block 18: expand 174 -> 1044, SE (1044->87->1044), project to 185.
        self.features_18_out_0 = torch.nn.modules.conv.Conv2d(174, 1044, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_18_out_1 = torch.nn.modules.batchnorm.BatchNorm2d(1044)
        self.features_18_out_3 = torch.nn.modules.conv.Conv2d(1044, 1044, (3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=1044, bias=False)
        self.features_18_out_4 = torch.nn.modules.batchnorm.BatchNorm2d(1044)
        self.features_18_out_5_avg_pool = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        self.features_18_out_5_fc_0 = torch.nn.modules.conv.Conv2d(1044, 87, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_18_out_5_fc_1 = torch.nn.modules.batchnorm.BatchNorm2d(87)
        self.features_18_out_5_fc_2 = torch.nn.modules.activation.ReLU(inplace=True)
        self.features_18_out_5_fc_3 = torch.nn.modules.conv.Conv2d(87, 1044, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
        self.features_18_out_5_fc_4 = torch.nn.modules.activation.Sigmoid()
        self.features_18_out_6 = torch.nn.modules.activation.ReLU6()
        self.features_18_out_7 = torch.nn.modules.conv.Conv2d(1044, 185, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_18_out_8 = torch.nn.modules.batchnorm.BatchNorm2d(185)
        # Head: 185 -> 1280 conv + BN, then global avg-pool.
        self.features_19 = torch.nn.modules.conv.Conv2d(185, 1280, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), bias=False)
        self.features_20 = torch.nn.modules.batchnorm.BatchNorm2d(1280)
        self.features_22 = torch.nn.modules.pooling.AdaptiveAvgPool2d(1)
        # Classifier: dropout + 1x1 conv to 1000 outputs.
        self.output_0 = torch.nn.modules.dropout.Dropout(p=0.2)
        self.output_1 = torch.nn.modules.conv.Conv2d(1280, 1000, (1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1))
def forward(self, input_1):
    """Forward pass of this machine-generated ReXNet-style network.

    The graph is a straight pipeline of numbered stage blocks.  Recurring
    patterns in the generated code:

    * ``x.mul_(x.sigmoid())`` — in-place Swish/SiLU activation
      (``x * sigmoid(x)``, overwriting ``x``).
    * ``y[:, 0:C].__iadd__(x)`` — in-place *partial* residual: the previous
      stage's output is added into the first ``C`` channels of the current
      stage's output; the remaining channels pass through unchanged.
    * the ``*_out_5_avg_pool`` / ``*_out_5_fc_*`` chains ending in a sigmoid
      followed by ``__mul__`` are squeeze-and-excitation channel gates.

    Args:
        input_1: NCHW image batch; the ``__main__`` smoke test feeds a
            float32 tensor of shape ``(2, 3, 224, 224)``.

    Returns:
        Logits flattened to ``(batch, 1000)``.
    """
    # Stem: conv -> BN -> in-place swish.
    features_0 = self.features_0(input_1)
    features_1 = self.features_1(features_0)
    sigmoid_1 = features_1.sigmoid()
    mul_1 = features_1.mul_(sigmoid_1)
    # Block 3.
    features_3_out_0 = self.features_3_out_0(mul_1)
    features_3_out_1 = self.features_3_out_1(features_3_out_0)
    features_3_out_2 = self.features_3_out_2(features_3_out_1)
    features_3_out_3 = self.features_3_out_3(features_3_out_2)
    features_3_out_4 = self.features_3_out_4(features_3_out_3)
    # Block 4.
    features_4_out_0 = self.features_4_out_0(features_3_out_4)
    features_4_out_1 = self.features_4_out_1(features_4_out_0)
    sigmoid_2 = features_4_out_1.sigmoid()
    mul_2 = features_4_out_1.mul_(sigmoid_2)
    features_4_out_3 = self.features_4_out_3(mul_2)
    features_4_out_4 = self.features_4_out_4(features_4_out_3)
    features_4_out_5 = self.features_4_out_5(features_4_out_4)
    features_4_out_6 = self.features_4_out_6(features_4_out_5)
    features_4_out_7 = self.features_4_out_7(features_4_out_6)
    # Block 5; partial residual: block 4's output added into channels 0:27 in place.
    features_5_out_0 = self.features_5_out_0(features_4_out_7)
    features_5_out_1 = self.features_5_out_1(features_5_out_0)
    sigmoid_3 = features_5_out_1.sigmoid()
    mul_3 = features_5_out_1.mul_(sigmoid_3)
    features_5_out_3 = self.features_5_out_3(mul_3)
    features_5_out_4 = self.features_5_out_4(features_5_out_3)
    features_5_out_5 = self.features_5_out_5(features_5_out_4)
    features_5_out_6 = self.features_5_out_6(features_5_out_5)
    features_5_out_7 = self.features_5_out_7(features_5_out_6)
    getitem_1 = features_5_out_7[:, 0:27]
    add_1 = getitem_1.__iadd__(features_4_out_7)
    # Block 6 (with squeeze-and-excitation gate).
    features_6_out_0 = self.features_6_out_0(features_5_out_7)
    features_6_out_1 = self.features_6_out_1(features_6_out_0)
    sigmoid_4 = features_6_out_1.sigmoid()
    mul_4 = features_6_out_1.mul_(sigmoid_4)
    features_6_out_3 = self.features_6_out_3(mul_4)
    features_6_out_4 = self.features_6_out_4(features_6_out_3)
    features_6_out_5_avg_pool = self.features_6_out_5_avg_pool(features_6_out_4)
    features_6_out_5_fc_0 = self.features_6_out_5_fc_0(features_6_out_5_avg_pool)
    features_6_out_5_fc_1 = self.features_6_out_5_fc_1(features_6_out_5_fc_0)
    features_6_out_5_fc_2 = self.features_6_out_5_fc_2(features_6_out_5_fc_1)
    features_6_out_5_fc_3 = self.features_6_out_5_fc_3(features_6_out_5_fc_2)
    features_6_out_5_fc_4 = self.features_6_out_5_fc_4(features_6_out_5_fc_3)
    mul_5 = features_6_out_4.__mul__(features_6_out_5_fc_4)
    features_6_out_6 = self.features_6_out_6(mul_5)
    features_6_out_7 = self.features_6_out_7(features_6_out_6)
    features_6_out_8 = self.features_6_out_8(features_6_out_7)
    # Block 7 (SE); partial residual into channels 0:50.
    features_7_out_0 = self.features_7_out_0(features_6_out_8)
    features_7_out_1 = self.features_7_out_1(features_7_out_0)
    sigmoid_5 = features_7_out_1.sigmoid()
    mul_6 = features_7_out_1.mul_(sigmoid_5)
    features_7_out_3 = self.features_7_out_3(mul_6)
    features_7_out_4 = self.features_7_out_4(features_7_out_3)
    features_7_out_5_avg_pool = self.features_7_out_5_avg_pool(features_7_out_4)
    features_7_out_5_fc_0 = self.features_7_out_5_fc_0(features_7_out_5_avg_pool)
    features_7_out_5_fc_1 = self.features_7_out_5_fc_1(features_7_out_5_fc_0)
    features_7_out_5_fc_2 = self.features_7_out_5_fc_2(features_7_out_5_fc_1)
    features_7_out_5_fc_3 = self.features_7_out_5_fc_3(features_7_out_5_fc_2)
    features_7_out_5_fc_4 = self.features_7_out_5_fc_4(features_7_out_5_fc_3)
    mul_7 = features_7_out_4.__mul__(features_7_out_5_fc_4)
    features_7_out_6 = self.features_7_out_6(mul_7)
    features_7_out_7 = self.features_7_out_7(features_7_out_6)
    features_7_out_8 = self.features_7_out_8(features_7_out_7)
    getitem_2 = features_7_out_8[:, 0:50]
    add_2 = getitem_2.__iadd__(features_6_out_8)
    # Block 8 (SE); no residual (stride/stage boundary).
    features_8_out_0 = self.features_8_out_0(features_7_out_8)
    features_8_out_1 = self.features_8_out_1(features_8_out_0)
    sigmoid_6 = features_8_out_1.sigmoid()
    mul_8 = features_8_out_1.mul_(sigmoid_6)
    features_8_out_3 = self.features_8_out_3(mul_8)
    features_8_out_4 = self.features_8_out_4(features_8_out_3)
    features_8_out_5_avg_pool = self.features_8_out_5_avg_pool(features_8_out_4)
    features_8_out_5_fc_0 = self.features_8_out_5_fc_0(features_8_out_5_avg_pool)
    features_8_out_5_fc_1 = self.features_8_out_5_fc_1(features_8_out_5_fc_0)
    features_8_out_5_fc_2 = self.features_8_out_5_fc_2(features_8_out_5_fc_1)
    features_8_out_5_fc_3 = self.features_8_out_5_fc_3(features_8_out_5_fc_2)
    features_8_out_5_fc_4 = self.features_8_out_5_fc_4(features_8_out_5_fc_3)
    mul_9 = features_8_out_4.__mul__(features_8_out_5_fc_4)
    features_8_out_6 = self.features_8_out_6(mul_9)
    features_8_out_7 = self.features_8_out_7(features_8_out_6)
    features_8_out_8 = self.features_8_out_8(features_8_out_7)
    # Block 9 (SE); partial residual into channels 0:72.
    features_9_out_0 = self.features_9_out_0(features_8_out_8)
    features_9_out_1 = self.features_9_out_1(features_9_out_0)
    sigmoid_7 = features_9_out_1.sigmoid()
    mul_10 = features_9_out_1.mul_(sigmoid_7)
    features_9_out_3 = self.features_9_out_3(mul_10)
    features_9_out_4 = self.features_9_out_4(features_9_out_3)
    features_9_out_5_avg_pool = self.features_9_out_5_avg_pool(features_9_out_4)
    features_9_out_5_fc_0 = self.features_9_out_5_fc_0(features_9_out_5_avg_pool)
    features_9_out_5_fc_1 = self.features_9_out_5_fc_1(features_9_out_5_fc_0)
    features_9_out_5_fc_2 = self.features_9_out_5_fc_2(features_9_out_5_fc_1)
    features_9_out_5_fc_3 = self.features_9_out_5_fc_3(features_9_out_5_fc_2)
    features_9_out_5_fc_4 = self.features_9_out_5_fc_4(features_9_out_5_fc_3)
    mul_11 = features_9_out_4.__mul__(features_9_out_5_fc_4)
    features_9_out_6 = self.features_9_out_6(mul_11)
    features_9_out_7 = self.features_9_out_7(features_9_out_6)
    features_9_out_8 = self.features_9_out_8(features_9_out_7)
    getitem_3 = features_9_out_8[:, 0:72]
    add_3 = getitem_3.__iadd__(features_8_out_8)
    # Block 10 (SE); partial residual into channels 0:84.
    features_10_out_0 = self.features_10_out_0(features_9_out_8)
    features_10_out_1 = self.features_10_out_1(features_10_out_0)
    sigmoid_8 = features_10_out_1.sigmoid()
    mul_12 = features_10_out_1.mul_(sigmoid_8)
    features_10_out_3 = self.features_10_out_3(mul_12)
    features_10_out_4 = self.features_10_out_4(features_10_out_3)
    features_10_out_5_avg_pool = self.features_10_out_5_avg_pool(features_10_out_4)
    features_10_out_5_fc_0 = self.features_10_out_5_fc_0(features_10_out_5_avg_pool)
    features_10_out_5_fc_1 = self.features_10_out_5_fc_1(features_10_out_5_fc_0)
    features_10_out_5_fc_2 = self.features_10_out_5_fc_2(features_10_out_5_fc_1)
    features_10_out_5_fc_3 = self.features_10_out_5_fc_3(features_10_out_5_fc_2)
    features_10_out_5_fc_4 = self.features_10_out_5_fc_4(features_10_out_5_fc_3)
    mul_13 = features_10_out_4.__mul__(features_10_out_5_fc_4)
    features_10_out_6 = self.features_10_out_6(mul_13)
    features_10_out_7 = self.features_10_out_7(features_10_out_6)
    features_10_out_8 = self.features_10_out_8(features_10_out_7)
    getitem_4 = features_10_out_8[:, 0:84]
    add_4 = getitem_4.__iadd__(features_9_out_8)
    # Block 11 (SE); partial residual into channels 0:95.
    features_11_out_0 = self.features_11_out_0(features_10_out_8)
    features_11_out_1 = self.features_11_out_1(features_11_out_0)
    sigmoid_9 = features_11_out_1.sigmoid()
    mul_14 = features_11_out_1.mul_(sigmoid_9)
    features_11_out_3 = self.features_11_out_3(mul_14)
    features_11_out_4 = self.features_11_out_4(features_11_out_3)
    features_11_out_5_avg_pool = self.features_11_out_5_avg_pool(features_11_out_4)
    features_11_out_5_fc_0 = self.features_11_out_5_fc_0(features_11_out_5_avg_pool)
    features_11_out_5_fc_1 = self.features_11_out_5_fc_1(features_11_out_5_fc_0)
    features_11_out_5_fc_2 = self.features_11_out_5_fc_2(features_11_out_5_fc_1)
    features_11_out_5_fc_3 = self.features_11_out_5_fc_3(features_11_out_5_fc_2)
    features_11_out_5_fc_4 = self.features_11_out_5_fc_4(features_11_out_5_fc_3)
    mul_15 = features_11_out_4.__mul__(features_11_out_5_fc_4)
    features_11_out_6 = self.features_11_out_6(mul_15)
    features_11_out_7 = self.features_11_out_7(features_11_out_6)
    features_11_out_8 = self.features_11_out_8(features_11_out_7)
    getitem_5 = features_11_out_8[:, 0:95]
    add_5 = getitem_5.__iadd__(features_10_out_8)
    # Block 12 (SE); partial residual into channels 0:106.
    features_12_out_0 = self.features_12_out_0(features_11_out_8)
    features_12_out_1 = self.features_12_out_1(features_12_out_0)
    sigmoid_10 = features_12_out_1.sigmoid()
    mul_16 = features_12_out_1.mul_(sigmoid_10)
    features_12_out_3 = self.features_12_out_3(mul_16)
    features_12_out_4 = self.features_12_out_4(features_12_out_3)
    features_12_out_5_avg_pool = self.features_12_out_5_avg_pool(features_12_out_4)
    features_12_out_5_fc_0 = self.features_12_out_5_fc_0(features_12_out_5_avg_pool)
    features_12_out_5_fc_1 = self.features_12_out_5_fc_1(features_12_out_5_fc_0)
    features_12_out_5_fc_2 = self.features_12_out_5_fc_2(features_12_out_5_fc_1)
    features_12_out_5_fc_3 = self.features_12_out_5_fc_3(features_12_out_5_fc_2)
    features_12_out_5_fc_4 = self.features_12_out_5_fc_4(features_12_out_5_fc_3)
    mul_17 = features_12_out_4.__mul__(features_12_out_5_fc_4)
    features_12_out_6 = self.features_12_out_6(mul_17)
    features_12_out_7 = self.features_12_out_7(features_12_out_6)
    features_12_out_8 = self.features_12_out_8(features_12_out_7)
    getitem_6 = features_12_out_8[:, 0:106]
    add_6 = getitem_6.__iadd__(features_11_out_8)
    # Block 13 (SE); partial residual into channels 0:117.
    features_13_out_0 = self.features_13_out_0(features_12_out_8)
    features_13_out_1 = self.features_13_out_1(features_13_out_0)
    sigmoid_11 = features_13_out_1.sigmoid()
    mul_18 = features_13_out_1.mul_(sigmoid_11)
    features_13_out_3 = self.features_13_out_3(mul_18)
    features_13_out_4 = self.features_13_out_4(features_13_out_3)
    features_13_out_5_avg_pool = self.features_13_out_5_avg_pool(features_13_out_4)
    features_13_out_5_fc_0 = self.features_13_out_5_fc_0(features_13_out_5_avg_pool)
    features_13_out_5_fc_1 = self.features_13_out_5_fc_1(features_13_out_5_fc_0)
    features_13_out_5_fc_2 = self.features_13_out_5_fc_2(features_13_out_5_fc_1)
    features_13_out_5_fc_3 = self.features_13_out_5_fc_3(features_13_out_5_fc_2)
    features_13_out_5_fc_4 = self.features_13_out_5_fc_4(features_13_out_5_fc_3)
    mul_19 = features_13_out_4.__mul__(features_13_out_5_fc_4)
    features_13_out_6 = self.features_13_out_6(mul_19)
    features_13_out_7 = self.features_13_out_7(features_13_out_6)
    features_13_out_8 = self.features_13_out_8(features_13_out_7)
    getitem_7 = features_13_out_8[:, 0:117]
    add_7 = getitem_7.__iadd__(features_12_out_8)
    # Block 14 (SE); no residual (stride/stage boundary).
    features_14_out_0 = self.features_14_out_0(features_13_out_8)
    features_14_out_1 = self.features_14_out_1(features_14_out_0)
    sigmoid_12 = features_14_out_1.sigmoid()
    mul_20 = features_14_out_1.mul_(sigmoid_12)
    features_14_out_3 = self.features_14_out_3(mul_20)
    features_14_out_4 = self.features_14_out_4(features_14_out_3)
    features_14_out_5_avg_pool = self.features_14_out_5_avg_pool(features_14_out_4)
    features_14_out_5_fc_0 = self.features_14_out_5_fc_0(features_14_out_5_avg_pool)
    features_14_out_5_fc_1 = self.features_14_out_5_fc_1(features_14_out_5_fc_0)
    features_14_out_5_fc_2 = self.features_14_out_5_fc_2(features_14_out_5_fc_1)
    features_14_out_5_fc_3 = self.features_14_out_5_fc_3(features_14_out_5_fc_2)
    features_14_out_5_fc_4 = self.features_14_out_5_fc_4(features_14_out_5_fc_3)
    mul_21 = features_14_out_4.__mul__(features_14_out_5_fc_4)
    features_14_out_6 = self.features_14_out_6(mul_21)
    features_14_out_7 = self.features_14_out_7(features_14_out_6)
    features_14_out_8 = self.features_14_out_8(features_14_out_7)
    # Block 15 (SE); partial residual into channels 0:140.
    features_15_out_0 = self.features_15_out_0(features_14_out_8)
    features_15_out_1 = self.features_15_out_1(features_15_out_0)
    sigmoid_13 = features_15_out_1.sigmoid()
    mul_22 = features_15_out_1.mul_(sigmoid_13)
    features_15_out_3 = self.features_15_out_3(mul_22)
    features_15_out_4 = self.features_15_out_4(features_15_out_3)
    features_15_out_5_avg_pool = self.features_15_out_5_avg_pool(features_15_out_4)
    features_15_out_5_fc_0 = self.features_15_out_5_fc_0(features_15_out_5_avg_pool)
    features_15_out_5_fc_1 = self.features_15_out_5_fc_1(features_15_out_5_fc_0)
    features_15_out_5_fc_2 = self.features_15_out_5_fc_2(features_15_out_5_fc_1)
    features_15_out_5_fc_3 = self.features_15_out_5_fc_3(features_15_out_5_fc_2)
    features_15_out_5_fc_4 = self.features_15_out_5_fc_4(features_15_out_5_fc_3)
    mul_23 = features_15_out_4.__mul__(features_15_out_5_fc_4)
    features_15_out_6 = self.features_15_out_6(mul_23)
    features_15_out_7 = self.features_15_out_7(features_15_out_6)
    features_15_out_8 = self.features_15_out_8(features_15_out_7)
    getitem_8 = features_15_out_8[:, 0:140]
    add_8 = getitem_8.__iadd__(features_14_out_8)
    # Block 16 (SE); partial residual into channels 0:151.
    features_16_out_0 = self.features_16_out_0(features_15_out_8)
    features_16_out_1 = self.features_16_out_1(features_16_out_0)
    sigmoid_14 = features_16_out_1.sigmoid()
    mul_24 = features_16_out_1.mul_(sigmoid_14)
    features_16_out_3 = self.features_16_out_3(mul_24)
    features_16_out_4 = self.features_16_out_4(features_16_out_3)
    features_16_out_5_avg_pool = self.features_16_out_5_avg_pool(features_16_out_4)
    features_16_out_5_fc_0 = self.features_16_out_5_fc_0(features_16_out_5_avg_pool)
    features_16_out_5_fc_1 = self.features_16_out_5_fc_1(features_16_out_5_fc_0)
    features_16_out_5_fc_2 = self.features_16_out_5_fc_2(features_16_out_5_fc_1)
    features_16_out_5_fc_3 = self.features_16_out_5_fc_3(features_16_out_5_fc_2)
    features_16_out_5_fc_4 = self.features_16_out_5_fc_4(features_16_out_5_fc_3)
    mul_25 = features_16_out_4.__mul__(features_16_out_5_fc_4)
    features_16_out_6 = self.features_16_out_6(mul_25)
    features_16_out_7 = self.features_16_out_7(features_16_out_6)
    features_16_out_8 = self.features_16_out_8(features_16_out_7)
    getitem_9 = features_16_out_8[:, 0:151]
    add_9 = getitem_9.__iadd__(features_15_out_8)
    # Block 17 (SE); partial residual into channels 0:162.
    features_17_out_0 = self.features_17_out_0(features_16_out_8)
    features_17_out_1 = self.features_17_out_1(features_17_out_0)
    sigmoid_15 = features_17_out_1.sigmoid()
    mul_26 = features_17_out_1.mul_(sigmoid_15)
    features_17_out_3 = self.features_17_out_3(mul_26)
    features_17_out_4 = self.features_17_out_4(features_17_out_3)
    features_17_out_5_avg_pool = self.features_17_out_5_avg_pool(features_17_out_4)
    features_17_out_5_fc_0 = self.features_17_out_5_fc_0(features_17_out_5_avg_pool)
    features_17_out_5_fc_1 = self.features_17_out_5_fc_1(features_17_out_5_fc_0)
    features_17_out_5_fc_2 = self.features_17_out_5_fc_2(features_17_out_5_fc_1)
    features_17_out_5_fc_3 = self.features_17_out_5_fc_3(features_17_out_5_fc_2)
    features_17_out_5_fc_4 = self.features_17_out_5_fc_4(features_17_out_5_fc_3)
    mul_27 = features_17_out_4.__mul__(features_17_out_5_fc_4)
    features_17_out_6 = self.features_17_out_6(mul_27)
    features_17_out_7 = self.features_17_out_7(features_17_out_6)
    features_17_out_8 = self.features_17_out_8(features_17_out_7)
    getitem_10 = features_17_out_8[:, 0:162]
    add_10 = getitem_10.__iadd__(features_16_out_8)
    # Block 18 (SE); partial residual into channels 0:174.
    features_18_out_0 = self.features_18_out_0(features_17_out_8)
    features_18_out_1 = self.features_18_out_1(features_18_out_0)
    sigmoid_16 = features_18_out_1.sigmoid()
    mul_28 = features_18_out_1.mul_(sigmoid_16)
    features_18_out_3 = self.features_18_out_3(mul_28)
    features_18_out_4 = self.features_18_out_4(features_18_out_3)
    features_18_out_5_avg_pool = self.features_18_out_5_avg_pool(features_18_out_4)
    features_18_out_5_fc_0 = self.features_18_out_5_fc_0(features_18_out_5_avg_pool)
    features_18_out_5_fc_1 = self.features_18_out_5_fc_1(features_18_out_5_fc_0)
    features_18_out_5_fc_2 = self.features_18_out_5_fc_2(features_18_out_5_fc_1)
    features_18_out_5_fc_3 = self.features_18_out_5_fc_3(features_18_out_5_fc_2)
    features_18_out_5_fc_4 = self.features_18_out_5_fc_4(features_18_out_5_fc_3)
    mul_29 = features_18_out_4.__mul__(features_18_out_5_fc_4)
    features_18_out_6 = self.features_18_out_6(mul_29)
    features_18_out_7 = self.features_18_out_7(features_18_out_6)
    features_18_out_8 = self.features_18_out_8(features_18_out_7)
    getitem_11 = features_18_out_8[:, 0:174]
    add_11 = getitem_11.__iadd__(features_17_out_8)
    # Head: 1x1 conv -> BN -> swish -> global pool -> dropout -> 1x1 conv -> flatten.
    features_19 = self.features_19(features_18_out_8)
    features_20 = self.features_20(features_19)
    sigmoid_17 = features_20.sigmoid()
    mul_30 = features_20.mul_(sigmoid_17)
    features_22 = self.features_22(mul_30)
    output_0 = self.output_0(features_22)
    output_1 = self.output_1(output_0)
    flatten_1 = output_1.flatten(1)
    return flatten_1
if __name__ == "__main__":
    # Smoke test: run the generated network once on CPU with a dummy batch.
    model = rexnetv1()
    model.eval()  # inference mode: fixes BatchNorm statistics / disables Dropout
    model.cpu()
    dummy_input_0 = torch.ones((2, 3, 224, 224), dtype=torch.float32)
    with torch.no_grad():  # forward-only check; no autograd graph needed
        output = model(dummy_input_0)
    print(output)
| true | true |
1c2b5bec4cab56954edbec66a7e38f74ff08915c | 699 | py | Python | deciphon/task_result.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
] | null | null | null | deciphon/task_result.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
] | 1 | 2021-07-02T10:24:19.000Z | 2021-07-02T10:24:19.000Z | deciphon/task_result.py | EBI-Metagenomics/deciphon-py | 81df946c4f2f53c55ac96fc78ed2f95958b291d8 | [
"MIT"
] | null | null | null | from __future__ import annotations
from ._cdata import CData
from ._ffi import ffi, lib
from .codon_table import CodonTable
from .result import Result
__all__ = ["TaskResult"]
class TaskResult:
    """Python view over a C ``dcp_results`` linked list.

    Walks the C list once at construction time and wraps each entry in a
    :class:`Result`, so iteration afterwards is pure Python.
    """

    def __init__(self, dcp_results: CData, codon_table: CodonTable):
        """
        :param dcp_results: pointer to the C results list; must not be NULL.
        :param codon_table: codon table forwarded to every wrapped ``Result``.
        :raises RuntimeError: if ``dcp_results`` is a NULL pointer.
        """
        self._dcp_results = dcp_results
        if self._dcp_results == ffi.NULL:
            raise RuntimeError("`dcp_results` is NULL.")
        # Eagerly materialise the C linked list into Python wrapper objects.
        self._results = []
        r = lib.dcp_results_first(self._dcp_results)
        while r != ffi.NULL:
            self._results.append(Result(r, codon_table))
            r = lib.dcp_results_next(self._dcp_results, r)

    @property
    def results(self):
        """List of wrapped ``Result`` objects, in C-list order."""
        return self._results
| 26.884615 | 68 | 0.672389 | from __future__ import annotations
from ._cdata import CData
from ._ffi import ffi, lib
from .codon_table import CodonTable
from .result import Result
__all__ = ["TaskResult"]
# NOTE(review): this is the comment-stripped duplicate copy of TaskResult held
# in the dataset's "content_no_comment" column; leading whitespace was lost in
# this dump, so the code below does not parse as-is.
class TaskResult:
def __init__(self, dcp_results: CData, codon_table: CodonTable):
self._dcp_results = dcp_results
# A NULL results pointer is a programming error; fail fast.
if self._dcp_results == ffi.NULL:
raise RuntimeError("`dcp_results` is NULL.")
# Walk the C linked list once, wrapping each entry in a Result.
self._results = []
r = lib.dcp_results_first(self._dcp_results)
while r != ffi.NULL:
self._results.append(Result(r, codon_table))
r = lib.dcp_results_next(self._dcp_results, r)
@property
def results(self):
return self._results
| true | true |
1c2b5c72e77f2a2281155c99de844dc719473b3f | 8,283 | py | Python | metadata-etl/src/main/resources/jython/CodeSearchExtract.py | simplesteph/WhereHows | e34bbcc629d529238a62b4bd4405713f8ee1519c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | metadata-etl/src/main/resources/jython/CodeSearchExtract.py | simplesteph/WhereHows | e34bbcc629d529238a62b4bd4405713f8ee1519c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | metadata-etl/src/main/resources/jython/CodeSearchExtract.py | simplesteph/WhereHows | e34bbcc629d529238a62b4bd4405713f8ee1519c | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-02-03T14:12:46.000Z | 2021-07-25T03:23:56.000Z | #
# Copyright 2015 LinkedIn Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import sys,os,re
import requests
import subprocess
from wherehows.common import Constant
from wherehows.common.schemas import SCMOwnerRecord
from wherehows.common.writers import FileWriter
from org.slf4j import LoggerFactory
class CodeSearchExtract:
    """
    Lists all repos for oracle & espresso databases. Since this feature is not
    available through the UI, we need to use http://go/codesearch to discover
    the multiproduct repos that use 'li-db' plugin.
    """
    limit_search_result = 500

    def __init__(self):
        # `args` is a module-level global bound by the launcher before this
        # constructor runs (see the __main__ block). NOTE(review): it is
        # indexed like a mapping of job properties — presumably a Jython/
        # Azkaban properties object rather than a plain string; confirm.
        self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
        self.base_url = args[Constant.BASE_URL_KEY]
        self.code_search_committer_writer = FileWriter(args[Constant.DATABASE_SCM_REPO_OUTPUT_KEY])

    def run(self):
        """Page through code-search hits for 'li-db' gradle builds, resolve each
        candidate database.properties file to its SCM repo and recent
        committers, and write one SCMOwnerRecord per database."""
        # ---- Phase 1: discover database.properties candidates (paged) ----
        offset_min = 1
        offset_max = 100
        databases = []
        search_request = \
            {"request":
                {
                    "other": {"CurrentResult": str(offset_min), "requestTimeout": "200000000"},
                    "queryContext": {"numToScore": 1000, "docDataSet": "results", "rawQuery": "type:gradle plugin:*'li-db'"},
                    "paginationContext": {"numToReturn": offset_max}
                }
            }
        while True:
            # NOTE(review): verify=False disables TLS certificate checking —
            # acceptable only for a trusted internal endpoint.
            resp = requests.post(self.base_url + '/galene-codesearch?action=search',
                                 json=search_request,
                                 verify=False)
            if resp.status_code != 200:
                # Server-side failure: log its stack trace and stop paging.
                d = resp.json()
                self.logger.info("Request Error! Stack trace {}".format(d['stackTrace']))
                break
            result = resp.json()['value']
            self.logger.debug("Pagination offset = {}".format(result['total']))
            for element in result['elements']:
                fpath = element['docData']['filepath']
                ri = fpath.rindex('/')
                # Sibling property file of the matched gradle file, e.g.
                # identity-mt/database/Identity/database.properties
                prop_file = fpath[:ri] + '/database.properties'
                databases.append({'filepath': prop_file, 'app_name': element['docData']['mp']})
            if result['total'] < 100:
                break  # last (partial) page reached
            # Advance the pagination window for the next request.
            offset_min += int(result['total'])
            offset_max += 100
            search_request['request']['other']['CurrentResult'] = str(offset_min)
            search_request['request']['paginationContext']['numToReturn'] = offset_max

        self.logger.debug("Property file path {}".format(search_request))
        self.logger.debug(" length of databases is {}".format(len(databases)))

        # ---- Phase 2: fetch each property file, resolve repo + committers ----
        owner_count = 0
        committers_count = 0
        for db in databases:
            prop_file = db['filepath']
            file_request = \
                {"request": {
                    "other": {"filepath": prop_file,
                              "TextTokenize": "True",
                              "CurrentResult": "1",
                              "requestTimeout": "2000000000"
                              },
                    "queryContext": {"numToScore": 10, "docDataSet": "result"},
                    "paginationContext": {"numToReturn": 1}
                }
                }
            resp = requests.post(self.base_url + '/galene-codesearch?action=search',
                                 json=file_request,
                                 verify=False)
            if resp.status_code != 200:
                d = resp.json()
                self.logger.info("Request Error! Stack trace {}".format(d['stackTrace']))
                continue
            result = resp.json()['value']
            if result['total'] < 1:
                self.logger.info("Nothing found for {}".format(prop_file))
                continue
            # Default every record field up front so a record can still be
            # written when the hit lacks repo metadata or the property file
            # lacks database.name/database.type (previously raised KeyError at
            # SCMOwnerRecord construction and aborted the whole extraction).
            db.setdefault('scm_url', '')
            db.setdefault('scm_type', '')
            db.setdefault('committers', '')
            db.setdefault('database_name', '')
            db.setdefault('database_type', '')
            if "repoUrl" in result['elements'][0]['docData']:
                db['scm_url'] = result['elements'][0]['docData']['repoUrl']
                db['scm_type'] = result['elements'][0]['docData']['repotype']
                db['committers'] = ''
                if db['scm_type'] == 'SVN':
                    # Map the markup-view URL of database.properties to the
                    # repository's schema directory on the svn host.
                    schema_in_repo = re.sub(r"http://(\w+)\.([\w\.\-/].*)database.properties\?view=markup",
                                            "http://svn." + r"\2" + "schema", db['scm_url'])
                    db['committers'] = self.get_svn_committers(schema_in_repo)
                    committers_count += 1
                    self.logger.info("Committers for {} => {}".format(schema_in_repo, db['committers']))
            else:
                self.logger.info("Search request {}".format(prop_file))
            # Parse the 'key=value' lines of the property file body.
            code = result['elements'][0]['docData']['code']
            code_dict = dict(line.split("=", 1) for line in code.strip().splitlines())
            if "database.name" in code_dict:
                db['database_name'] = code_dict['database.name']
            if "database.type" in code_dict:
                db['database_type'] = code_dict['database.type']
            owner_record = SCMOwnerRecord(
                db['scm_url'],
                db['database_name'],
                db['database_type'],
                db['app_name'],
                db['filepath'],
                db['committers'],
                db['scm_type']
            )
            owner_count += 1
            self.code_search_committer_writer.append(owner_record)
        self.code_search_committer_writer.close()
        self.logger.info('Finish Fetching committers, total {} committers entries'.format(committers_count))
        self.logger.info('Finish Fetching SVN owners, total {} records'.format(owner_count))

    def get_svn_committers(self, svn_repo_path):
        """Collect recent committers via
        ``svn log <path> | grep '^\\(A=\\|r[0-9]* \\)' | head -10``, e.g.::

            r1617887 | htang | 2016-09-21 14:27:40 -0700 (...) | 12 lines
            A=shanda,pravi
            r1600397 | llu | 2016-08-08 17:14:22 -0700 (...) | 3 lines
            A=rramakri,htang

        Returns a comma-separated, de-duplicated list of committers/approvers.
        """
        committers = []
        # The schema directory may be named 'schema' or 'schemata'; try both.
        possible_svn_paths = [svn_repo_path, svn_repo_path + "ta"]
        for svn_repo_path in possible_svn_paths:
            # NOTE(review): shell=True with an interpolated URL is shell-injection
            # prone if the repo URL could ever be attacker-controlled.
            p = subprocess.Popen('svn log ' + svn_repo_path + " |grep '^\(A=\|r[0-9]* \)' |head -10",
                                 shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            svn_log_output, svn_log_err = p.communicate()
            if svn_log_err[:12] == 'svn: E160013':
                continue  # path not found in the repo; try the next candidate
            for line in svn_log_output.split('\n'):
                if re.match(r"r[0-9]+", line):
                    # Revision header "r123 | author | date | ..." -> author field.
                    committer = line.split('|')[1].strip()
                    if committer not in committers:
                        committers.append(committer)
                elif line[:2] == 'A=':
                    # Approver list line "A=user1,user2".
                    for apvr in line[2:].split(','):
                        if apvr not in committers:
                            committers.append(apvr)
            if len(committers) > 0:
                self.logger.debug(" {}, ' => ', {}".format(svn_repo_path, committers))
                break
        return ','.join(committers)
if __name__ == "__main__":
    # `args` is read as a module-level global by CodeSearchExtract.__init__,
    # so it must be bound before the constructor runs.
    # NOTE(review): sys.argv[1] is later indexed like a properties mapping
    # (args[Constant.BASE_URL_KEY]) — presumably a Jython/Azkaban job-props
    # object rather than a plain string; confirm against the launcher.
    args = sys.argv[1]
    e = CodeSearchExtract()
    e.run()
| 43.366492 | 119 | 0.543644 |
import sys,os,re
import requests
import subprocess
from wherehows.common import Constant
from wherehows.common.schemas import SCMOwnerRecord
from wherehows.common.writers import FileWriter
from org.slf4j import LoggerFactory
# NOTE(review): comment-stripped duplicate copy of CodeSearchExtract held in
# the dataset's "content_no_comment" column; leading whitespace was lost in
# this dump, so the code below does not parse as-is.
class CodeSearchExtract:
limit_search_result = 500
def __init__(self):
self.logger = LoggerFactory.getLogger('jython script : ' + self.__class__.__name__)
self.base_url = args[Constant.BASE_URL_KEY]
self.code_search_committer_writer = FileWriter(args[Constant.DATABASE_SCM_REPO_OUTPUT_KEY])
# Phase 1 below pages through code-search hits; phase 2 resolves each
# candidate database.properties file and writes SCMOwnerRecord rows.
def run(self):
offset_min = 1
offset_max = 100
databases = []
search_request = \
{"request":
{
"other":{"CurrentResult":str(offset_min),"requestTimeout":"200000000"},
"queryContext":{"numToScore":1000,"docDataSet":"results","rawQuery":"type:gradle plugin:*'li-db'"},
"paginationContext":{"numToReturn":offset_max}
}
}
while True:
resp = requests.post(self.base_url + '/galene-codesearch?action=search',
json=search_request,
verify=False)
if resp.status_code != 200:
d = resp.json()
self.logger.info("Request Error! Stack trace {}".format(d['stackTrace']))
break
result = resp.json()['value']
self.logger.debug("Pagination offset = {}".format(result['total']))
for element in result['elements']:
fpath = element['docData']['filepath']
ri = fpath.rindex('/')
prop_file = fpath[:ri] + '/database.properties'
databases.append( {'filepath': prop_file, 'app_name': element['docData']['mp']} )
if result['total'] < 100:
break
offset_min += int(result['total'])
offset_max += 100
search_request['request']['other']['CurrentResult'] = str(offset_min)
search_request['request']['paginationContext']['numToReturn'] = offset_max
self.logger.debug("Property file path {}".format(search_request))
self.logger.debug(" length of databases is {}".format(len(databases)))
owner_count = 0
committers_count = 0
for db in databases:
prop_file = db['filepath']
file_request = \
{"request":{
"other":{"filepath":prop_file,
"TextTokenize":"True",
"CurrentResult":"1",
"requestTimeout":"2000000000"
},
"queryContext":{"numToScore":10,"docDataSet":"result"},
"paginationContext":{"numToReturn":1}
}
}
resp = requests.post(self.base_url + '/galene-codesearch?action=search',
json=file_request,
verify=False)
if resp.status_code != 200:
d = resp.json()
self.logger.info("Request Error! Stack trace {}".format(d['stackTrace']))
continue
result = resp.json()['value']
if result['total'] < 1:
self.logger.info("Nothing found for {}".format(prop_file))
continue
if "repoUrl" in result['elements'][0]['docData']:
db['scm_url'] = result['elements'][0]['docData']['repoUrl']
db['scm_type'] = result['elements'][0]['docData']['repotype']
db['committers'] = ''
if db['scm_type'] == 'SVN':
schema_in_repo = re.sub(r"http://(\w+)\.([\w\.\-/].*)database.properties\?view=markup",
"http://svn." + r"\2" + "schema", db['scm_url'])
db['committers'] = self.get_svn_committers(schema_in_repo)
committers_count +=1
self.logger.info("Committers for {} => {}".format(schema_in_repo,db['committers']))
else:
self.logger.info("Search request {}".format(prop_file))
code = result['elements'][0]['docData']['code']
code_dict = dict(line.split("=", 1) for line in code.strip().splitlines())
if "database.name" in code_dict:
db['database_name'] = code_dict['database.name']
if "database.type" in code_dict:
db['database_type'] = code_dict['database.type']
owner_record = SCMOwnerRecord(
db['scm_url'],
db['database_name'],
db['database_type'],
db['app_name'],
db['filepath'],
db['committers'],
db['scm_type']
)
owner_count += 1
self.code_search_committer_writer.append(owner_record)
self.code_search_committer_writer.close()
self.logger.info('Finish Fetching committers, total {} committers entries'.format(committers_count))
self.logger.info('Finish Fetching SVN owners, total {} records'.format(owner_count))
# Collects recent committers/approvers from `svn log` output; tries the
# 'schema' directory first, then 'schemata' (the + "ta" suffix).
def get_svn_committers(self, svn_repo_path):
committers = []
possible_svn_paths = [svn_repo_path, svn_repo_path + "ta"]
for svn_repo_path in possible_svn_paths:
p = subprocess.Popen('svn log ' + svn_repo_path + " |grep '^\(A=\|r[0-9]* \)' |head -10",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
svn_log_output, svn_log_err = p.communicate()
if svn_log_err[:12] == 'svn: E160013':
continue
for line in svn_log_output.split('\n'):
if re.match(r"r[0-9]+", line):
committer = line.split('|')[1].strip()
if committer not in committers:
committers.append(committer)
elif line[:2] == 'A=':
for apvr in line[2:].split(','):
if apvr not in committers:
committers.append(apvr)
if len(committers) > 0:
self.logger.debug(" {}, ' => ', {}".format(svn_repo_path,committers))
break
return ','.join(committers)
# Entry point (comment-stripped duplicate copy): `args` must be bound before
# constructing CodeSearchExtract, whose __init__ reads it as a module global.
if __name__ == "__main__":
args = sys.argv[1]
e = CodeSearchExtract()
e.run()
| true | true |
1c2b5cb3fc61452aed2ffa3b1df033d244d6a253 | 2,232 | py | Python | WaveBlocksND/GradientLinearCombinationHAWP.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 3 | 2016-09-01T21:13:54.000Z | 2020-03-23T15:45:32.000Z | WaveBlocksND/GradientLinearCombinationHAWP.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | null | null | null | WaveBlocksND/GradientLinearCombinationHAWP.py | raoulbq/WaveBlocksND | 225b5dd9b1af1998bd40b5f6467ee959292b6a83 | [
"BSD-3-Clause"
] | 6 | 2016-03-16T15:22:01.000Z | 2021-03-13T14:06:54.000Z | """The WaveBlocks Project
Compute the action of the gradient operator applied to a
linear combination of Hagedorn wavepackets.
@author: R. Bourquin
@copyright: Copyright (C) 2013, 2014 R. Bourquin
@license: Modified BSD License
"""
from numpy import squeeze
from WaveBlocksND.Gradient import Gradient
from WaveBlocksND.GradientHAWP import GradientHAWP
from WaveBlocksND.LinearCombinationOfHAWPs import LinearCombinationOfHAWPs
__all__ = ["GradientLinearCombinationHAWP"]
class GradientLinearCombinationHAWP(Gradient):
r"""This class implements the computation of the action of the
gradient operator :math:`-i \varepsilon^2 \nabla_x` applied to
a linear combination :math:`\Upsilon` of Hagedorn wavepackets :math:`\Psi`.
"""
def __init__(self):
r"""
"""
pass
# TODO: Find a more efficient way to compute gradients
def apply_gradient(self, lincomb, component=None):
r"""Compute the effect of the gradient operator :math:`-i \varepsilon^2 \nabla_x`
on the linear combination :math:`\Upsilon` of Hagedorn wavepackets :math:`\Psi`.
:param lincomb: The linear combination :math:`\Upsilon`.
:type lincomb: A :py:class:`LinearCombinationOfHAWPs` instance.
:param component: The index :math:`i` of the component :math:`\Phi_i`.
:type component: Integer or ``None``.
:return: One linear combination :math:`\Upsilon_d` containing the gradients
for the component :math:`\partial_{x_d}` for each space dimension
component :math:`d = 1, \ldots, D`.
"""
D = lincomb.get_dimension()
N = lincomb.get_number_components()
J = lincomb.get_number_packets()
Cj = squeeze(lincomb.get_coefficients())
eps = lincomb.get_eps()
G = GradientHAWP()
new_lincombs = [LinearCombinationOfHAWPs(D, N, eps) for d in range(D)]
# Handle each wavepacket individually
for j in range(J):
packet = lincomb.get_wavepacket(j)
grads = G.apply_gradient(packet, component=component)
for d, grad in enumerate(grads):
new_lincombs[d].add_wavepacket(grad, Cj[j])
return new_lincombs
| 34.875 | 89 | 0.672939 |
from numpy import squeeze
from WaveBlocksND.Gradient import Gradient
from WaveBlocksND.GradientHAWP import GradientHAWP
from WaveBlocksND.LinearCombinationOfHAWPs import LinearCombinationOfHAWPs
__all__ = ["GradientLinearCombinationHAWP"]
class GradientLinearCombinationHAWP(Gradient):
    r"""Compute the action of the gradient operator
    :math:`-i \varepsilon^2 \nabla_x` applied to a linear combination
    :math:`\Upsilon` of Hagedorn wavepackets :math:`\Psi`.
    """
    def __init__(self):
        r"""Stateless operator: nothing to initialize."""
        pass
    def apply_gradient(self, lincomb, component=None):
        r"""Apply the gradient operator to the linear combination *lincomb*.

        :param lincomb: The linear combination :math:`\Upsilon` of wavepackets
                        (a :py:class:`LinearCombinationOfHAWPs` instance).
        :param component: The index :math:`i` of the component :math:`\Phi_i`,
                          or ``None``.
        :return: A list with one linear combination per space dimension,
                 each holding the corresponding partial derivatives.
        """
        # D: space dimension, N: number of components, J: number of packets.
        D = lincomb.get_dimension()
        N = lincomb.get_number_components()
        J = lincomb.get_number_packets()
        Cj = squeeze(lincomb.get_coefficients())
        eps = lincomb.get_eps()
        G = GradientHAWP()
        # One result linear combination per space dimension d.
        new_lincombs = [LinearCombinationOfHAWPs(D, N, eps) for d in range(D)]
        # Handle each wavepacket of the linear combination individually.
        for j in range(J):
            packet = lincomb.get_wavepacket(j)
            grads = G.apply_gradient(packet, component=component)
            for d, grad in enumerate(grads):
                # Weight each gradient packet by the packet's coefficient.
                new_lincombs[d].add_wavepacket(grad, Cj[j])
        return new_lincombs
| true | true |
1c2b5cddeed73856f0a5b51097432c841f9523fa | 1,846 | py | Python | x20bf/depends/git/git/ext/gitdb/gitdb/test/db/test_git.py | bitkarrot/x20bf | cf61146fcb9aadfb4b6d6e2a45bf4ac7a3217345 | [
"Apache-2.0"
] | 4 | 2022-02-20T07:25:43.000Z | 2022-03-01T21:15:40.000Z | x20bf/depends/git/git/ext/gitdb/gitdb/test/db/test_git.py | bitkarrot/x20bf | cf61146fcb9aadfb4b6d6e2a45bf4ac7a3217345 | [
"Apache-2.0"
] | 8 | 2022-02-26T15:20:47.000Z | 2022-03-09T03:19:21.000Z | x20bf/depends/git/git/ext/gitdb/gitdb/test/db/test_git.py | bitkarrot/x20bf | cf61146fcb9aadfb4b6d6e2a45bf4ac7a3217345 | [
"Apache-2.0"
] | 2 | 2022-02-21T04:25:55.000Z | 2022-02-22T22:50:42.000Z | # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
from gitdb.base import OInfo, OStream
from gitdb.db import GitDB
from gitdb.exc import BadObject
from gitdb.test.db.lib import TestDBBase, with_rw_directory
from gitdb.util import bin_to_hex
class TestGitDB(TestDBBase):

    def test_reading(self):
        """Exercise read access on the fixture repository's object database."""
        db = GitDB(os.path.join(self.gitrepopath, "objects"))

        # Packs and loose objects are present; alternates may or may not be.
        assert 1 < len(db.databases()) < 4

        # Info and stream access must work for an arbitrary sha.
        first_sha = next(db.sha_iter())
        assert isinstance(db.info(first_sha), OInfo)
        assert isinstance(db.stream(first_sha), OStream)

        limit = 50
        assert db.size() >= limit
        all_shas = list(db.sha_iter())
        assert len(all_shas) == db.size()
        # Only check a prefix of the shas to keep the test fast.
        shortened = all_shas[:limit]

        # Partial-sha resolution is compound functionality without its own
        # test module, so cover it here.  First an uneven, quite short prefix,
        gitdb_sha_hex = bin_to_hex(first_sha)
        assert db.partial_to_complete_sha_hex(gitdb_sha_hex[:5]) == first_sha
        # then alternate between even and uneven hexsha prefix lengths.
        for offset, binsha in enumerate(shortened):
            prefix = bin_to_hex(binsha)[: 8 - (offset % 2)]
            assert db.partial_to_complete_sha_hex(prefix) == binsha
        # An unknown prefix must raise.
        self.assertRaises(BadObject, db.partial_to_complete_sha_hex, "0000")

    @with_rw_directory
    def test_writing(self, path):
        """Writing objects into a freshly created database must succeed."""
        db = GitDB(path)
        self._assert_object_writing(db)
| 34.185185 | 82 | 0.656555 |
import os
from gitdb.base import OInfo, OStream
from gitdb.db import GitDB
from gitdb.exc import BadObject
from gitdb.test.db.lib import TestDBBase, with_rw_directory
from gitdb.util import bin_to_hex
class TestGitDB(TestDBBase):
    def test_reading(self):
        """Verify read access on the fixture repository's object database."""
        gdb = GitDB(os.path.join(self.gitrepopath, "objects"))
        # Packs and loose objects are present; alternates may or may not be.
        assert 1 < len(gdb.databases()) < 4
        # access should be possible
        gitdb_sha = next(gdb.sha_iter())
        assert isinstance(gdb.info(gitdb_sha), OInfo)
        assert isinstance(gdb.stream(gitdb_sha), OStream)
        ni = 50
        assert gdb.size() >= ni
        sha_list = list(gdb.sha_iter())
        assert len(sha_list) == gdb.size()
        sha_list = sha_list[:ni]  # speed up tests ...
        # Partial-sha resolution: compound functionality without a test
        # module of its own, covered here.  Start with a short, uneven
        # prefix ...
        gitdb_sha_hex = bin_to_hex(gitdb_sha)
        assert gdb.partial_to_complete_sha_hex(gitdb_sha_hex[:5]) == gitdb_sha
        # ... then alternate between even and uneven hexsha prefix lengths.
        for i, binsha in enumerate(sha_list):
            assert (
                gdb.partial_to_complete_sha_hex(bin_to_hex(binsha)[: 8 - (i % 2)])
                == binsha
            )
        # An unknown prefix must raise BadObject.
        self.assertRaises(BadObject, gdb.partial_to_complete_sha_hex, "0000")
    @with_rw_directory
    def test_writing(self, path):
        """Writing objects into a freshly created database must succeed."""
        gdb = GitDB(path)
        # Delegates the actual write assertions to the base-class helper.
        self._assert_object_writing(gdb)
| true | true |
1c2b5cf477d0d0e802f44eb132aa3ae05d93d7b2 | 2,199 | py | Python | app/browser_action.py | I-s-23/selenium-docker-env | 5ee0a2f3a6ca8be90d4cfb3cfcdea1fde3cf07df | [
"MIT"
] | null | null | null | app/browser_action.py | I-s-23/selenium-docker-env | 5ee0a2f3a6ca8be90d4cfb3cfcdea1fde3cf07df | [
"MIT"
] | null | null | null | app/browser_action.py | I-s-23/selenium-docker-env | 5ee0a2f3a6ca8be90d4cfb3cfcdea1fde3cf07df | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import annotations
import sys
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from webdriver_manager.chrome import ChromeDriverManager
from pyvirtualdisplay import Display
class Chrome:
    """Helper around a Selenium Chrome WebDriver running inside a virtual
    X display (pyvirtualdisplay / Xvfb)."""

    def preparation(self, headless: bool, set_size=(0, 0)):
        """Prepare a Selenium Chrome WebDriver and start a virtual display.

        Args:
            headless (bool): start the browser in headless mode when True.
            set_size (tuple): virtual display size used in non-headless mode;
                ``(0, 0)`` selects the 1920x2080 default.

        Returns:
            tuple: the configured Chrome WebDriver and the started Display.
        """
        # NOTE(review): the display is created with visible=True even for
        # headless runs; presumably only the size matters here -- confirm.
        if headless:
            display = Display(visible=True, size=(800, 600))
        else:
            display = Display(
                visible=True,
                size=(1920, 2080) if set_size == (0, 0) else set_size,
                backend="xvfb",
                use_xauth=True,
            )
        display.start()
        options = webdriver.ChromeOptions()
        # Headless flag, or an explicit window size for headed runs.
        if headless:
            options.add_argument("--headless")
        else:
            options.add_argument("--window-size=1920x1080")
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")  # avoid /dev/shm exhaustion in containers
        options.add_argument("--disable-gpu")  # needed when starting in headless mode
        options.add_argument("--lang=ja-JP")
        # Allow downloads without an interactive prompt.
        options.add_experimental_option(
            "prefs",
            {
                "download.prompt_for_download": False,
                "download.directory_upgrade": True,
                "safebrowsing.enabled": True,
            },
        )
        # Open the browser; webdriver-manager resolves the matching
        # chromedriver binary and returns its path.
        return webdriver.Chrome(ChromeDriverManager().install(), options=options), display

    def open_run_task(self, function, headless: bool, args1=None):
        """Run *function* against a freshly prepared driver with cleanup.

        Args:
            function: callable receiving the driver (and optionally *args1*).
            headless (bool): forwarded to :meth:`preparation`.
            args1: optional extra argument passed through to *function*.
        """
        driver, display = self.preparation(headless)
        try:
            if args1 is None:
                function(driver)
            else:
                function(driver, args1)
        except TimeoutException:
            print("Timeout Error", sys.exc_info()[0])
        except:
            # Bare except kept on purpose: log anything unexpected
            # (including SystemExit/KeyboardInterrupt) and re-raise.
            print("Unexpected error:", sys.exc_info()[0])
            raise
        finally:
            driver.close()
            driver.quit()
            display.stop()
| 30.541667 | 90 | 0.58754 |
from __future__ import annotations
import sys
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from webdriver_manager.chrome import ChromeDriverManager
from pyvirtualdisplay import Display
class Chrome:
    """Helper around a Selenium Chrome WebDriver backed by a virtual
    X display (pyvirtualdisplay / Xvfb)."""
    def preparation(self, headless: bool, set_size=(0, 0)):
        """Prepare the Chrome WebDriver and start a virtual display.

        Args:
            headless (bool): start the browser in headless mode when True.
            set_size (tuple): display size for the non-headless case;
                ``(0, 0)`` selects the 1920x2080 default.

        Returns:
            tuple: (configured Chrome WebDriver, started Display).
        """
        # Small 800x600 display for headless runs, otherwise a large
        # Xvfb-backed display of the requested size.
        display = (
            Display(visible=True, size=(800, 600))
            if headless == True
            else Display(
                visible=True,
                size=(1920, 2080) if set_size == (0, 0) else set_size,
                backend="xvfb",
                use_xauth=True,
            )
        )
        display.start()
        options = webdriver.ChromeOptions()
        # Headless flag, or an explicit window size for headed runs.
        options.add_argument("--headless") if headless == True else options.add_argument(
            "--window-size=1920x1080"
        )
        options.add_argument("--no-sandbox")
        options.add_argument("--disable-dev-shm-usage")  # avoid /dev/shm exhaustion in containers
        options.add_argument("--disable-gpu")  # needed when starting in headless mode
        options.add_argument("--lang=ja-JP")
        # Allow downloads without an interactive prompt.
        options.add_experimental_option(
            "prefs",
            {
                "download.prompt_for_download": False,
                "download.directory_upgrade": True,
                "safebrowsing.enabled": True,
            },
        )
        # webdriver-manager resolves and installs the matching chromedriver.
        return webdriver.Chrome(ChromeDriverManager().install(), options=options), display
    def open_run_task(self, function, headless: bool, args1=None):
        """Run *function* with a freshly prepared driver; always clean up.

        Args:
            function: callable receiving the driver (and optionally *args1*).
            headless (bool): forwarded to :meth:`preparation`.
            args1: optional extra argument for *function*.
        """
        driver, display = self.preparation(headless)
        try:
            function(driver) if args1 is None else function(driver, args1)
        except TimeoutException:
            print("Timeout Error", sys.exc_info()[0])
        except:
            # Log anything unexpected and re-raise it.
            print("Unexpected error:", sys.exc_info()[0])
            raise
        finally:
            driver.close()
            driver.quit()
            display.stop()
| true | true |
1c2b5d1f0b9407169b2ba7a7623ba0fe3a0ada62 | 715 | py | Python | setup.py | sevashasla/TowerDefence | 73625d88cdb70d4c026d6f452604d193bc32c127 | [
"MIT"
] | null | null | null | setup.py | sevashasla/TowerDefence | 73625d88cdb70d4c026d6f452604d193bc32c127 | [
"MIT"
] | null | null | null | setup.py | sevashasla/TowerDefence | 73625d88cdb70d4c026d6f452604d193bc32c127 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import os
def read(fname):
    """Return the text content of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed deterministically
    (the original ``open(...).read()`` left closing to the garbage collector).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
# Package build/installation metadata for the TowerDefence game.
setup(
    name="TowerDefence",
    version="0.0.6",
    author="ArtemyBobkov & sevashasla",
    url="https://github.com/sevashasla/TowerDefence",
    long_description=read("README.md"),
    description="simple Tower-Defence game",
    # Sources live under src/ (src-layout).
    packages=find_packages(where="src"),
    package_dir={'': 'src'},
    # Ship the game assets and data files alongside the code.
    package_data={
        "TowerDefence": ["Assets/*", "Data/*"],
    },
    install_requires=[
        "numpy>=1.17.4",
        "pygame>=1.9.6"
    ],
    # The `TowerDefence` console command launches TowerDefence.app.
    entry_points={
        'console_scripts': [
            'TowerDefence=TowerDefence:app',
        ],
    },
)
| 22.34375 | 70 | 0.604196 | from setuptools import setup, find_packages
import os
def read(fname):
    """Return the content of *fname*, resolved relative to this setup.py.

    NOTE(review): the file handle is only closed by garbage collection.
    """
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
# Package build/installation metadata for the TowerDefence game.
setup(
    name="TowerDefence",
    version="0.0.6",
    author="ArtemyBobkov & sevashasla",
    url="https://github.com/sevashasla/TowerDefence",
    long_description=read("README.md"),
    description="simple Tower-Defence game",
    # Sources live under src/ (src-layout).
    packages=find_packages(where="src"),
    package_dir={'': 'src'},
    # Ship the game assets and data files alongside the code.
    package_data={
        "TowerDefence": ["Assets/*", "Data/*"],
    },
    install_requires=[
        "numpy>=1.17.4",
        "pygame>=1.9.6"
    ],
    # The `TowerDefence` console command launches TowerDefence.app.
    entry_points={
        'console_scripts': [
            'TowerDefence=TowerDefence:app',
        ],
    },
)
| true | true |
1c2b5d87f190678bdff399a2bcdc29f35b3abe34 | 709 | py | Python | mooringlicensing/migrations/0267_auto_20211007_1204.py | mintcoding/mooringlicensing | aac8cba1c84834b834a702c15b758121aeae0de7 | [
"Apache-2.0"
] | null | null | null | mooringlicensing/migrations/0267_auto_20211007_1204.py | mintcoding/mooringlicensing | aac8cba1c84834b834a702c15b758121aeae0de7 | [
"Apache-2.0"
] | null | null | null | mooringlicensing/migrations/0267_auto_20211007_1204.py | mintcoding/mooringlicensing | aac8cba1c84834b834a702c15b758121aeae0de7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-10-07 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: replace ApplicationFee.fee_items_for_aa
    with a ManyToMany 'fee_items' routed through FeeItemApplicationFee."""
    dependencies = [
        ('mooringlicensing', '0266_remove_applicationfee_fee_items'),
    ]
    operations = [
        # Drop the old direct many-to-many field ...
        migrations.RemoveField(
            model_name='applicationfee',
            name='fee_items_for_aa',
        ),
        # ... and re-add 'fee_items' using the explicit through-model.
        migrations.AddField(
            model_name='applicationfee',
            name='fee_items',
            field=models.ManyToManyField(related_name='application_fees', through='mooringlicensing.FeeItemApplicationFee', to='mooringlicensing.FeeItem'),
        ),
    ]
| 28.36 | 155 | 0.657264 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: replace ApplicationFee.fee_items_for_aa
    with a ManyToMany 'fee_items' routed through FeeItemApplicationFee."""
    dependencies = [
        ('mooringlicensing', '0266_remove_applicationfee_fee_items'),
    ]
    operations = [
        # Drop the old direct many-to-many field ...
        migrations.RemoveField(
            model_name='applicationfee',
            name='fee_items_for_aa',
        ),
        # ... and re-add 'fee_items' using the explicit through-model.
        migrations.AddField(
            model_name='applicationfee',
            name='fee_items',
            field=models.ManyToManyField(related_name='application_fees', through='mooringlicensing.FeeItemApplicationFee', to='mooringlicensing.FeeItem'),
        ),
    ]
| true | true |
1c2b5ec53d77460426ee69f984ffe6c23061e78a | 18,163 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/dumbo/phys/Phys_connect.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/dumbo/phys/Phys_connect.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/dumbo/phys/Phys_connect.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | from pyradioconfig.calculator_model_framework.interfaces.iphy import IPhy
from py_2_and_3_compatibility import *
class PHYS_connect(IPhy):
def Connect_base(self, phy, model):
phy.profile_inputs.baudrate_tol_ppm.value = 0
phy.profile_inputs.dsss_chipping_code.value = long(0)
phy.profile_inputs.dsss_len.value = 0
phy.profile_inputs.dsss_spreading_factor.value = 0
phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.FSK2
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.shaping_filter_param.value = 0.5
phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.NRZ
phy.profile_inputs.xtal_frequency_hz.value = 38400000
phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.MSB_FIRST
phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.MSB_FIRST
phy.profile_inputs.preamble_pattern_len.value = 2
phy.profile_inputs.preamble_length.value = 32
phy.profile_inputs.preamble_pattern.value = 1
phy.profile_inputs.syncword_0.value = long(11732)
phy.profile_inputs.syncword_1.value = long(0)
phy.profile_inputs.syncword_length.value = 16
phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.PN9
def PHY_Connect_902MHz_2GFSK_200kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'US FCC 902, Brazil 902', readable_name="Connect 902MHz 2GFSK 200kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.base_frequency_hz.value = long(902000000)
phy.profile_inputs.bitrate.value = 200000
phy.profile_inputs.deviation.value = 50000
phy.profile_inputs.channel_spacing_hz.value = 400000
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_434MHz_2GFSK_200kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'US FCC 434', readable_name="Connect 434MHz 2GFSK 200kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.bandwidth_hz.value = 400000
phy.profile_inputs.base_frequency_hz.value = long(434000000)
phy.profile_inputs.bitrate.value = 200000
phy.profile_inputs.deviation.value = 100000
phy.profile_inputs.channel_spacing_hz.value = 500000
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_863MHz_2GFSK_100kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'Europe 868', readable_name="Connect 863MHz 2GFSK 100kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.bandwidth_hz.value = 200000
phy.profile_inputs.base_frequency_hz.value = long(863000000)
phy.profile_inputs.bitrate.value = 100000
phy.profile_inputs.deviation.value = 50000
phy.profile_inputs.channel_spacing_hz.value = 400000
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_169MHz_2GFSK_4p8kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'Europe 169', readable_name="Connect 169MHz 2GFSK 4.8kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.base_frequency_hz.value = long(169000000)
phy.profile_inputs.bitrate.value = 4800
phy.profile_inputs.deviation.value = 1200
phy.profile_inputs.channel_spacing_hz.value = 12500
phy.profile_inputs.number_of_timing_windows.value = 2
phy.profile_inputs.rx_xtal_error_ppm.value = 7
phy.profile_inputs.symbols_in_timing_window.value = 6
phy.profile_inputs.timing_detection_threshold.value = 10
phy.profile_inputs.tx_xtal_error_ppm.value = 7
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_490MHz_2GFSK_10kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'China 490', readable_name="Connect 490MHz 2GFSK 10kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.base_frequency_hz.value = long(490000000)
phy.profile_inputs.bitrate.value = 10000
phy.profile_inputs.deviation.value = 25000
phy.profile_inputs.channel_spacing_hz.value = 200000
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
def PHY_Connect_920MHz_2GFSK_100kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'Japan 915', readable_name="Connect 920MHz 2GFSK 100kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.bandwidth_hz.value = 198000
phy.profile_inputs.base_frequency_hz.value = long(920000000)
phy.profile_inputs.bitrate.value = 100000
phy.profile_inputs.deviation.value = 50000
phy.profile_inputs.channel_spacing_hz.value = 400000
phy.profile_inputs.rx_xtal_error_ppm.value = 0
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 0
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_424MHz_2GFSK_4p8kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'Korea 424', readable_name="Connect 424MHz 2GFSK 4.8kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.bandwidth_hz.value = 12000
phy.profile_inputs.base_frequency_hz.value = long(424700000)
phy.profile_inputs.bitrate.value = 4800
phy.profile_inputs.deviation.value = 2400
phy.profile_inputs.channel_spacing_hz.value = 12500
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.freq_offset_hz.value = 1450
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
def PHY_Connect_447MHz_2GFSK_4p8kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'Korea 447', readable_name="Connect 447MHz 2GFSK 4.8kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.bandwidth_hz.value = 12000
phy.profile_inputs.base_frequency_hz.value = long(447000000)
phy.profile_inputs.bitrate.value = 4800
phy.profile_inputs.deviation.value = 2400
phy.profile_inputs.channel_spacing_hz.value = 12500
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.freq_offset_hz.value = 1450
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
def PHY_Connect_917MHz_2GFSK_4p8kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'Korea 915', readable_name="Connect 917MHz 2GFSK 4.8kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.base_frequency_hz.value = long(917100000)
phy.profile_inputs.bitrate.value = 4800
phy.profile_inputs.deviation.value = 2400
phy.profile_inputs.channel_spacing_hz.value = 200000
phy.profile_inputs.number_of_timing_windows.value = 10
phy.profile_inputs.rx_xtal_error_ppm.value = 2
phy.profile_inputs.symbols_in_timing_window.value = 1
phy.profile_inputs.tx_xtal_error_ppm.value = 3
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.NONE
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_915mhz_oqpsk_800kcps_100kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'DSSS 100', readable_name="Connect 915MHz OQPSK 800kcps 100kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_hysteresis.value = 0
phy.profile_inputs.agc_power_target.value = -6
phy.profile_inputs.agc_settling_delay.value = 40
phy.profile_inputs.base_frequency_hz.value = long(902000000)
phy.profile_inputs.baudrate_tol_ppm.value = 4000
phy.profile_inputs.bitrate.value = 100000
phy.profile_inputs.channel_spacing_hz.value = 2000000
phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.LSB_FIRST
phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.LSB_FIRST
phy.profile_inputs.deviation.value = 200000
phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
phy.profile_inputs.dsss_chipping_code.value = long(1951056795)
phy.profile_inputs.dsss_len.value = 32
phy.profile_inputs.dsss_spreading_factor.value = 8
phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
phy.profile_inputs.manchester_mapping.value = model.vars.manchester_mapping.var_enum.Default
phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.OQPSK
phy.profile_inputs.pll_bandwidth_tx.value = model.vars.pll_bandwidth_tx.var_enum.BW_2520KHz
phy.profile_inputs.preamble_length.value = 32
phy.profile_inputs.preamble_pattern.value = 0
phy.profile_inputs.preamble_pattern_len.value = 4
phy.profile_inputs.rssi_period.value = 8
phy.profile_inputs.rx_xtal_error_ppm.value = 0
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.NONE
phy.profile_inputs.shaping_filter_param.value = 0.5
phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.DSSS
phy.profile_inputs.syncword_0.value = long(167)
phy.profile_inputs.syncword_1.value = long(0)
phy.profile_inputs.syncword_length.value = 8
phy.profile_inputs.timing_detection_threshold.value = 65
phy.profile_inputs.timing_sample_threshold.value = 0
phy.profile_inputs.tx_xtal_error_ppm.value = 0
phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.NONE
phy.profile_inputs.xtal_frequency_hz.value = 38400000
def PHY_Connect_915mhz_oqpsk_2Mcps_250kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'DSSS 250', readable_name="Connect 915MHz OQPSK 2Mcps 250kbps")
self.Connect_base(phy, model)
phy.profile_inputs.xtal_frequency_hz.value = 38400000
phy.profile_inputs.rx_xtal_error_ppm.value = 0
phy.profile_inputs.tx_xtal_error_ppm.value = 0
phy.profile_inputs.syncword_0.value = long(167)
phy.profile_inputs.syncword_1.value = long(0)
phy.profile_inputs.syncword_tx_skip.value = False
phy.profile_inputs.syncword_length.value = 8
phy.profile_inputs.preamble_pattern_len.value = 4
phy.profile_inputs.preamble_length.value = 32
phy.profile_inputs.preamble_pattern.value = 0
phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.OQPSK
phy.profile_inputs.deviation.value = 500000
phy.profile_inputs.channel_spacing_hz.value = 2000000
phy.profile_inputs.bitrate.value = 250000
phy.profile_inputs.baudrate_tol_ppm.value = 4000
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.NONE
phy.profile_inputs.base_frequency_hz.value = long(902000000)
phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
phy.profile_inputs.shaping_filter_param.value = 0.5
phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.DSSS
phy.profile_inputs.manchester_mapping.value = model.vars.manchester_mapping.var_enum.Default
phy.profile_inputs.dsss_chipping_code.value = long(1951056795)
phy.profile_inputs.dsss_len.value = 32
phy.profile_inputs.dsss_spreading_factor.value = 8
phy.profile_inputs.asynchronous_rx_enable.value = False
phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.LSB_FIRST
phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.LSB_FIRST
phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.NONE
phy.profile_inputs.timing_detection_threshold.value = 65
phy.profile_inputs.timing_sample_threshold.value = 0
phy.profile_inputs.pll_bandwidth_tx.value = model.vars.pll_bandwidth_tx.var_enum.BW_2520KHz
phy.profile_inputs.agc_power_target.value = -6
phy.profile_inputs.rssi_period.value = 8
phy.profile_inputs.agc_hysteresis.value = 0
phy.profile_inputs.agc_settling_delay.value = 40
def PHY_Connect_2_4GHz_OQPSK_2Mcps_250kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, '2.4GHz OQPSK 2Mcps 250kbps', readable_name="Connect 2.4GHz OQPSK 2Mcps 250kbps")
self.Connect_base(phy, model)
phy.profile_inputs.base_frequency_hz.value = long(2405000000)
phy.profile_inputs.agc_hysteresis.value = 0
phy.profile_inputs.agc_power_target.value = -6
phy.profile_inputs.agc_settling_delay.value = 40
phy.profile_inputs.asynchronous_rx_enable.value = False
phy.profile_inputs.baudrate_tol_ppm.value = 4000
phy.profile_inputs.bitrate.value = 250000
phy.profile_inputs.channel_spacing_hz.value = 5000000
phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.MSB_FIRST
phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.MSB_FIRST
phy.profile_inputs.deviation.value = 500000
phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
phy.profile_inputs.dsss_chipping_code.value = long(1951056795)
phy.profile_inputs.dsss_len.value = 32
phy.profile_inputs.dsss_spreading_factor.value = 8
phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
phy.profile_inputs.manchester_mapping.value = model.vars.manchester_mapping.var_enum.Default
phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.OQPSK
phy.profile_inputs.pll_bandwidth_tx.value = model.vars.pll_bandwidth_tx.var_enum.BW_2520KHz
phy.profile_inputs.preamble_length.value = 32
phy.profile_inputs.preamble_pattern.value = 0
phy.profile_inputs.preamble_pattern_len.value = 4
phy.profile_inputs.rssi_period.value = 8
phy.profile_inputs.rx_xtal_error_ppm.value = 0
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Custom_OQPSK
phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.DSSS
phy.profile_inputs.syncword_0.value = long(229)
phy.profile_inputs.syncword_1.value = long(0)
phy.profile_inputs.syncword_length.value = 8
phy.profile_inputs.timing_detection_threshold.value = 65
phy.profile_inputs.timing_resync_period.value = 2
phy.profile_inputs.timing_sample_threshold.value = 0
phy.profile_inputs.tx_xtal_error_ppm.value = 0
phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.NONE
phy.profile_inputs.xtal_frequency_hz.value = 38400000
| 60.949664 | 140 | 0.754721 | from pyradioconfig.calculator_model_framework.interfaces.iphy import IPhy
from py_2_and_3_compatibility import *
class PHYS_connect(IPhy):
def Connect_base(self, phy, model):
phy.profile_inputs.baudrate_tol_ppm.value = 0
phy.profile_inputs.dsss_chipping_code.value = long(0)
phy.profile_inputs.dsss_len.value = 0
phy.profile_inputs.dsss_spreading_factor.value = 0
phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.FSK2
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.shaping_filter_param.value = 0.5
phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.NRZ
phy.profile_inputs.xtal_frequency_hz.value = 38400000
phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.MSB_FIRST
phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.MSB_FIRST
phy.profile_inputs.preamble_pattern_len.value = 2
phy.profile_inputs.preamble_length.value = 32
phy.profile_inputs.preamble_pattern.value = 1
phy.profile_inputs.syncword_0.value = long(11732)
phy.profile_inputs.syncword_1.value = long(0)
phy.profile_inputs.syncword_length.value = 16
phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.PN9
def PHY_Connect_902MHz_2GFSK_200kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'US FCC 902, Brazil 902', readable_name="Connect 902MHz 2GFSK 200kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.base_frequency_hz.value = long(902000000)
phy.profile_inputs.bitrate.value = 200000
phy.profile_inputs.deviation.value = 50000
phy.profile_inputs.channel_spacing_hz.value = 400000
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_434MHz_2GFSK_200kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'US FCC 434', readable_name="Connect 434MHz 2GFSK 200kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.bandwidth_hz.value = 400000
phy.profile_inputs.base_frequency_hz.value = long(434000000)
phy.profile_inputs.bitrate.value = 200000
phy.profile_inputs.deviation.value = 100000
phy.profile_inputs.channel_spacing_hz.value = 500000
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_863MHz_2GFSK_100kbps(self, model, phy_name=None):
phy = self._makePhy(model, model.profiles.Connect, 'Europe 868', readable_name="Connect 863MHz 2GFSK 100kbps")
self.Connect_base(phy, model)
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.bandwidth_hz.value = 200000
phy.profile_inputs.base_frequency_hz.value = long(863000000)
phy.profile_inputs.bitrate.value = 100000
phy.profile_inputs.deviation.value = 50000
phy.profile_inputs.channel_spacing_hz.value = 400000
phy.profile_inputs.rx_xtal_error_ppm.value = 20
phy.profile_inputs.symbols_in_timing_window.value = 14
phy.profile_inputs.tx_xtal_error_ppm.value = 20
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_169MHz_2GFSK_4p8kbps(self, model, phy_name=None):
    """Connect profile: 169 MHz band ('Europe 169'), 2GFSK, 4.8 kbps.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'Europe 169', readable_name="Connect 169MHz 2GFSK 4.8kbps")
    self.Connect_base(phy, model)
    # Narrowband: 4.8 kbps, 1.2 kHz deviation, tight 7 ppm crystal budget.
    phy.profile_inputs.agc_period.value = 0
    phy.profile_inputs.base_frequency_hz.value = long(169000000)
    phy.profile_inputs.bitrate.value = 4800
    phy.profile_inputs.deviation.value = 1200
    phy.profile_inputs.channel_spacing_hz.value = 12500
    phy.profile_inputs.number_of_timing_windows.value = 2
    phy.profile_inputs.rx_xtal_error_ppm.value = 7
    phy.profile_inputs.symbols_in_timing_window.value = 6
    phy.profile_inputs.timing_detection_threshold.value = 10
    phy.profile_inputs.tx_xtal_error_ppm.value = 7
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
    phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_490MHz_2GFSK_10kbps(self, model, phy_name=None):
    """Connect profile: 490 MHz band ('China 490'), 2GFSK, 10 kbps.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'China 490', readable_name="Connect 490MHz 2GFSK 10kbps")
    self.Connect_base(phy, model)
    # 10 kbps with 25 kHz deviation; Gaussian shaping.
    phy.profile_inputs.agc_period.value = 0
    phy.profile_inputs.base_frequency_hz.value = long(490000000)
    phy.profile_inputs.bitrate.value = 10000
    phy.profile_inputs.deviation.value = 25000
    phy.profile_inputs.channel_spacing_hz.value = 200000
    phy.profile_inputs.rx_xtal_error_ppm.value = 20
    phy.profile_inputs.symbols_in_timing_window.value = 14
    phy.profile_inputs.tx_xtal_error_ppm.value = 20
    phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
def PHY_Connect_920MHz_2GFSK_100kbps(self, model, phy_name=None):
    """Connect profile: 920 MHz band ('Japan 915'), 2GFSK, 100 kbps.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'Japan 915', readable_name="Connect 920MHz 2GFSK 100kbps")
    self.Connect_base(phy, model)
    # NOTE(review): crystal-error budgets are set to 0 ppm here, unlike the
    # sibling profiles (20 ppm) -- confirm this is intentional.
    phy.profile_inputs.agc_period.value = 0
    phy.profile_inputs.bandwidth_hz.value = 198000
    phy.profile_inputs.base_frequency_hz.value = long(920000000)
    phy.profile_inputs.bitrate.value = 100000
    phy.profile_inputs.deviation.value = 50000
    phy.profile_inputs.channel_spacing_hz.value = 400000
    phy.profile_inputs.rx_xtal_error_ppm.value = 0
    phy.profile_inputs.symbols_in_timing_window.value = 14
    phy.profile_inputs.tx_xtal_error_ppm.value = 0
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
    phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_424MHz_2GFSK_4p8kbps(self, model, phy_name=None):
    """Connect profile: 424.7 MHz band ('Korea 424'), 2GFSK, 4.8 kbps.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'Korea 424', readable_name="Connect 424MHz 2GFSK 4.8kbps")
    self.Connect_base(phy, model)
    # Narrowband 4.8 kbps / 2.4 kHz deviation with an explicit RX frequency offset.
    phy.profile_inputs.agc_period.value = 0
    phy.profile_inputs.bandwidth_hz.value = 12000
    phy.profile_inputs.base_frequency_hz.value = long(424700000)
    phy.profile_inputs.bitrate.value = 4800
    phy.profile_inputs.deviation.value = 2400
    phy.profile_inputs.channel_spacing_hz.value = 12500
    phy.profile_inputs.rx_xtal_error_ppm.value = 20
    phy.profile_inputs.symbols_in_timing_window.value = 14
    phy.profile_inputs.tx_xtal_error_ppm.value = 20
    phy.profile_inputs.freq_offset_hz.value = 1450
    phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
def PHY_Connect_447MHz_2GFSK_4p8kbps(self, model, phy_name=None):
    """Connect profile: 447 MHz band ('Korea 447'), 2GFSK, 4.8 kbps.

    Same settings as the Korea 424 profile except for the base frequency.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'Korea 447', readable_name="Connect 447MHz 2GFSK 4.8kbps")
    self.Connect_base(phy, model)
    phy.profile_inputs.agc_period.value = 0
    phy.profile_inputs.bandwidth_hz.value = 12000
    phy.profile_inputs.base_frequency_hz.value = long(447000000)
    phy.profile_inputs.bitrate.value = 4800
    phy.profile_inputs.deviation.value = 2400
    phy.profile_inputs.channel_spacing_hz.value = 12500
    phy.profile_inputs.rx_xtal_error_ppm.value = 20
    phy.profile_inputs.symbols_in_timing_window.value = 14
    phy.profile_inputs.tx_xtal_error_ppm.value = 20
    phy.profile_inputs.freq_offset_hz.value = 1450
    phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
def PHY_Connect_917MHz_2GFSK_4p8kbps(self, model, phy_name=None):
    """Connect profile: 917.1 MHz band ('Korea 915'), 2GFSK, 4.8 kbps.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'Korea 915', readable_name="Connect 917MHz 2GFSK 4.8kbps")
    self.Connect_base(phy, model)
    # No shaping filter; many short timing windows (10 windows of 1 symbol).
    phy.profile_inputs.agc_period.value = 0
    phy.profile_inputs.base_frequency_hz.value = long(917100000)
    phy.profile_inputs.bitrate.value = 4800
    phy.profile_inputs.deviation.value = 2400
    phy.profile_inputs.channel_spacing_hz.value = 200000
    phy.profile_inputs.number_of_timing_windows.value = 10
    phy.profile_inputs.rx_xtal_error_ppm.value = 2
    phy.profile_inputs.symbols_in_timing_window.value = 1
    phy.profile_inputs.tx_xtal_error_ppm.value = 3
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.NONE
    phy.profile_inputs.frequency_comp_mode.value = model.vars.frequency_comp_mode.var_enum.INTERNAL_LOCK_AT_PREAMBLE_DETECT
def PHY_Connect_915mhz_oqpsk_800kcps_100kbps(self, model, phy_name=None):
    """Connect profile: 915 MHz DSSS-OQPSK, 800 kcps chip rate, 100 kbps.

    100 kbps data rate with a spreading factor of 8 over a 32-chip code
    (DSSS), OQPSK modulation, no shaping filter.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'DSSS 100', readable_name="Connect 915MHz OQPSK 800kcps 100kbps")
    self.Connect_base(phy, model)
    phy.profile_inputs.agc_hysteresis.value = 0
    phy.profile_inputs.agc_power_target.value = -6
    phy.profile_inputs.agc_settling_delay.value = 40
    phy.profile_inputs.base_frequency_hz.value = long(902000000)
    phy.profile_inputs.baudrate_tol_ppm.value = 4000
    phy.profile_inputs.bitrate.value = 100000
    phy.profile_inputs.channel_spacing_hz.value = 2000000
    phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.LSB_FIRST
    phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.LSB_FIRST
    phy.profile_inputs.deviation.value = 200000
    phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
    # 32-chip DSSS code, spreading factor 8.
    phy.profile_inputs.dsss_chipping_code.value = long(1951056795)
    phy.profile_inputs.dsss_len.value = 32
    phy.profile_inputs.dsss_spreading_factor.value = 8
    phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
    phy.profile_inputs.manchester_mapping.value = model.vars.manchester_mapping.var_enum.Default
    phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.OQPSK
    phy.profile_inputs.pll_bandwidth_tx.value = model.vars.pll_bandwidth_tx.var_enum.BW_2520KHz
    phy.profile_inputs.preamble_length.value = 32
    phy.profile_inputs.preamble_pattern.value = 0
    phy.profile_inputs.preamble_pattern_len.value = 4
    phy.profile_inputs.rssi_period.value = 8
    phy.profile_inputs.rx_xtal_error_ppm.value = 0
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.NONE
    phy.profile_inputs.shaping_filter_param.value = 0.5
    phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.DSSS
    phy.profile_inputs.syncword_0.value = long(167)
    phy.profile_inputs.syncword_1.value = long(0)
    phy.profile_inputs.syncword_length.value = 8
    phy.profile_inputs.timing_detection_threshold.value = 65
    phy.profile_inputs.timing_sample_threshold.value = 0
    phy.profile_inputs.tx_xtal_error_ppm.value = 0
    phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.NONE
    phy.profile_inputs.xtal_frequency_hz.value = 38400000
def PHY_Connect_915mhz_oqpsk_2Mcps_250kbps(self, model, phy_name=None):
    """Connect profile: 915 MHz DSSS-OQPSK, 2 Mcps chip rate, 250 kbps.

    Same DSSS code/spreading as the 800 kcps profile, scaled to 250 kbps.

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, 'DSSS 250', readable_name="Connect 915MHz OQPSK 2Mcps 250kbps")
    self.Connect_base(phy, model)
    phy.profile_inputs.xtal_frequency_hz.value = 38400000
    phy.profile_inputs.rx_xtal_error_ppm.value = 0
    phy.profile_inputs.tx_xtal_error_ppm.value = 0
    phy.profile_inputs.syncword_0.value = long(167)
    phy.profile_inputs.syncword_1.value = long(0)
    phy.profile_inputs.syncword_tx_skip.value = False
    phy.profile_inputs.syncword_length.value = 8
    phy.profile_inputs.preamble_pattern_len.value = 4
    phy.profile_inputs.preamble_length.value = 32
    phy.profile_inputs.preamble_pattern.value = 0
    phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.OQPSK
    phy.profile_inputs.deviation.value = 500000
    phy.profile_inputs.channel_spacing_hz.value = 2000000
    phy.profile_inputs.bitrate.value = 250000
    phy.profile_inputs.baudrate_tol_ppm.value = 4000
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.NONE
    phy.profile_inputs.base_frequency_hz.value = long(902000000)
    phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
    phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
    phy.profile_inputs.shaping_filter_param.value = 0.5
    phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.DSSS
    phy.profile_inputs.manchester_mapping.value = model.vars.manchester_mapping.var_enum.Default
    # 32-chip DSSS code, spreading factor 8.
    phy.profile_inputs.dsss_chipping_code.value = long(1951056795)
    phy.profile_inputs.dsss_len.value = 32
    phy.profile_inputs.dsss_spreading_factor.value = 8
    phy.profile_inputs.asynchronous_rx_enable.value = False
    phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.LSB_FIRST
    phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.LSB_FIRST
    phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.NONE
    phy.profile_inputs.timing_detection_threshold.value = 65
    phy.profile_inputs.timing_sample_threshold.value = 0
    phy.profile_inputs.pll_bandwidth_tx.value = model.vars.pll_bandwidth_tx.var_enum.BW_2520KHz
    phy.profile_inputs.agc_power_target.value = -6
    phy.profile_inputs.rssi_period.value = 8
    phy.profile_inputs.agc_hysteresis.value = 0
    phy.profile_inputs.agc_settling_delay.value = 40
def PHY_Connect_2_4GHz_OQPSK_2Mcps_250kbps(self, model, phy_name=None):
    """Connect profile: 2.4 GHz DSSS-OQPSK, 2 Mcps chip rate, 250 kbps.

    Unlike the sub-GHz DSSS profiles this one uses MSB-first CRC endianness,
    a Custom_OQPSK shaping filter, and a different sync word (229).

    Args:
        model: calculator model providing profiles and var enums.
        phy_name: unused here; kept for a uniform factory signature.
    """
    phy = self._makePhy(model, model.profiles.Connect, '2.4GHz OQPSK 2Mcps 250kbps', readable_name="Connect 2.4GHz OQPSK 2Mcps 250kbps")
    self.Connect_base(phy, model)
    phy.profile_inputs.base_frequency_hz.value = long(2405000000)
    phy.profile_inputs.agc_hysteresis.value = 0
    phy.profile_inputs.agc_power_target.value = -6
    phy.profile_inputs.agc_settling_delay.value = 40
    phy.profile_inputs.asynchronous_rx_enable.value = False
    phy.profile_inputs.baudrate_tol_ppm.value = 4000
    phy.profile_inputs.bitrate.value = 250000
    phy.profile_inputs.channel_spacing_hz.value = 5000000
    phy.profile_inputs.crc_bit_endian.value = model.vars.crc_bit_endian.var_enum.MSB_FIRST
    phy.profile_inputs.crc_byte_endian.value = model.vars.crc_byte_endian.var_enum.MSB_FIRST
    phy.profile_inputs.deviation.value = 500000
    phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
    # 32-chip DSSS code, spreading factor 8.
    phy.profile_inputs.dsss_chipping_code.value = long(1951056795)
    phy.profile_inputs.dsss_len.value = 32
    phy.profile_inputs.dsss_spreading_factor.value = 8
    phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
    phy.profile_inputs.manchester_mapping.value = model.vars.manchester_mapping.var_enum.Default
    phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.OQPSK
    phy.profile_inputs.pll_bandwidth_tx.value = model.vars.pll_bandwidth_tx.var_enum.BW_2520KHz
    phy.profile_inputs.preamble_length.value = 32
    phy.profile_inputs.preamble_pattern.value = 0
    phy.profile_inputs.preamble_pattern_len.value = 4
    phy.profile_inputs.rssi_period.value = 8
    phy.profile_inputs.rx_xtal_error_ppm.value = 0
    phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Custom_OQPSK
    phy.profile_inputs.symbol_encoding.value = model.vars.symbol_encoding.var_enum.DSSS
    phy.profile_inputs.syncword_0.value = long(229)
    phy.profile_inputs.syncword_1.value = long(0)
    phy.profile_inputs.syncword_length.value = 8
    phy.profile_inputs.timing_detection_threshold.value = 65
    phy.profile_inputs.timing_resync_period.value = 2
    phy.profile_inputs.timing_sample_threshold.value = 0
    phy.profile_inputs.tx_xtal_error_ppm.value = 0
    phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.NONE
    phy.profile_inputs.xtal_frequency_hz.value = 38400000
| true | true |
1c2b5efb56224d81baa5d6537238741d384c72c3 | 470 | py | Python | src/111.minimum-depth-of-binary-tree.py | hippieZhou/The-Way-Of-LeetCode | c63d777e01413726b6214c616c20c61f8e5b330b | [
"MIT"
] | null | null | null | src/111.minimum-depth-of-binary-tree.py | hippieZhou/The-Way-Of-LeetCode | c63d777e01413726b6214c616c20c61f8e5b330b | [
"MIT"
] | null | null | null | src/111.minimum-depth-of-binary-tree.py | hippieZhou/The-Way-Of-LeetCode | c63d777e01413726b6214c616c20c61f8e5b330b | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
class TreeNode:
    """A binary tree node: a payload value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start out absent; callers attach subtrees afterwards.
        self.left = self.right = None
class Solution:
    """LeetCode 111: minimum depth of a binary tree."""

    def minDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the shortest root-to-leaf path."""
        if root is None:
            return 0
        left_depth = self.minDepth(root.left)
        right_depth = self.minDepth(root.right)
        # A missing child contributes depth 0; the shortest path must end at
        # a leaf, so with one child missing we must follow the existing one.
        if left_depth and right_depth:
            return 1 + min(left_depth, right_depth)
        return 1 + max(left_depth, right_depth)
| 27.647059 | 79 | 0.589362 |
# Duplicate of the TreeNode definition above (second dataset column);
# kept byte-for-byte.
class TreeNode:
    def __init__(self, x):
        # Node payload plus initially-empty child links.
        self.val = x
        self.left = None
        self.right = None
# Duplicate of the Solution definition above (second dataset column);
# kept byte-for-byte.
class Solution:
    def minDepth(self, root: TreeNode) -> int:
        """Return the minimum root-to-leaf depth of the tree."""
        if not root:
            return 0
        if root.left and root.right:
            # Both children present: the shortest path goes through the
            # shallower subtree.
            return min(self.minDepth(root.left), self.minDepth(root.right)) + 1
        else:
            # At most one child: the missing child's depth is 0, so max()
            # selects the existing subtree (or 0 at a leaf).
            return max(self.minDepth(root.left), self.minDepth(root.right)) + 1
| true | true |
1c2b606fe7eda08c0d867b30d7c90a164e33a20c | 29,600 | py | Python | Packs/PrismaCloud/Integrations/RedLock/RedLock.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | null | null | null | Packs/PrismaCloud/Integrations/RedLock/RedLock.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | 51 | 2022-02-25T22:28:40.000Z | 2022-03-31T22:34:58.000Z | Packs/PrismaCloud/Integrations/RedLock/RedLock.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | 1 | 2021-11-27T09:12:29.000Z | 2021-11-27T09:12:29.000Z | from CommonServerPython import *
# disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
URL = ''
VERIFY = False
DEFAULT_LIMIT = 100
# Standard headers
HEADERS = {'Content-Type': 'application/json', 'Accept': '*/*'}
TOKEN = None
def get_token():
    """Authenticate against the RedLock (Prisma Cloud) service and cache the
    session token.

    Reads the customer name and credentials from the integration parameters,
    POSTs to the ``login`` endpoint, stores the token in the module-level
    TOKEN, and injects it into HEADERS as ``x-redlock-auth``.

    Raises:
        Exception: on a non-200 response, on an unparsable body, or when the
            response contains no token (including the "customer name needed"
            case, where the available customer names are listed).

    Bug fix: TOKEN was previously assigned as a *local* variable, so the
    module-level TOKEN stayed None and req() re-authenticated on every single
    API call. Declaring it global makes the cache in req() effective.
    """
    global TOKEN
    response = requests.post(URL + 'login', headers=HEADERS, verify=VERIFY, json={
        'customerName': demisto.getParam('customer') or '',
        'username': demisto.getParam('credentials')['identifier'],
        'password': demisto.getParam('credentials')['password']
    })
    if response.status_code != requests.codes.ok:  # pylint: disable=no-member
        raise Exception('Error authenticating to RedLock service [%d] - %s' % (response.status_code, response.text))
    try:
        response_json = response.json()
        TOKEN = response_json.get('token')
        if not TOKEN:
            demisto.debug(json.dumps(response_json))
            message = 'Could not retrieve token from server: {}'.format(response_json.get("message"))
            if response_json.get('message') == 'login_needs_customer_name':
                available_customer_names = [name.get('customerName') for name in response_json.get('customerNames')]
                message = 'In order to login a customer name need to be configured. Available customer names: {}'.format(
                    {", ".join(available_customer_names)})
            raise Exception(message)
    except ValueError as exception:
        # response.json() raised -- the body was not valid JSON.
        demisto.log(exception)
        raise Exception('Could not parse API response.')
    HEADERS['x-redlock-auth'] = TOKEN
def req(method, path, data, param_data):
    """
    Generic request to Prisma Cloud (RedLock)

    Authenticates lazily (first call fetches a token), then issues the HTTP
    request. Returns the parsed JSON body, {} for an empty body, or False for
    two specific benign error statuses; raises for any other non-200 response.
    """
    if not TOKEN:
        get_token()
    response = requests.request(method, URL + path, json=data, params=param_data, headers=HEADERS, verify=VERIFY)
    if response.status_code != requests.codes.ok:  # pylint: disable=no-member
        text = response.text
        if response.headers.get('x-redlock-status'):
            # The service reports structured error details in this header as a
            # JSON list of {i18nKey, subject} entries; append them to the message.
            try:
                statuses = json.loads(response.headers.get('x-redlock-status'))  # type: ignore
                for status in statuses:
                    text += '\n%s [%s]' % (status.get('i18nKey', ''), status.get('subject', ''))
                    # Handle case for no remediation details
                    if status['i18nKey'] == 'remediation_unavailable':
                        return False
                    if status['i18nKey'] == 'alert_no_longer_in_expected_state':
                        return False
            except Exception:
                # Header was unparsable -- fall through and raise with what we have.
                pass
        raise Exception('Error in API call to RedLock service [%d] - %s' % (response.status_code, text))
    if not response.text:
        return {}
    return response.json()
def format_response(response):
    """Recursively rewrite every dict key via pascalToSpace with the spaces
    stripped, leaving values (other than nested containers) untouched.

    Empty containers and non-container values are returned unchanged.
    """
    if isinstance(response, dict) and response:
        return {pascalToSpace(key).replace(" ", ""): format_response(value)
                for key, value in response.items()}
    if isinstance(response, list) and response:
        return [format_response(element) for element in response]
    return response
def list_filters():
    """
    List the acceptable filters on alerts

    Queries the alert filter-suggestion endpoint and renders each available
    filter's name, allowed option values, and static-filter flag to the
    war room via demisto.results.
    """
    response = req('GET', 'filter/alert/suggest', None, None)
    # Response maps filter name -> {options: [...], staticFilter: bool}.
    filters = [{
        'Name': filter_,
        'Options': ','.join(response.get(filter_).get('options')),
        'Static': response.get(filter_).get('staticFilter')
    } for filter_ in response]
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'HumanReadable': tableToMarkdown('Filter options', filters, ['Name', 'Options', 'Static'])
    })
def convert_date_to_unix(date_str, date_format="%m/%d/%Y"):
    """Parse *date_str* with *date_format* (default MM/DD/YYYY) and return
    the corresponding milliseconds since the Unix epoch (UTC)."""
    parsed = datetime.strptime(date_str, date_format)
    epoch = datetime.utcfromtimestamp(0)
    return int((parsed - epoch).total_seconds() * 1000)
def convert_unix_to_date(timestamp):
    """Format epoch milliseconds as 'MM/DD/YYYY HH:MI:SS' (UTC).

    Returns 'N/A' for a falsy timestamp (None and 0 alike).
    """
    if not timestamp:
        return 'N/A'
    return datetime.utcfromtimestamp(timestamp / 1000).strftime('%m/%d/%Y %H:%M:%S')
def convert_unix_to_demisto(timestamp):
    """Format epoch milliseconds as the Demisto timestamp form
    'YYYY-MM-DDTHH:MI:SSZ' (UTC).

    Returns '' for a falsy timestamp (None and 0 alike).
    """
    if not timestamp:
        return ''
    return datetime.utcfromtimestamp(timestamp / 1000).strftime('%Y-%m-%dT%H:%M:%SZ')
def handle_time_filter(payload, base_case):
    """
    Add the time filter to the payload

    Builds payload['timeRange'] from the command arguments:
      * relative  -- time-range-unit plus time-range-value
      * to_now    -- time-range-unit alone
      * absolute  -- time-range-date-from plus time-range-date-to
      * base_case -- fallback when no time arguments are supplied
    Mixing relative and absolute arguments is rejected via return_error.
    """
    unit = demisto.getArg('time-range-unit')
    value = demisto.getArg('time-range-value')
    time_from = demisto.getArg('time-range-date-from')
    time_to = demisto.getArg('time-range-date-to')
    # Units accepted for relative ranges; to_now additionally allows
    # 'epoch'/'login' but not 'hour'.
    relative = ('hour', 'day', 'week', 'month', 'year')
    to_now = relative[1:] + ('epoch', 'login')
    if unit:
        if time_from or time_to:
            return_error('You cannot specify absolute times [time-range-date-from, time-range-date-to] '
                         + 'with relative times [time-range-unit, time-range-value]')
        if value:
            if unit not in relative:
                return_error('Time unit for relative time must be one of the following: ' + ','.join(relative))
            payload['timeRange'] = {'type': 'relative', 'value': {'amount': int(value), 'unit': unit}}
        else:
            if unit not in to_now:
                return_error('Time unit for to_now time must be one of the following: ' + ','.join(to_now))
            payload['timeRange'] = {'type': 'to_now', 'value': unit}
    else:
        if not time_from or not time_to:
            payload['timeRange'] = base_case
        else:
            payload['timeRange'] = {'type': 'absolute', 'value': {
                'startTime': convert_date_to_unix(time_from), 'endTime': convert_date_to_unix(time_to)}}
def handle_filters(payload):
    """
    Add filters to the filter object based on received arguments

    Translates the command's kebab-case argument names into the API's
    dotted filter names and appends an equality filter for every argument
    that was supplied.
    """
    args_conversion = {
        'alert-status': 'alert.status',
        'policy-name': 'policy.name',
        'policy-label': 'policy.label',
        'policy-compliance-standard': 'policy.complianceStandard',
        'cloud-account': 'cloud.account',
        'cloud-region': 'cloud.region',
        'alert-rule-name': 'alertRule.name',
        'resource-id': 'resource.id',
        'resource-name': 'resource.name',
        'resource-type': 'resource.type',
        'alert-id': 'alert.id',
        'cloud-type': 'cloud.type',
        'risk-grade': 'risk.grade',
        'policy-type': 'policy.type',
        'policy-severity': 'policy.severity'
    }
    payload['filters'] = []
    for filter_ in demisto.args():
        # Only the known filter arguments (with a non-empty value) are forwarded.
        if filter_ in ('policy-name', 'policy-label', 'policy-compliance-standard', 'cloud-account', 'cloud-region',
                       'alert-rule-name', 'resource-id', 'resource-name', 'resource-type', 'alert-status', 'alert-id',
                       'cloud-type', 'risk-grade', 'policy-type', 'policy-severity') and demisto.getArg(filter_):
            payload['filters'].append(
                {'name': args_conversion[filter_], 'operator': '=', 'value': demisto.getArg(filter_)})
def alert_to_readable(alert):
    """
    Transform an alert to a nice readable object

    Flattens the nested alert structure into a single-level dict with
    human-formatted timestamps, suitable for tableToMarkdown.
    """
    return {
        'ID': alert.get('id'),
        'Status': alert.get('status'),
        'FirstSeen': convert_unix_to_date(alert.get('firstSeen')),
        'LastSeen': convert_unix_to_date(alert.get('lastSeen')),
        'AlertTime': convert_unix_to_date(alert.get('alertTime')),
        'PolicyName': demisto.get(alert, 'policy.name'),
        'PolicyType': demisto.get(alert, 'policy.policyType'),
        'PolicyDescription': demisto.get(alert, 'policy.description'),
        'PolicySeverity': demisto.get(alert, 'policy.severity'),
        'PolicyRecommendation': demisto.get(alert, 'policy.recommendation'),
        'PolicyDeleted': demisto.get(alert, 'policy.deleted'),
        'PolicyRemediable': demisto.get(alert, 'policy.remediable'),
        'RiskRating': demisto.get(alert, 'riskDetail.rating'),
        'ResourceName': demisto.get(alert, 'resource.name'),
        'ResourceAccount': demisto.get(alert, 'resource.account'),
        'ResourceType': demisto.get(alert, 'resource.resourceType'),
        'ResourceCloudType': demisto.get(alert, 'resource.cloudType')
    }
def alert_to_context(alert):
    """
    Transform a single alert to context struct

    Builds the Redlock.Alert context entry (ID, Status, AlertTime, nested
    Policy/RiskDetail/Resource objects, and the names of any alert rules).
    """
    ec = {
        'ID': alert.get('id'),
        'Status': alert.get('status'),
        'AlertTime': convert_unix_to_date(alert.get('alertTime')),
        'Policy': {
            'ID': demisto.get(alert, 'policy.policyId'),
            'Name': demisto.get(alert, 'policy.name'),
            'Type': demisto.get(alert, 'policy.policyType'),
            'Severity': demisto.get(alert, 'policy.severity'),
            'Remediable': demisto.get(alert, 'policy.remediable')
        },
        'RiskDetail': {
            'Rating': demisto.get(alert, 'riskDetail.rating'),
            'Score': demisto.get(alert, 'riskDetail.riskScore.score')
        },
        'Resource': {
            'ID': demisto.get(alert, 'resource.id'),
            'Name': demisto.get(alert, 'resource.name'),
            'Account': demisto.get(alert, 'resource.account'),
            'AccountID': demisto.get(alert, 'resource.accountId')
        }
    }
    if alert.get('alertRules'):
        # Keep just the rule names, not the full rule objects.
        ec['AlertRules'] = [alert_rule.get('name') for alert_rule in alert.get('alertRules')]
    return ec
def search_alerts():
    """
    Retrieves alerts by filter

    Applies the command's time filter (defaulting to the last 7 days) and
    attribute filters, fetches detailed alerts, and writes both a readable
    table and the Redlock.Alert / Redlock.Metadata.CountOfAlerts context.
    """
    payload = {}  # type: dict
    handle_time_filter(payload, {'type': 'relative', 'value': {'amount': 7, 'unit': 'day'}})
    handle_filters(payload)
    response = req('POST', 'alert', payload, {'detailed': 'true'})
    alerts = []
    context_path = 'Redlock.Alert(val.ID === obj.ID)'
    context = {context_path: []}  # type: dict
    for alert in response:
        alerts.append(alert_to_readable(alert))
        context[context_path].append(alert_to_context(alert))
    context['Redlock.Metadata.CountOfAlerts'] = len(response)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'EntryContext': context,
        'HumanReadable': tableToMarkdown('Alerts', alerts, [
            'ID', 'Status', 'FirstSeen', 'LastSeen', 'AlertTime', 'PolicyName', 'PolicyType', 'PolicyDescription',
            'PolicySeverity', 'PolicyRecommendation', 'PolicyDeleted', 'PolicyRemediable', 'RiskRating', 'ResourceName',
            'ResourceAccount', 'ResourceType', 'ResourceCloudType'
        ])
    })
def get_alert_details():
    """
    Retrieve alert details by given ID

    Fetches a single alert, extends the readable form with policy/resource
    detail fields, and writes the Redlock.Alert context entry.
    """
    response = req('GET', 'alert/' + demisto.getArg('alert-id'), None,
                   None)  # {'detailed': demisto.getArg('detailed')})
    alert = alert_to_readable(response)
    # NOTE(review): 'policy.policyID'/'policy.policyId' casing differs from
    # alert_to_context ('policy.policyId') -- verify against the API payload.
    alert.update({
        'PolicyID': demisto.get(response, 'policy.policyID'),
        'PolicySystemDefault': demisto.get(response, 'policy.systemDefault'),
        'PolicyLabels': demisto.get(response, 'policy.labels'),
        'PolicyLastModifiedOn': demisto.get(response, 'policy.lastModifiedOn'),
        'PolicyLastModifiedBy': demisto.get(response, 'policy.lastModifiedBy'),
        'RiskScore': demisto.get(response, 'riskDetail.riskScore.score'),
        'ResourceRRN': demisto.get(response, 'resource.rrn'),
        'ResourceID': demisto.get(response, 'resource.id'),
        'ResourceAccountID': demisto.get(response, 'resource.accountId'),
        'ResourceRegionID': demisto.get(response, 'resource.regionId'),
        'ResourceApiName': demisto.get(response, 'resource.resourceApiName'),
        'ResourceUrl': demisto.get(response, 'resource.url'),
        'ResourceData': demisto.get(response, 'resource.data'),
        'ResourceAccessKeyAge': demisto.get(response, 'resource.additionalInfo.accessKeyAge'),
        'ResourceInactiveSinceTs': demisto.get(response, 'resource.additionalInfo.inactiveSinceTs')
    })
    context = {'Redlock.Alert(val.ID === obj.ID)': alert_to_context(response)}
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'EntryContext': context,
        'HumanReadable': tableToMarkdown('Alert', alert, removeNull=True)
    })
def dismiss_alerts():
    """
    Dismiss the given list of alerts based on given filter

    Dismisses (or, when snooze-value/snooze-unit are supplied, snoozes) the
    alerts matching the given alert IDs, policy IDs and filters. Requires at
    least one of alert-id / policy-id.
    """
    ids = argToList(demisto.getArg('alert-id'))
    policies = argToList(demisto.getArg('policy-id'))
    payload = {'alerts': ids, 'policies': policies, 'dismissalNote': demisto.getArg('dismissal-note'), 'filter': {}}
    # Remove alert-id so handle_filters does not also add it as a filter.
    demisto.args().pop('alert-id', None)
    args = demisto.args()
    snooze_value = args.get('snooze-value', None)
    snooze_unit = args.get('snooze-unit', None)
    msg_notes = ['dismissed', 'Dismissal']
    if snooze_value and snooze_unit:
        # A dismissal with a relative time range is a "snooze" in the API.
        payload['dismissalTimeRange'] = {
            'type': 'relative',
            'value': {
                'unit': snooze_unit,
                'amount': int(snooze_value)
            }
        }
        msg_notes = ['snoozed', 'Snooze']
    handle_filters(payload['filter'])
    handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
    if not ids and not policies:
        return_error('You must specify either alert-id or policy-id for dismissing alerts')
    response = req('POST', 'alert/dismiss', payload, None)
    if response is False:
        # req() returns False for the 'alert_no_longer_in_expected_state' status.
        demisto.results("Alert not in expected state.")
    else:
        context = {}
        if ids:
            context['Redlock.DismissedAlert.ID'] = ids
        md = '### Alerts {} successfully. {} Note: {}.'.format(msg_notes[0], msg_notes[1],
                                                              demisto.getArg('dismissal-note'))
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': response,
            'EntryContext': context,
            'HumanReadable': md
        })
def reopen_alerts():
    """
    Reopen the given list of alerts based on given filter

    Re-opens the alerts matching the given alert IDs, policy IDs and
    filters. Requires at least one of alert-id / policy-id.
    """
    ids = argToList(demisto.getArg('alert-id'))
    policies = argToList(demisto.getArg('policy-id'))
    payload = {'alerts': ids, 'policies': policies, 'filter': {}}
    # Remove alert-id so handle_filters does not also add it as a filter.
    demisto.args().pop('alert-id', None)
    handle_filters(payload['filter'])
    handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
    if not ids and not policies:
        return_error('You must specify either alert-id or policy-id for re-opening alerts')
    response = req('POST', 'alert/reopen', payload, None)
    context = {}
    if ids:
        context['Redlock.ReopenedAlert.ID'] = ids
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'EntryContext': context,
        'HumanReadable': '### Alerts re-opened successfully.'
    })
def translate_severity(alert):
    """Map the alert's policy severity ('high'/'medium'/'low') to the
    corresponding Demisto severity level (3/2/1), defaulting to 0.

    Might take risk grade into account in the future.
    """
    severity = demisto.get(alert, 'policy.severity')
    return {'high': 3, 'medium': 2, 'low': 1}.get(severity, 0)
def get_rql_response(args):
    """Run a raw RQL config query (capped by the 'limit' argument, default 1)
    and write the results to the war room and the Redlock.RQL context.

    Args:
        args: the command arguments; 'rql' holds the query text.

    Bug fix: the query was previously built as
    ``args.get('rql').encode("utf-8")`` which yields *bytes*; concatenating
    the str limit clause to it raises TypeError on Python 3, and bytes are
    not JSON-serializable in the request payload. The query is now kept as
    str throughout.
    """
    rql = args.get('rql')
    limit = str(args.get('limit', '1'))
    rql += " limit search records to {}".format(limit)
    payload = {"query": rql, "filter": {}}
    handle_filters(payload['filter'])
    handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
    response = req('POST', 'search/config', payload, None)
    human_readable = []
    attributes = response.get('data')
    items = attributes.get('items', [])
    for item in items:
        tmp_human_readable = {
            "ResourceName": item["name"],
            "Service": item["service"],
            "Account": item["accountName"],
            "Region": item["regionName"],
            "Deleted": item["deleted"]
        }
        human_readable.append(tmp_human_readable)
    # Normalize item keys to PascalCase for the context output.
    contents = format_response(items)
    rql_data = {
        "Query": rql,
        "Response": contents
    }
    md = tableToMarkdown(name="RQL Output:", t=human_readable, headerTransform=pascalToSpace, removeNull=True)
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': rql_data,
        'EntryContext': {'Redlock.RQL(val.Query === obj.Query)': rql_data},
        'HumanReadable': md
    })
def get_remediation_details():
    """
    Retrieve remediation details for a given alert

    Posts the alert IDs (plus filters) to the remediation endpoint and, when
    details exist, writes per-alert CLI remediation commands and a shared
    description to the Redlock.Alert context and the war room.
    """
    alert_ids = argToList(demisto.getArg('alert-id'))
    payload = {'alerts': alert_ids, 'filter': {}}
    handle_filters(payload['filter'])
    handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
    md_data = []
    context = []
    # req() returns False when the service reports 'remediation_unavailable'.
    response = req('POST', 'alert/remediation', payload, None)
    if response:
        for alert_id in alert_ids:
            details = {
                'ID': alert_id,
                'Remediation': {
                    'CLI': response['alertIdVsCliScript'][alert_id],
                    'Description': response['cliDescription']
                }
            }
            human_readable_details = {
                'ID': details['ID'],
                'RemediationCLI': details['Remediation']['CLI'],
                'RemediationDescription': details['Remediation']['Description']
            }
            context.append(details)
            md_data.append(human_readable_details)
        MD = tableToMarkdown("Remediation Details", md_data)
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': response,
            'EntryContext': {'Redlock.Alert(val.ID == obj.ID)': context},
            'HumanReadable': MD
        })
    else:
        demisto.results('No Remediation Details Found')
def redlock_search_config():
    """
    Run query in config

    Executes an RQL config search (sorted by insertTs descending, capped by
    'limit' or DEFAULT_LIMIT, with resource JSON included) and writes the
    returned asset items to the Redlock.Asset context and the war room.
    """
    query = demisto.args().get('query', None)
    limit = demisto.args().get('limit', None)
    if not limit:
        limit = DEFAULT_LIMIT
    else:
        limit = int(limit)
    if not query:
        return_error('You must specify a query to retrieve assets')
    payload = {
        'query': query,
        'limit': limit,
        'sort': [{"direction": "desc", "field": "insertTs"}],
        'withResourceJson': True
    }
    handle_time_filter(payload, {'type': 'to_now', 'value': 'epoch'})
    response = req('POST', 'search/config', payload, None)
    # Defend against an unexpected response shape before drilling into it.
    if (
        not response
        or 'data' not in response
        or not isinstance(response['data'], dict)
        or 'items' not in response['data']
        or not isinstance(response['data']['items'], list)
    ):
        demisto.results('No results found')
    else:
        response_data = response.get('data')
        items = response_data.get('items', [])
        md = tableToMarkdown("Configuration Details", items)
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': items,
            'EntryContext': {'Redlock.Asset(val.id == obj.id)': items},
            'HumanReadable': md
        })
def redlock_list_scans():
    """
    Returns a list of IaC scans that meet the given conditions.

    Builds the query-string filter from the command arguments (grouping,
    paging, sorting, a relative/to_now/absolute time filter, and optional
    user/status/asset filters), fetches the scans, flattens each item's
    'attributes' into the item itself, and writes the Redlock.Scans context.

    See Also:
        https://prisma.pan.dev/api/cloud/cspm/iac-scan/#operation/getScans
    """
    args = demisto.args()
    group_by = args.get('group_by', 'scanId')
    page_size = args.get('page_size', 25)
    page_number = args.get('page_number', 1)
    sort = args.get('sort', None)
    filter_type = args.get('filter_type', 'relative')
    filter_time_amount = args.get('filter_time_amount', 1)
    to_now_time_unit = args.get('to_now_time_unit', 'login')
    relative_time_unit = args.get('relative_time_unit', 'day')
    filter_user = args.get('filter_user', None)
    filter_status = args.get('filter_status', None)
    filter_asset_type = args.get('filter_asset_type', None)
    filter_asset_name = args.get('filter_asset_name', None)
    filter_start_time = args.get('filter_start_time', None)
    filter_end_time = args.get('filter_end_time', None)
    list_filter = {
        'groupBy': group_by,
        'page[size]': page_size,
        'page[number]': page_number,
        'filter[timeType]': filter_type
    }
    if sort:
        list_filter['sort'] = sort
    # Each time-filter type requires its own pair of arguments.
    if filter_type == 'relative':
        if relative_time_unit and filter_time_amount:
            list_filter['filter[timeUnit]'] = relative_time_unit
            list_filter['filter[timeAmount]'] = filter_time_amount
        else:
            return_error('You must specify a relative_time_unit and filter_time_amount with relative type filter')
    elif filter_type == 'to_now':
        if to_now_time_unit:
            list_filter['filter[timeUnit]'] = to_now_time_unit
        else:
            return_error('You must specify to_now_time_unit with to_now type filter')
    elif filter_type == 'absolute':
        if filter_start_time and filter_end_time:
            list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
            list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format="%m/%d/%Y %H:%M:%S")
        else:
            return_error('You must specify a filter_start_time and filter_end_time with absolute type filter')
    if filter_user:
        list_filter['filter[user]'] = filter_user
    if filter_status:
        list_filter['filter[status]'] = filter_status
    if filter_asset_type:
        list_filter['filter[assetType]'] = filter_asset_type
    if filter_asset_name:
        list_filter['filter[assetName]'] = filter_asset_name
    response = req('GET', 'iac/v2/scans', param_data=list_filter, data={})
    if (
        not response
        or 'data' not in response
        or not isinstance(response.get('data'), list)
    ):
        demisto.results('No results found')
    else:
        items = response.get('data', [])
        readable_output = []
        for item in items:
            id = item.get('id')
            attributes = item.get('attributes', {})
            readable_output.append({
                "ID": id,
                "Name": attributes.get('name', []),
                "Type": attributes.get('type', []),
                "Scan Time": attributes.get('scanTime'),
                "User": attributes.get('user', [])
            })
            # flatten the attributes section of the item - i.e removes 'attributes' key
            item.pop('attributes', None)
            for key, value in attributes.items():
                item[key] = value
        md = tableToMarkdown("Scans List:", readable_output)
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['json'],
            'Contents': items,
            'EntryContext': {'Redlock.Scans(val.id == obj.id)': items},
            'HumanReadable': md
        })
def redlock_get_scan_status():
    """
    Report the status of the asynchronous IaC scan job identified by the
    ``scan_id`` command argument.

    See Also:
        https://prisma.pan.dev/api/cloud/cspm/iac-scan/#operation/getAsyncScanStatus
    """
    scan_id = demisto.args().get('scan_id', None)
    response = req('GET', f'iac/v2/scans/{scan_id}/status', param_data={}, data={})
    # Guard clause: bail out early on an empty or malformed API response.
    if not response or 'data' not in response:
        demisto.results('No results found')
        return
    data = response.get('data', {})
    scan = {
        'id': data.get('id'),
        'status': data.get('attributes', {}).get('status')
    }
    human_readable = tableToMarkdown("Scan Status:", {"ID": scan['id'], "Status": scan['status']})
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': scan,
        'EntryContext': {'Redlock.Scans(val.id == obj.id)': scan},
        'HumanReadable': human_readable
    })
def redlock_get_scan_results():
    """
    Fetch and display result details for the completed IaC scan whose ID is
    given by the ``scan_id`` command argument.

    See Also:
        https://prisma.pan.dev/api/cloud/cspm/iac-scan/#operation/getScanResult
    """
    scan_id = demisto.args().get('scan_id', None)
    response = req('GET', f'iac/v2/scans/{scan_id}/results', param_data={}, data={})
    # Guard clause: the API must return a dict whose 'data' entry is a list.
    if not response or 'data' not in response or not isinstance(response.get('data'), list):
        demisto.results('No results found')
        return
    items = response.get('data', [])

    def to_row(entry):
        # Flatten one result entry into a markdown-table row.
        attrs = entry.get('attributes', {})
        return {
            "ID": entry.get('id'),
            "Name": attrs.get('name'),
            "Policy ID": attrs.get('policyId'),
            "Description": attrs.get('desc'),
            "Severity": attrs.get('severity')
        }

    rows = [to_row(entry) for entry in items]
    results = {
        "id": scan_id,
        "results": items
    }
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': results,
        'EntryContext': {'Redlock.Scans(val.id == obj.id)': results},
        'HumanReadable': tableToMarkdown("Scan Results:", rows)
    })
def fetch_incidents():
    """
    Periodically pull open RedLock alerts and convert them to Demisto incidents.

    Returns:
        tuple: ``(incidents, end_time_ms)``. The caller persists
        ``end_time_ms`` via ``demisto.setLastRun`` so the next fetch resumes
        where this one ended.
    """
    now = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
    last_run = demisto.getLastRun().get('time')
    if not last_run:
        # First fetch ever: derive a start time from the configured look-back.
        # NOTE(review): subtracting parse_date_range's absolute start timestamp
        # from `now` yields a near-epoch value, so the first fetch effectively
        # pulls all history — confirm against parse_date_range's contract.
        fetch_time = demisto.params().get('fetch_time', '3 days').strip()
        last_run = now - parse_date_range(fetch_time, to_timestamp=True)[0]
    filters = [{'name': 'alert.status', 'operator': '=', 'value': 'open'}]
    # Optional instance-parameter filters, mapped to their API field names.
    for param_name, api_field in (('ruleName', 'alertRule.name'),
                                  ('policySeverity', 'policy.severity'),
                                  ('policyName', 'policy.name')):
        value = demisto.getParam(param_name)
        if value:
            filters.append({'name': api_field, 'operator': '=', 'value': value})
    payload = {
        'timeRange': {'type': 'absolute', 'value': {'startTime': last_run, 'endTime': now}},
        'filters': filters
    }
    demisto.info("Executing Prisma Cloud (RedLock) fetch_incidents with payload: {}".format(payload))
    response = req('POST', 'alert', payload, {'detailed': 'true'})
    incidents = [{
        'name': alert.get('policy.name', 'No policy') + ' - ' + alert.get('id'),
        'occurred': convert_unix_to_demisto(alert.get('alertTime')),
        'severity': translate_severity(alert),
        'rawJSON': json.dumps(alert)
    } for alert in response]
    return incidents, now
def main():
    """Entry point: configure module globals from instance params and dispatch the command."""
    global URL, VERIFY
    handle_proxy()
    params = demisto.params()
    URL = params.get('url')
    if URL[-1] != '/':  # normalize so path segments can be appended directly
        URL += '/'
    VERIFY = not params.get('unsecure', False)
    # Zero-argument command handlers; commands needing extra plumbing are
    # special-cased in the dispatch below.
    handlers = {
        'redlock-search-alerts': search_alerts,
        'redlock-list-alert-filters': list_filters,
        'redlock-get-alert-details': get_alert_details,
        'redlock-dismiss-alerts': dismiss_alerts,
        'redlock-reopen-alerts': reopen_alerts,
        'redlock-get-remediation-details': get_remediation_details,
        'redlock-search-config': redlock_search_config,
        'redlock-list-scans': redlock_list_scans,
        'redlock-get-scan-status': redlock_get_scan_status,
        'redlock-get-scan-results': redlock_get_scan_results,
    }
    try:
        command = demisto.command()
        if command == 'test-module':
            get_token()  # raises on bad credentials, which fails the test
            return_results('ok')
        elif command == 'redlock-get-rql-response':
            get_rql_response(demisto.args())
        elif command == 'fetch-incidents':
            incidents, new_run = fetch_incidents()
            demisto.incidents(incidents)
            demisto.setLastRun({'time': new_run})
        elif command in handlers:
            handlers[command]()
        else:
            raise Exception('Unrecognized command: ' + command)
    except Exception as err:
        demisto.error(traceback.format_exc())  # keep the full traceback in the server log
        return_error(str(err))
# XSOAR executes integration scripts under several module names depending on
# the server runtime, hence the multi-name guard instead of plain '__main__'.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 37.232704 | 121 | 0.592804 | from CommonServerPython import *
requests.packages.urllib3.disable_warnings()
URL = ''
VERIFY = False
DEFAULT_LIMIT = 100
HEADERS = {'Content-Type': 'application/json', 'Accept': '*/*'}
TOKEN = None
def get_token():
response = requests.post(URL + 'login', headers=HEADERS, verify=VERIFY, json={
'customerName': demisto.getParam('customer') or '',
'username': demisto.getParam('credentials')['identifier'],
'password': demisto.getParam('credentials')['password']
})
if response.status_code != requests.codes.ok:
raise Exception('Error authenticating to RedLock service [%d] - %s' % (response.status_code, response.text))
try:
response_json = response.json()
TOKEN = response_json.get('token')
if not TOKEN:
demisto.debug(json.dumps(response_json))
message = 'Could not retrieve token from server: {}'.format(response_json.get("message"))
if response_json.get('message') == 'login_needs_customer_name':
available_customer_names = [name.get('customerName') for name in response_json.get('customerNames')]
message = 'In order to login a customer name need to be configured. Available customer names: {}'.format(
{", ".join(available_customer_names)})
raise Exception(message)
except ValueError as exception:
demisto.log(exception)
raise Exception('Could not parse API response.')
HEADERS['x-redlock-auth'] = TOKEN
def req(method, path, data, param_data):
if not TOKEN:
get_token()
response = requests.request(method, URL + path, json=data, params=param_data, headers=HEADERS, verify=VERIFY)
if response.status_code != requests.codes.ok:
text = response.text
if response.headers.get('x-redlock-status'):
try:
statuses = json.loads(response.headers.get('x-redlock-status'))
for status in statuses:
text += '\n%s [%s]' % (status.get('i18nKey', ''), status.get('subject', ''))
if status['i18nKey'] == 'remediation_unavailable':
return False
if status['i18nKey'] == 'alert_no_longer_in_expected_state':
return False
except Exception:
pass
raise Exception('Error in API call to RedLock service [%d] - %s' % (response.status_code, text))
if not response.text:
return {}
return response.json()
def format_response(response):
    """Recursively normalize dict keys to PascalCase without spaces.

    Dicts are rebuilt with transformed keys, lists recurse element-wise, and
    falsy values or scalars pass through unchanged.
    """
    if not response:
        return response
    if isinstance(response, dict):
        return {
            pascalToSpace(key).replace(" ", ""): format_response(value)
            for key, value in response.items()
        }
    if isinstance(response, list):
        return [format_response(item) for item in response]
    return response
def list_filters():
    """List the alert-filter options advertised by the RedLock suggest API."""
    response = req('GET', 'filter/alert/suggest', None, None)
    rows = []
    for name in response:
        meta = response.get(name)
        rows.append({
            'Name': name,
            'Options': ','.join(meta.get('options')),
            'Static': meta.get('staticFilter')
        })
    demisto.results({
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': response,
        'HumanReadable': tableToMarkdown('Filter options', rows, ['Name', 'Options', 'Static'])
    })
def convert_date_to_unix(date_str, date_format="%m/%d/%Y"):
    """Parse *date_str* with *date_format* and return epoch milliseconds (naive UTC)."""
    epoch = datetime.utcfromtimestamp(0)
    parsed = datetime.strptime(date_str, date_format)
    return int((parsed - epoch).total_seconds() * 1000)
def convert_unix_to_date(timestamp):
    """Render an epoch-millisecond timestamp as 'MM/DD/YYYY HH:MM:SS' (UTC).

    Any falsy timestamp (None, 0) yields the placeholder 'N/A'.
    """
    if not timestamp:
        return 'N/A'
    return datetime.utcfromtimestamp(timestamp / 1000).strftime('%m/%d/%Y %H:%M:%S')
def convert_unix_to_demisto(timestamp):
    """Render an epoch-millisecond timestamp in ISO-8601 Zulu form (UTC).

    Any falsy timestamp (None, 0) yields the empty string.
    """
    if not timestamp:
        return ''
    return datetime.utcfromtimestamp(timestamp / 1000).strftime('%Y-%m-%dT%H:%M:%SZ')
def handle_time_filter(payload, base_case):
unit = demisto.getArg('time-range-unit')
value = demisto.getArg('time-range-value')
time_from = demisto.getArg('time-range-date-from')
time_to = demisto.getArg('time-range-date-to')
relative = ('hour', 'day', 'week', 'month', 'year')
to_now = relative[1:] + ('epoch', 'login')
if unit:
if time_from or time_to:
return_error('You cannot specify absolute times [time-range-date-from, time-range-date-to] '
+ 'with relative times [time-range-unit, time-range-value]')
if value:
if unit not in relative:
return_error('Time unit for relative time must be one of the following: ' + ','.join(relative))
payload['timeRange'] = {'type': 'relative', 'value': {'amount': int(value), 'unit': unit}}
else:
if unit not in to_now:
return_error('Time unit for to_now time must be one of the following: ' + ','.join(to_now))
payload['timeRange'] = {'type': 'to_now', 'value': unit}
else:
if not time_from or not time_to:
payload['timeRange'] = base_case
else:
payload['timeRange'] = {'type': 'absolute', 'value': {
'startTime': convert_date_to_unix(time_from), 'endTime': convert_date_to_unix(time_to)}}
def handle_filters(payload):
args_conversion = {
'alert-status': 'alert.status',
'policy-name': 'policy.name',
'policy-label': 'policy.label',
'policy-compliance-standard': 'policy.complianceStandard',
'cloud-account': 'cloud.account',
'cloud-region': 'cloud.region',
'alert-rule-name': 'alertRule.name',
'resource-id': 'resource.id',
'resource-name': 'resource.name',
'resource-type': 'resource.type',
'alert-id': 'alert.id',
'cloud-type': 'cloud.type',
'risk-grade': 'risk.grade',
'policy-type': 'policy.type',
'policy-severity': 'policy.severity'
}
payload['filters'] = []
for filter_ in demisto.args():
if filter_ in ('policy-name', 'policy-label', 'policy-compliance-standard', 'cloud-account', 'cloud-region',
'alert-rule-name', 'resource-id', 'resource-name', 'resource-type', 'alert-status', 'alert-id',
'cloud-type', 'risk-grade', 'policy-type', 'policy-severity') and demisto.getArg(filter_):
payload['filters'].append(
{'name': args_conversion[filter_], 'operator': '=', 'value': demisto.getArg(filter_)})
def alert_to_readable(alert):
return {
'ID': alert.get('id'),
'Status': alert.get('status'),
'FirstSeen': convert_unix_to_date(alert.get('firstSeen')),
'LastSeen': convert_unix_to_date(alert.get('lastSeen')),
'AlertTime': convert_unix_to_date(alert.get('alertTime')),
'PolicyName': demisto.get(alert, 'policy.name'),
'PolicyType': demisto.get(alert, 'policy.policyType'),
'PolicyDescription': demisto.get(alert, 'policy.description'),
'PolicySeverity': demisto.get(alert, 'policy.severity'),
'PolicyRecommendation': demisto.get(alert, 'policy.recommendation'),
'PolicyDeleted': demisto.get(alert, 'policy.deleted'),
'PolicyRemediable': demisto.get(alert, 'policy.remediable'),
'RiskRating': demisto.get(alert, 'riskDetail.rating'),
'ResourceName': demisto.get(alert, 'resource.name'),
'ResourceAccount': demisto.get(alert, 'resource.account'),
'ResourceType': demisto.get(alert, 'resource.resourceType'),
'ResourceCloudType': demisto.get(alert, 'resource.cloudType')
}
def alert_to_context(alert):
ec = {
'ID': alert.get('id'),
'Status': alert.get('status'),
'AlertTime': convert_unix_to_date(alert.get('alertTime')),
'Policy': {
'ID': demisto.get(alert, 'policy.policyId'),
'Name': demisto.get(alert, 'policy.name'),
'Type': demisto.get(alert, 'policy.policyType'),
'Severity': demisto.get(alert, 'policy.severity'),
'Remediable': demisto.get(alert, 'policy.remediable')
},
'RiskDetail': {
'Rating': demisto.get(alert, 'riskDetail.rating'),
'Score': demisto.get(alert, 'riskDetail.riskScore.score')
},
'Resource': {
'ID': demisto.get(alert, 'resource.id'),
'Name': demisto.get(alert, 'resource.name'),
'Account': demisto.get(alert, 'resource.account'),
'AccountID': demisto.get(alert, 'resource.accountId')
}
}
if alert.get('alertRules'):
ec['AlertRules'] = [alert_rule.get('name') for alert_rule in alert.get('alertRules')]
return ec
def search_alerts():
payload = {}
handle_time_filter(payload, {'type': 'relative', 'value': {'amount': 7, 'unit': 'day'}})
handle_filters(payload)
response = req('POST', 'alert', payload, {'detailed': 'true'})
alerts = []
context_path = 'Redlock.Alert(val.ID === obj.ID)'
context = {context_path: []}
for alert in response:
alerts.append(alert_to_readable(alert))
context[context_path].append(alert_to_context(alert))
context['Redlock.Metadata.CountOfAlerts'] = len(response)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': response,
'EntryContext': context,
'HumanReadable': tableToMarkdown('Alerts', alerts, [
'ID', 'Status', 'FirstSeen', 'LastSeen', 'AlertTime', 'PolicyName', 'PolicyType', 'PolicyDescription',
'PolicySeverity', 'PolicyRecommendation', 'PolicyDeleted', 'PolicyRemediable', 'RiskRating', 'ResourceName',
'ResourceAccount', 'ResourceType', 'ResourceCloudType'
])
})
def get_alert_details():
response = req('GET', 'alert/' + demisto.getArg('alert-id'), None,
None)
alert = alert_to_readable(response)
alert.update({
'PolicyID': demisto.get(response, 'policy.policyID'),
'PolicySystemDefault': demisto.get(response, 'policy.systemDefault'),
'PolicyLabels': demisto.get(response, 'policy.labels'),
'PolicyLastModifiedOn': demisto.get(response, 'policy.lastModifiedOn'),
'PolicyLastModifiedBy': demisto.get(response, 'policy.lastModifiedBy'),
'RiskScore': demisto.get(response, 'riskDetail.riskScore.score'),
'ResourceRRN': demisto.get(response, 'resource.rrn'),
'ResourceID': demisto.get(response, 'resource.id'),
'ResourceAccountID': demisto.get(response, 'resource.accountId'),
'ResourceRegionID': demisto.get(response, 'resource.regionId'),
'ResourceApiName': demisto.get(response, 'resource.resourceApiName'),
'ResourceUrl': demisto.get(response, 'resource.url'),
'ResourceData': demisto.get(response, 'resource.data'),
'ResourceAccessKeyAge': demisto.get(response, 'resource.additionalInfo.accessKeyAge'),
'ResourceInactiveSinceTs': demisto.get(response, 'resource.additionalInfo.inactiveSinceTs')
})
context = {'Redlock.Alert(val.ID === obj.ID)': alert_to_context(response)}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': response,
'EntryContext': context,
'HumanReadable': tableToMarkdown('Alert', alert, removeNull=True)
})
def dismiss_alerts():
ids = argToList(demisto.getArg('alert-id'))
policies = argToList(demisto.getArg('policy-id'))
payload = {'alerts': ids, 'policies': policies, 'dismissalNote': demisto.getArg('dismissal-note'), 'filter': {}}
demisto.args().pop('alert-id', None)
args = demisto.args()
snooze_value = args.get('snooze-value', None)
snooze_unit = args.get('snooze-unit', None)
msg_notes = ['dismissed', 'Dismissal']
if snooze_value and snooze_unit:
payload['dismissalTimeRange'] = {
'type': 'relative',
'value': {
'unit': snooze_unit,
'amount': int(snooze_value)
}
}
msg_notes = ['snoozed', 'Snooze']
handle_filters(payload['filter'])
handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
if not ids and not policies:
return_error('You must specify either alert-id or policy-id for dismissing alerts')
response = req('POST', 'alert/dismiss', payload, None)
if response is False:
demisto.results("Alert not in expected state.")
else:
context = {}
if ids:
context['Redlock.DismissedAlert.ID'] = ids
md = '### Alerts {} successfully. {} Note: {}.'.format(msg_notes[0], msg_notes[1],
demisto.getArg('dismissal-note'))
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': response,
'EntryContext': context,
'HumanReadable': md
})
def reopen_alerts():
ids = argToList(demisto.getArg('alert-id'))
policies = argToList(demisto.getArg('policy-id'))
payload = {'alerts': ids, 'policies': policies, 'filter': {}}
demisto.args().pop('alert-id', None)
handle_filters(payload['filter'])
handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
if not ids and not policies:
return_error('You must specify either alert-id or policy-id for re-opening alerts')
response = req('POST', 'alert/reopen', payload, None)
context = {}
if ids:
context['Redlock.ReopenedAlert.ID'] = ids
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': response,
'EntryContext': context,
'HumanReadable': '### Alerts re-opened successfully.'
})
def translate_severity(alert):
    """Map the alert's policy severity to a Demisto severity integer.

    high -> 3, medium -> 2, low -> 1; anything else (including missing) -> 0.
    """
    severity_map = {'high': 3, 'medium': 2, 'low': 1}
    return severity_map.get(demisto.get(alert, 'policy.severity'), 0)
def get_rql_response(args):
rql = args.get('rql').encode("utf-8")
limit = str(args.get('limit', '1'))
rql += " limit search records to {}".format(limit)
payload = {"query": rql, "filter": {}}
handle_filters(payload['filter'])
handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
response = req('POST', 'search/config', payload, None)
human_readable = []
attributes = response.get('data')
items = attributes.get('items', [])
for item in items:
tmp_human_readable = {
"ResourceName": item["name"],
"Service": item["service"],
"Account": item["accountName"],
"Region": item["regionName"],
"Deleted": item["deleted"]
}
human_readable.append(tmp_human_readable)
contents = format_response(items)
rql_data = {
"Query": rql,
"Response": contents
}
md = tableToMarkdown(name="RQL Output:", t=human_readable, headerTransform=pascalToSpace, removeNull=True)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': rql_data,
'EntryContext': {'Redlock.RQL(val.Query === obj.Query)': rql_data},
'HumanReadable': md
})
def get_remediation_details():
alert_ids = argToList(demisto.getArg('alert-id'))
payload = {'alerts': alert_ids, 'filter': {}}
handle_filters(payload['filter'])
handle_time_filter(payload['filter'], {'type': 'to_now', 'value': 'epoch'})
md_data = []
context = []
response = req('POST', 'alert/remediation', payload, None)
if response:
for alert_id in alert_ids:
details = {
'ID': alert_id,
'Remediation': {
'CLI': response['alertIdVsCliScript'][alert_id],
'Description': response['cliDescription']
}
}
human_readable_details = {
'ID': details['ID'],
'RemediationCLI': details['Remediation']['CLI'],
'RemediationDescription': details['Remediation']['Description']
}
context.append(details)
md_data.append(human_readable_details)
MD = tableToMarkdown("Remediation Details", md_data)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': response,
'EntryContext': {'Redlock.Alert(val.ID == obj.ID)': context},
'HumanReadable': MD
})
else:
demisto.results('No Remediation Details Found')
def redlock_search_config():
query = demisto.args().get('query', None)
limit = demisto.args().get('limit', None)
if not limit:
limit = DEFAULT_LIMIT
else:
limit = int(limit)
if not query:
return_error('You must specify a query to retrieve assets')
payload = {
'query': query,
'limit': limit,
'sort': [{"direction": "desc", "field": "insertTs"}],
'withResourceJson': True
}
handle_time_filter(payload, {'type': 'to_now', 'value': 'epoch'})
response = req('POST', 'search/config', payload, None)
if (
not response
or 'data' not in response
or not isinstance(response['data'], dict)
or 'items' not in response['data']
or not isinstance(response['data']['items'], list)
):
demisto.results('No results found')
else:
response_data = response.get('data')
items = response_data.get('items', [])
md = tableToMarkdown("Configuration Details", items)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': items,
'EntryContext': {'Redlock.Asset(val.id == obj.id)': items},
'HumanReadable': md
})
def redlock_list_scans():
args = demisto.args()
group_by = args.get('group_by', 'scanId')
page_size = args.get('page_size', 25)
page_number = args.get('page_number', 1)
sort = args.get('sort', None)
filter_type = args.get('filter_type', 'relative')
filter_time_amount = args.get('filter_time_amount', 1)
to_now_time_unit = args.get('to_now_time_unit', 'login')
relative_time_unit = args.get('relative_time_unit', 'day')
filter_user = args.get('filter_user', None)
filter_status = args.get('filter_status', None)
filter_asset_type = args.get('filter_asset_type', None)
filter_asset_name = args.get('filter_asset_name', None)
filter_start_time = args.get('filter_start_time', None)
filter_end_time = args.get('filter_end_time', None)
list_filter = {
'groupBy': group_by,
'page[size]': page_size,
'page[number]': page_number,
'filter[timeType]': filter_type
}
if sort:
list_filter['sort'] = sort
if filter_type == 'relative':
if relative_time_unit and filter_time_amount:
list_filter['filter[timeUnit]'] = relative_time_unit
list_filter['filter[timeAmount]'] = filter_time_amount
else:
return_error('You must specify a relative_time_unit and filter_time_amount with relative type filter')
elif filter_type == 'to_now':
if to_now_time_unit:
list_filter['filter[timeUnit]'] = to_now_time_unit
else:
return_error('You must specify to_now_time_unit with to_now type filter')
elif filter_type == 'absolute':
if filter_start_time and filter_end_time:
list_filter['filter[startTime]'] = convert_date_to_unix(filter_start_time, date_format="%m/%d/%Y %H:%M:%S")
list_filter['filter[endTime]'] = convert_date_to_unix(filter_end_time, date_format="%m/%d/%Y %H:%M:%S")
else:
return_error('You must specify a filter_start_time and filter_end_time with absolute type filter')
if filter_user:
list_filter['filter[user]'] = filter_user
if filter_status:
list_filter['filter[status]'] = filter_status
if filter_asset_type:
list_filter['filter[assetType]'] = filter_asset_type
if filter_asset_name:
list_filter['filter[assetName]'] = filter_asset_name
response = req('GET', 'iac/v2/scans', param_data=list_filter, data={})
if (
not response
or 'data' not in response
or not isinstance(response.get('data'), list)
):
demisto.results('No results found')
else:
items = response.get('data', [])
readable_output = []
for item in items:
id = item.get('id')
attributes = item.get('attributes', {})
readable_output.append({
"ID": id,
"Name": attributes.get('name', []),
"Type": attributes.get('type', []),
"Scan Time": attributes.get('scanTime'),
"User": attributes.get('user', [])
})
item.pop('attributes', None)
for key, value in attributes.items():
item[key] = value
md = tableToMarkdown("Scans List:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': items,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': items},
'HumanReadable': md
})
def redlock_get_scan_status():
scan_id = demisto.args().get('scan_id', None)
response = req('GET', f'iac/v2/scans/{scan_id}/status', param_data={}, data={})
if (
not response
or 'data' not in response
):
demisto.results('No results found')
else:
result = response.get('data', {})
id = result.get('id')
status = result.get('attributes', {}).get('status')
readable_output = {
"ID": id,
"Status": status
}
result = {
'id': id,
'status': status
}
md = tableToMarkdown("Scan Status:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': result},
'HumanReadable': md
})
def redlock_get_scan_results():
scan_id = demisto.args().get('scan_id', None)
response = req('GET', f'iac/v2/scans/{scan_id}/results', param_data={}, data={})
if (
not response
or 'data' not in response
or not isinstance(response.get('data'), list)
):
demisto.results('No results found')
else:
items = response.get('data', [])
readable_output = []
for item in items:
id = item.get('id')
attributes = item.get('attributes', {})
readable_output.append({
"ID": id,
"Name": attributes.get('name'),
"Policy ID": attributes.get('policyId'),
"Description": attributes.get('desc'),
"Severity": attributes.get('severity')
})
results = {
"id": scan_id,
"results": items
}
md = tableToMarkdown("Scan Results:", readable_output)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': results,
'EntryContext': {'Redlock.Scans(val.id == obj.id)': results},
'HumanReadable': md
})
def fetch_incidents():
now = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
last_run = demisto.getLastRun().get('time')
if not last_run:
last_run = now - parse_date_range(demisto.params().get('fetch_time', '3 days').strip(), to_timestamp=True)[0]
payload = {'timeRange': {
'type': 'absolute',
'value': {
'startTime': last_run,
'endTime': now
}
}, 'filters': [{'name': 'alert.status', 'operator': '=', 'value': 'open'}]}
if demisto.getParam('ruleName'):
payload['filters'].append({'name': 'alertRule.name', 'operator': '=',
'value': demisto.getParam('ruleName')})
if demisto.getParam('policySeverity'):
payload['filters'].append({'name': 'policy.severity', 'operator': '=',
'value': demisto.getParam('policySeverity')})
if demisto.getParam('policyName'):
payload['filters'].append({'name': 'policy.name', 'operator': '=',
'value': demisto.getParam('policyName')})
demisto.info("Executing Prisma Cloud (RedLock) fetch_incidents with payload: {}".format(payload))
response = req('POST', 'alert', payload, {'detailed': 'true'})
incidents = []
for alert in response:
incidents.append({
'name': alert.get('policy.name', 'No policy') + ' - ' + alert.get('id'),
'occurred': convert_unix_to_demisto(alert.get('alertTime')),
'severity': translate_severity(alert),
'rawJSON': json.dumps(alert)
})
return incidents, now
def main():
global URL, VERIFY
handle_proxy()
params = demisto.params()
URL = params.get('url')
if URL[-1] != '/':
URL += '/'
VERIFY = not params.get('unsecure', False)
try:
command = demisto.command()
if command == 'test-module':
get_token()
return_results('ok')
elif command == 'redlock-search-alerts':
search_alerts()
elif command == 'redlock-list-alert-filters':
list_filters()
elif command == 'redlock-get-alert-details':
get_alert_details()
elif command == 'redlock-dismiss-alerts':
dismiss_alerts()
elif command == 'redlock-reopen-alerts':
reopen_alerts()
elif command == 'redlock-get-remediation-details':
get_remediation_details()
elif command == 'redlock-get-rql-response':
get_rql_response(demisto.args())
elif command == 'redlock-search-config':
redlock_search_config()
elif command == 'redlock-list-scans':
redlock_list_scans()
elif command == 'redlock-get-scan-status':
redlock_get_scan_status()
elif command == 'redlock-get-scan-results':
redlock_get_scan_results()
elif command == 'fetch-incidents':
incidents, new_run = fetch_incidents()
demisto.incidents(incidents)
demisto.setLastRun({'time': new_run})
else:
raise Exception('Unrecognized command: ' + command)
except Exception as err:
demisto.error(traceback.format_exc())
return_error(str(err))
# XSOAR executes integration scripts under several module names depending on
# the server runtime, hence the multi-name guard instead of plain '__main__'.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| true | true |
1c2b60828b8f0fee941b317732028ca4b322668c | 3,143 | py | Python | switchboard/base.py | frankban/switchboard | 9982b36308273b5157701fd6b984238add44a047 | [
"Apache-2.0"
] | null | null | null | switchboard/base.py | frankban/switchboard | 9982b36308273b5157701fd6b984238add44a047 | [
"Apache-2.0"
] | null | null | null | switchboard/base.py | frankban/switchboard | 9982b36308273b5157701fd6b984238add44a047 | [
"Apache-2.0"
] | null | null | null | """
switchboard.base
~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import threading
class ModelDict(threading.local):
    """
    Dictionary-style access to :func:`~switchboard.model.Model` data.

    If ``auto_create=True`` accessing modeldict[key] when key does not exist
    will attempt to create it in the datastore.

    Functions in two different ways, depending on the constructor:

        # Assume the datastore has a record like so:
        # { key: '000-abc', 'name': 'Jim', 'phone': '1235677890' }
        mydict = ModelDict(Model)
        mydict['000-abc']
        >>> Model({ 'key': '000-abc', 'name': 'Jim', 'phone': '1234567890' }) #doctest: +SKIP

    If you want to use another key besides ``key``, you may specify that in the
    constructor:

        mydict = ModelDict(Model, key='phone')
        mydict['1234567890']
        >>> Model({ 'key': '000-abc', 'name': 'Jim', 'phone': '1234567890' }) #doctest: +SKIP

    The ModelDict needs to be thread local so that information is not shared
    across threads, e.g., requests.
    """
    def __init__(self, model, key='key', auto_create=False, *args, **kwargs):
        # Attribute name used when enumerating keys (see iterkeys).
        self._key = key
        # Datastore-backed model class providing get/get_or_create/remove/all.
        self._model = model
        self._auto_create = auto_create

    def __getitem__(self, key):
        """Return the instance stored under *key*; raise KeyError when absent."""
        if self._auto_create:
            # get_or_create returns (instance, created); only the instance matters.
            instance = self._model.get_or_create(key)[0]
        else:
            instance = self._model.get(key)
        if instance is None:
            raise KeyError(key)
        return instance

    def __setitem__(self, key, instance):
        """Persist *instance* under *key*.

        NOTE(review): this always assigns the literal ``key`` attribute rather
        than ``self._key`` — confirm whether custom-key dicts rely on that.
        """
        if not hasattr(instance, 'key'):
            instance.key = key
        instance.save()

    def __delitem__(self, key):
        self._model.remove(key)

    def __len__(self):  # pragma: nocover
        return self._model.count()

    def __contains__(self, key):  # pragma: nocover
        return self._model.contains(key)

    def __iter__(self):
        return self.iterkeys()

    def __repr__(self):  # pragma: nocover
        return "<%s>" % (self.__class__.__name__)

    def iteritems(self):
        """Yield (key-attribute value, model) pairs for every stored model."""
        def make_item(model):
            return (getattr(model, self._key), model)
        items = [make_item(model) for model in self._model.all()]
        return iter(items)

    def itervalues(self):
        return iter(self._model.all())

    def iterkeys(self):
        return iter([getattr(model, self._key) for model in self._model.all()])

    def keys(self):  # pragma: nocover
        return list(self.iterkeys())

    def values(self):  # pragma: nocover
        return list(self.itervalues())

    def items(self):  # pragma: nocover
        return list(self.iteritems())

    def get(self, key, default=None):
        """Return self[key], or *default* when the key is absent."""
        try:
            value = self[key]
        except KeyError:
            value = default
        return value

    def pop(self, key, default=None):
        """Remove and return self[key].

        Unlike ``dict.pop``, a missing key returns *default* rather than
        raising KeyError.
        """
        value = self.get(key, default)
        try:
            del self[key]
        except KeyError:
            pass
        return value

    def setdefault(self, key, instance):
        """Insert *instance* under *key* if absent; return the stored instance.

        Fixed to mirror ``dict.setdefault``: it previously returned ``None``,
        which made the dict-style API inconsistent. Returning the stored value
        is backward-compatible for callers that ignored the result.
        """
        return self._model.get_or_create(key, defaults=instance.__dict__)[0]
| 28.572727 | 93 | 0.602291 |
import threading
class ModelDict(threading.local):
    """Dictionary-style, thread-local access to a datastore-backed Model.

    ``modeldict[key]`` returns the model instance stored under *key*. With
    ``auto_create=True`` a missing key is created in the datastore instead of
    raising KeyError. Subclassing ``threading.local`` keeps state from being
    shared across threads (e.g. concurrent requests).
    """
    def __init__(self, model, key='key', auto_create=False, *args, **kwargs):
        # Attribute name used when enumerating keys (see iterkeys).
        self._key = key
        # Datastore-backed model class providing get/get_or_create/remove/all.
        self._model = model
        self._auto_create = auto_create
    def __getitem__(self, key):
        # get_or_create returns (instance, created); only the instance matters.
        if self._auto_create:
            instance = self._model.get_or_create(key)[0]
        else:
            instance = self._model.get(key)
        if instance is None:
            raise KeyError(key)
        return instance
    def __setitem__(self, key, instance):
        # NOTE(review): always assigns the literal 'key' attribute, not
        # self._key — confirm whether custom-key dicts depend on this.
        if not hasattr(instance, 'key'):
            instance.key = key
        instance.save()
    def __delitem__(self, key):
        self._model.remove(key)
    def __len__(self):
        return self._model.count()
    def __contains__(self, key):
        return self._model.contains(key)
    def __iter__(self):
        return self.iterkeys()
    def __repr__(self):
        return "<%s>" % (self.__class__.__name__)
    def iteritems(self):
        """Yield (key-attribute value, model) pairs for every stored model."""
        def make_item(model):
            return (getattr(model, self._key), model)
        items = [make_item(model) for model in self._model.all()]
        return iter(items)
    def itervalues(self):
        return iter(self._model.all())
    def iterkeys(self):
        return iter([getattr(model, self._key) for model in self._model.all()])
    def keys(self):
        return list(self.iterkeys())
    def values(self):
        return list(self.itervalues())
    def items(self):
        return list(self.iteritems())
    def get(self, key, default=None):
        """Return self[key], or *default* when the key is absent."""
        try:
            value = self[key]
        except KeyError:
            value = default
        return value
    def pop(self, key, default=None):
        """Remove and return self[key].

        Unlike dict.pop, a missing key returns *default* rather than raising.
        """
        value = self.get(key, default)
        try:
            del self[key]
        except KeyError:
            pass
        return value
    def setdefault(self, key, instance):
        # NOTE(review): unlike dict.setdefault this returns None, not the
        # stored instance.
        self._model.get_or_create(key, defaults=instance.__dict__)
| true | true |
1c2b62e2140ca22c2a53fafc8a0aaf485f25b2c5 | 170 | py | Python | tests/models.py | Apkawa/django-archive-field | a2d7f7550a3a3c676b6343a511f25e676b360ba3 | [
"MIT"
] | null | null | null | tests/models.py | Apkawa/django-archive-field | a2d7f7550a3a3c676b6343a511f25e676b360ba3 | [
"MIT"
] | 1 | 2019-12-17T13:06:07.000Z | 2019-12-17T13:06:07.000Z | tests/models.py | Apkawa/django-archive-field | a2d7f7550a3a3c676b6343a511f25e676b360ba3 | [
"MIT"
] | null | null | null | from django.db import models
from archive_field.fields import ArchiveFileField
class TestModel(models.Model):
    """Minimal Django model used to exercise ArchiveFileField in tests."""
    # Uploaded archives are stored under MEDIA_ROOT/'test_archive'.
    archive = ArchiveFileField(upload_to='test_archive')
| 21.25 | 56 | 0.811765 | from django.db import models
from archive_field.fields import ArchiveFileField
class TestModel(models.Model):
archive = ArchiveFileField(upload_to='test_archive')
| true | true |
1c2b62eb69141adfc572ee53f5dc30246bc76465 | 1,141 | py | Python | tests/builtins/test_sorted.py | SouravJohar/voc | 82d1d03dff8619dc04cddd0e7fdfeb712f82363a | [
"BSD-3-Clause"
] | 1 | 2018-10-04T21:46:37.000Z | 2018-10-04T21:46:37.000Z | tests/builtins/test_sorted.py | SouravJohar/voc | 82d1d03dff8619dc04cddd0e7fdfeb712f82363a | [
"BSD-3-Clause"
] | null | null | null | tests/builtins/test_sorted.py | SouravJohar/voc | 82d1d03dff8619dc04cddd0e7fdfeb712f82363a | [
"BSD-3-Clause"
] | 1 | 2020-06-16T17:07:25.000Z | 2020-06-16T17:07:25.000Z | from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class SortedTests(TranspileTestCase):
    """Behavioural tests for the builtin sorted() under the transpiler."""
    def test_minimal(self):
        """Sort a mix of sequences with default, invalid and len() keys."""
        # The embedded snippet is executed by assertCodeExecution (which
        # presumably runs it under both CPython and the transpiled output
        # and compares stdout -- see TranspileTestCase in ..utils).
        # "invalid" as a key and sorting a list with uncomparable tuple
        # lengths are expected to raise, hence the try/except that prints
        # the exception instead of failing.
        self.assertCodeExecution("""
            samples = [
                ([1, 5, 3, 2, 4, 9, 12], None),
                (["foo", "bar"], None),
                (["foo", "bar"], "invalid"),
                (["one", "two", "three", "four"], len),
                ([(1, 2), (5, 6), (3, 4)], None),
                ([(1, 2), (3, 4), (5, 6, 7)], len),
            ]
            for seq, key in samples:
                try:
                    print('Sample:', seq)
                    print('Sorted:', sorted(seq, key=key))
                    print('Reverse sorted:', sorted(seq, key=key, reverse=True))
                except Exception as e:
                    print(e)
            """, run_in_function=False)
class BuiltinSortedFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    """Generic builtin-function test battery applied to sorted()."""
    # Builtin under test; BuiltinFunctionTestCase generates per-type tests.
    functions = ["sorted"]
    # Generated test cases for argument types not yet supported --
    # presumably skipped/expected-fail by BuiltinFunctionTestCase
    # (confirm in ..utils).
    not_implemented = [
        'test_bytearray',
        'test_bytes',
        'test_class',
        'test_dict',
        'test_frozenset',
        'test_str',
        'test_set',
    ]
| 30.837838 | 80 | 0.467134 | from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class SortedTests(TranspileTestCase):
def test_minimal(self):
self.assertCodeExecution("""
samples = [
([1, 5, 3, 2, 4, 9, 12], None),
(["foo", "bar"], None),
(["foo", "bar"], "invalid"),
(["one", "two", "three", "four"], len),
([(1, 2), (5, 6), (3, 4)], None),
([(1, 2), (3, 4), (5, 6, 7)], len),
]
for seq, key in samples:
try:
print('Sample:', seq)
print('Sorted:', sorted(seq, key=key))
print('Reverse sorted:', sorted(seq, key=key, reverse=True))
except Exception as e:
print(e)
""", run_in_function=False)
class BuiltinSortedFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
functions = ["sorted"]
not_implemented = [
'test_bytearray',
'test_bytes',
'test_class',
'test_dict',
'test_frozenset',
'test_str',
'test_set',
]
| true | true |
1c2b63a2b5ee33eb07f1f7ba3563a844177a3538 | 5,768 | py | Python | src/cpu/StaticInstFlags.py | He-Liu-ooo/Computer-Architecture-THUEE-2022-spring- | 9d36aaacbc7eea357608524113bec97bae2ea229 | [
"BSD-3-Clause"
] | 4 | 2020-12-25T03:12:00.000Z | 2022-01-07T03:35:35.000Z | src/cpu/StaticInstFlags.py | He-Liu-ooo/Computer-Architecture-THUEE-2022-spring- | 9d36aaacbc7eea357608524113bec97bae2ea229 | [
"BSD-3-Clause"
] | 2 | 2020-09-09T15:42:46.000Z | 2020-10-22T20:45:04.000Z | src/cpu/StaticInstFlags.py | He-Liu-ooo/Computer-Architecture-THUEE-2022-spring- | 9d36aaacbc7eea357608524113bec97bae2ea229 | [
"BSD-3-Clause"
] | 3 | 2020-04-27T06:22:06.000Z | 2021-04-15T10:12:33.000Z | # Copyright (c) 2020 ARM Limited
# Copyright (c) 2003-2005 The Regents of The University of Michigan
# Copyright (c) 2013 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
# Set of boolean static instruction properties.
#
# Notes:
# - The IsInteger and IsFloating flags are based on the class of registers
# accessed by the instruction. Although most instructions will have exactly
# one of these two flags set, it is possible for an instruction to have
# neither (e.g., direct unconditional branches, memory barriers) or both
# (e.g., an FP/int conversion).
# - If IsMemRef is set, then exactly one of IsLoad or IsStore will be set.
# - If IsControl is set, then exactly one of IsDirectControl or IsIndirect
# Control will be set, and exactly one of IsCondControl or IsUncondControl
# will be set.
# - IsSerializing, IsMemBarrier, and IsWriteBarrier are implemented as flags
# since in the current model there's no other way for instructions to inject
# behavior into the pipeline outside of fetch. Once we go to an exec-in-exec
# CPU model we should be able to get rid of these flags and implement this
# behavior via the execute() methods.
class StaticInstFlags(Enum):
    """Set of boolean static instruction properties (see the notes above:
    e.g. if IsMemRef is set, exactly one of IsLoad/IsStore is set; if
    IsControl is set, exactly one of IsDirectControl/IsIndirectControl and
    exactly one of IsCondControl/IsUncondControl is set)."""
    # Code-generation settings consumed by the m5 Enum wrapper machinery.
    wrapper_name = 'StaticInstFlags'
    wrapper_is_struct = True
    enum_name = 'Flags'
    vals = [
        'IsNop', # Is a no-op (no effect at all).
        'IsInteger', # References integer regs.
        'IsFloating', # References FP regs.
        'IsCC', # References CC regs.
        'IsVector', # References Vector regs.
        'IsVectorElem', # References Vector reg elems.
        'IsMemRef', # References memory (load, store, or prefetch)
        'IsLoad', # Reads from memory (load or prefetch).
        'IsStore', # Writes to memory.
        'IsAtomic', # Does atomic RMW to memory.
        'IsStoreConditional', # Store conditional instruction.
        'IsIndexed', # Accesses memory with an indexed address
                     # computation
        'IsInstPrefetch', # Instruction-cache prefetch.
        'IsDataPrefetch', # Data-cache prefetch.
        'IsControl', # Control transfer instruction.
        'IsDirectControl', # PC relative control transfer.
        'IsIndirectControl',# Register indirect control transfer.
        'IsCondControl', # Conditional control transfer.
        'IsUncondControl', # Unconditional control transfer.
        'IsCall', # Subroutine call.
        'IsReturn', # Subroutine return.
        'IsCondDelaySlot', # Conditional Delay-Slot Instruction
        'IsThreadSync', # Thread synchronization operation.
        'IsSerializing', # Serializes pipeline: won't execute until all
                         # older instructions have committed.
        'IsSerializeBefore', # Serialize relative to older instructions
                             # (see IsSerializing) -- confirm exact semantics.
        'IsSerializeAfter',  # Serialize relative to younger instructions
                             # -- confirm exact semantics.
        'IsMemBarrier', # Is a memory barrier
        'IsWriteBarrier', # Is a write barrier
        'IsReadBarrier', # Is a read barrier
        'IsERET', # <- Causes the IFU to stall (MIPS ISA)
        'IsNonSpeculative', # Should not be executed speculatively
        'IsQuiesce', # Is a quiesce instruction
        'IsIprAccess', # Accesses IPRs
        'IsUnverifiable', # Can't be verified by a checker
        'IsSyscall', # Causes a system call to be emulated in syscall
                     # emulation mode.
        # Flags for microcode
        'IsMacroop', # Is a macroop containing microops
        'IsMicroop', # Is a microop
        'IsDelayedCommit', # This microop doesn't commit right away
        'IsLastMicroop', # This microop ends a microop sequence
        'IsFirstMicroop', # This microop begins a microop sequence
        # This flag doesn't do anything yet
        'IsMicroBranch', # This microop branches within the microcode for
                         # a macroop
        'IsDspOp', # DSP operation (presumably MIPS DSP ASE) -- confirm.
        'IsSquashAfter', # Squash all uncommitted state after executed
        # hardware transactional memory
        'IsHtmStart', # Starts a HTM transaction
        'IsHtmStop', # Stops (commits) a HTM transaction
        'IsHtmCancel' # Explicitely aborts a HTM transaction
    ]
| 48.470588 | 77 | 0.67181 |
from m5.params import *
# behavior into the pipeline outside of fetch. Once we go to an exec-in-exec
# CPU model we should be able to get rid of these flags and implement this
# behavior via the execute() methods.
class StaticInstFlags(Enum):
wrapper_name = 'StaticInstFlags'
wrapper_is_struct = True
enum_name = 'Flags'
vals = [
'IsNop', # Is a no-op (no effect at all).
'IsInteger', # References integer regs.
'IsFloating', # References FP regs.
'IsCC', # References CC regs.
'IsVector', # References Vector regs.
'IsVectorElem', # References Vector reg elems.
'IsMemRef', # References memory (load, store, or prefetch)
'IsLoad', # Reads from memory (load or prefetch).
'IsStore', # Writes to memory.
'IsAtomic', # Does atomic RMW to memory.
'IsStoreConditional', # Store conditional instruction.
'IsIndexed', # Accesses memory with an indexed address
# computation
'IsInstPrefetch', # Instruction-cache prefetch.
'IsDataPrefetch', # Data-cache prefetch.
'IsControl', # Control transfer instruction.
'IsDirectControl', # PC relative control transfer.
'IsIndirectControl',# Register indirect control transfer.
'IsCondControl', # Conditional control transfer.
'IsUncondControl', # Unconditional control transfer.
'IsCall', # Subroutine call.
'IsReturn', # Subroutine return.
'IsCondDelaySlot', # Conditional Delay-Slot Instruction
'IsThreadSync', # Thread synchronization operation.
'IsSerializing', # Serializes pipeline: won't execute until all
'IsSerializeBefore',
'IsSerializeAfter',
'IsMemBarrier',
'IsWriteBarrier',
'IsReadBarrier',
'IsERET',
'IsNonSpeculative',
'IsQuiesce',
'IsIprAccess',
'IsUnverifiable',
'IsSyscall', # Causes a system call to be emulated in syscall
# emulation mode.
# Flags for microcode
'IsMacroop', # Is a macroop containing microops
'IsMicroop', # Is a microop
'IsDelayedCommit', # This microop doesn't commit right away
'IsLastMicroop',
'IsFirstMicroop',
'IsMicroBranch', # This microop branches within the microcode for
# a macroop
'IsDspOp',
'IsSquashAfter', # Squash all uncommitted state after executed
# hardware transactional memory
'IsHtmStart', # Starts a HTM transaction
'IsHtmStop', # Stops (commits) a HTM transaction
'IsHtmCancel' # Explicitely aborts a HTM transaction
]
| true | true |
1c2b644bea2ff45a9fe96975432add11f8e84e34 | 4,955 | py | Python | 2016/08_Two-FactorAuthentication/test_display.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 1 | 2021-01-03T23:09:28.000Z | 2021-01-03T23:09:28.000Z | 2016/08_Two-FactorAuthentication/test_display.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 6 | 2020-12-26T21:02:42.000Z | 2020-12-26T21:02:52.000Z | 2016/08_Two-FactorAuthentication/test_display.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | null | null | null | # ======================================================================
# Two-Factor Authentication
# Advent of Code 2016 Day 08 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# t e s t _ d i s p l a y . p y
# ======================================================================
"Test solver for Advent of Code 2016 day 08, Two-Factor Authentication"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import unittest
import aoc_08
import display
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# Worked example instructions for the 7x3 demo screen
EXAMPLE_TEXT = """
rect 3x2
rotate column x=1 by 1
rotate row y=0 by 4
rotate column x=1 by 1
"""
# Expected 7x3 screen images: initial state, then after each instruction
DISPLAY_START = """.......
.......
......."""
DISPLAY_ONE = """###....
###....
......."""
DISPLAY_TWO = """#.#....
###....
.#....."""
DISPLAY_THREE = """....#.#
###....
.#....."""
DISPLAY_FOUR = """.#..#.#
#.#....
.#....."""
# Both parts are driven by the same example input
PART_ONE_TEXT = EXAMPLE_TEXT
PART_TWO_TEXT = EXAMPLE_TEXT
# Part one: number of lit pixels; part two: the final screen image
PART_ONE_RESULT = 6
PART_TWO_RESULT = DISPLAY_FOUR
# ======================================================================
# TestDisplay
# ======================================================================
class TestDisplay(unittest.TestCase):  # pylint: disable=R0904
    "Unit tests for the Display object"
    def test_empty_init(self):
        "A Display built without arguments uses the puzzle-sized defaults"
        disp = display.Display()
        self.assertEqual(disp.part2, False)
        self.assertEqual(disp.text, None)
        self.assertEqual(disp.tall, 6)
        self.assertEqual(disp.wide, 50)
        self.assertEqual(len(disp.pixels), 6)
        self.assertEqual(disp.inst, 0)
    def test_text_init(self):
        "A Display built from the example text steps through each instruction"
        disp = display.Display(text=aoc_08.from_text(EXAMPLE_TEXT), wide=7, tall=3)
        # Construction parses the text but executes nothing yet
        self.assertEqual(disp.part2, False)
        self.assertEqual(len(disp.text), 4)
        self.assertEqual(disp.tall, 3)
        self.assertEqual(disp.wide, 7)
        self.assertEqual(len(disp.pixels), 3)
        self.assertEqual(disp.inst, 0)
        self.assertEqual(disp.lit(), 0)
        self.assertEqual(str(disp), DISPLAY_START)
        # Execute the four instructions plus one extra (exhausted) step:
        # (one_inst() result, instruction counter, expected screen)
        steps = [
            (True, 1, DISPLAY_ONE),
            (True, 2, DISPLAY_TWO),
            (True, 3, DISPLAY_THREE),
            (True, 4, DISPLAY_FOUR),
            (False, 4, DISPLAY_FOUR),  # no instructions remain
        ]
        for advanced, count, screen in steps:
            self.assertEqual(disp.one_inst(), advanced)
            self.assertEqual(disp.inst, count)
            self.assertEqual(str(disp), screen)
            self.assertEqual(disp.lit(), 6)
    def test_part_one(self):
        "Part one counts six lit pixels for the example"
        disp = display.Display(text=aoc_08.from_text(PART_ONE_TEXT), wide=7, tall=3)
        self.assertEqual(disp.part_one(verbose=False), PART_ONE_RESULT)
    def test_part_two(self):
        "Part two returns the final screen image for the example"
        disp = display.Display(part2=True, text=aoc_08.from_text(PART_TWO_TEXT), wide=7, tall=3)
        self.assertEqual(disp.part_two(verbose=False), PART_TWO_RESULT)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
    pass    # no direct entry point; tests are discovered by the test runner
# ======================================================================
# end t e s t _ d i s p l a y . p y end
# ======================================================================
| 34.172414 | 97 | 0.47003 |
import unittest
import aoc_08
import display
EXAMPLE_TEXT = """
rect 3x2
rotate column x=1 by 1
rotate row y=0 by 4
rotate column x=1 by 1
"""
DISPLAY_START = """.......
.......
......."""
DISPLAY_ONE = """###....
###....
......."""
DISPLAY_TWO = """#.#....
###....
.#....."""
DISPLAY_THREE = """....#.#
###....
.#....."""
DISPLAY_FOUR = """.#..#.#
#.#....
.#....."""
PART_ONE_TEXT = EXAMPLE_TEXT
PART_TWO_TEXT = EXAMPLE_TEXT
PART_ONE_RESULT = 6
PART_TWO_RESULT = DISPLAY_FOUR
class TestDisplay(unittest.TestCase):
def test_empty_init(self):
myobj = display.Display()
self.assertEqual(myobj.part2, False)
self.assertEqual(myobj.text, None)
self.assertEqual(myobj.tall, 6)
self.assertEqual(myobj.wide, 50)
self.assertEqual(len(myobj.pixels), 6)
self.assertEqual(myobj.inst, 0)
def test_text_init(self):
myobj = display.Display(text=aoc_08.from_text(EXAMPLE_TEXT), wide=7, tall=3)
self.assertEqual(myobj.part2, False)
self.assertEqual(len(myobj.text), 4)
self.assertEqual(myobj.tall, 3)
self.assertEqual(myobj.wide, 7)
self.assertEqual(len(myobj.pixels), 3)
self.assertEqual(myobj.inst, 0)
self.assertEqual(myobj.lit(), 0)
self.assertEqual(str(myobj), DISPLAY_START)
self.assertEqual(myobj.one_inst(), True)
self.assertEqual(myobj.inst, 1)
self.assertEqual(str(myobj), DISPLAY_ONE)
self.assertEqual(myobj.lit(), 6)
self.assertEqual(myobj.one_inst(), True)
self.assertEqual(myobj.inst, 2)
self.assertEqual(str(myobj), DISPLAY_TWO)
self.assertEqual(myobj.lit(), 6)
self.assertEqual(myobj.one_inst(), True)
self.assertEqual(myobj.inst, 3)
self.assertEqual(str(myobj), DISPLAY_THREE)
self.assertEqual(myobj.lit(), 6)
self.assertEqual(myobj.one_inst(), True)
self.assertEqual(myobj.inst, 4)
self.assertEqual(str(myobj), DISPLAY_FOUR)
self.assertEqual(myobj.lit(), 6)
self.assertEqual(myobj.one_inst(), False)
self.assertEqual(myobj.inst, 4)
self.assertEqual(str(myobj), DISPLAY_FOUR)
self.assertEqual(myobj.lit(), 6)
def test_part_one(self):
myobj = display.Display(text=aoc_08.from_text(PART_ONE_TEXT), wide=7, tall=3)
self.assertEqual(myobj.part_one(verbose=False), PART_ONE_RESULT)
def test_part_two(self):
myobj = display.Display(part2=True, text=aoc_08.from_text(PART_TWO_TEXT), wide=7, tall=3)
self.assertEqual(myobj.part_two(verbose=False), PART_TWO_RESULT)
if __name__ == '__main__':
pass
| true | true |
1c2b653ecaad8665de7e54918f181253274a3a9c | 18,704 | py | Python | apper/apper/Fusion360AppEvents.py | WilkoV/Fusion360_ExportIt | ab32bcb8003aed9a9a5b29ae66a326db44d04df6 | [
"MIT"
] | 6 | 2020-09-20T01:01:16.000Z | 2022-03-30T11:35:24.000Z | apper/apper/Fusion360AppEvents.py | WilkoV/Fusion360_ExportIt | ab32bcb8003aed9a9a5b29ae66a326db44d04df6 | [
"MIT"
] | 23 | 2020-06-05T16:30:11.000Z | 2022-01-11T06:48:10.000Z | apper/apper/Fusion360AppEvents.py | WilkoV/Fusion360_ExportIt | ab32bcb8003aed9a9a5b29ae66a326db44d04df6 | [
"MIT"
] | null | null | null | """
Fusion360AppEvents.py
=====================
Python module for creating Fusion 360 event handlers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2019 by Patrick Rainsberry.
:license: Apache 2.0, see LICENSE for more details.
"""
import adsk.core
import adsk.fusion
import adsk.cam
import traceback
import threading
import json
handlers = []
# The class for the new thread.
class Fusion360CustomThread:
    """Registers a Fusion 360 custom event and runs a companion worker thread.

    Override run_in_thread() with the background work (it typically fires
    the custom event via fire_event) and custom_event_received() with the
    main-thread reaction to that event.

    Args:
        event_id: Unique id, can be used by other functions to trigger the event
        auto_start: When True (the default), the worker thread starts immediately
    """
    def __init__(self, event_id, auto_start=True):
        self.event_id = event_id
        self.thread = None
        self.fusion_app = None
        app = adsk.core.Application.get()
        ui = app.userInterface
        try:
            # Register the custom event and connect the handler.
            # Unregister first in case a stale registration survives a reload.
            app.unregisterCustomEvent(event_id)
            custom_event = app.registerCustomEvent(event_id)
            on_thread_event = _CustomThreadEventHandler(self.custom_event_received)
            custom_event.add(on_thread_event)
            # Keep a module-level reference so the handler is not garbage collected.
            handlers.append(on_thread_event)
            # Create (and optionally start) the worker thread.
            self.stop_flag = threading.Event()
            self.thread = self._make_thread()
            if auto_start:
                self.thread.start()
        except Exception:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
    def _make_thread(self):
        """Build a fresh daemon worker thread bound to the current stop flag."""
        thread = _FusionThread(self.event_id, self.run_in_thread, self.stop_flag)
        thread.daemon = True
        return thread
    def custom_event_received(self, event_dict):
        """Function that will run when event is triggered
        Args:
            event_dict: Argument passed to event. Decoded JSON as a dict
        """
        pass
    def run_in_thread(self, thread, event_id, input_data=None):
        """Function to run in new thread
        Args:
            thread: Reference to thread that function is running in
            event_id: reference to an event id, not necessarily relevant in this case
            input_data: Optional parameter to pass extra data to the thread
        """
        pass
    def fire_event(self, args: dict):
        """Fire this object's custom event with *args* JSON-encoded as payload."""
        app = adsk.core.Application.get()
        app.fireCustomEvent(self.event_id, json.dumps(args))
    def start_thread(self):
        """Start the worker thread (only valid before it has ever run).

        FIX: removed a dead no-op conditional (``if not self.stop_flag: pass``
        -- a threading.Event instance is always truthy and the branch did
        nothing). NOTE: a Thread can only be started once; use
        restart_thread() after the worker has run.
        """
        self.thread.start()
    def restart_thread(self):
        """Signal the current worker to stop and start a fresh one."""
        self.stop_flag.set()
        self.stop_flag = threading.Event()
        self.thread = self._make_thread()
        self.thread.start()
    def on_stop(self):
        """Function is run when the addin stops.
        Clean up. If overridden ensure to execute with super().on_stop()
        """
        app = adsk.core.Application.get()
        app.unregisterCustomEvent(self.event_id)
        self.stop_flag.set()
class _CustomThreadEventHandler(adsk.core.CustomEventHandler):
    """Adapter that decodes a custom event's JSON payload and forwards it.

    Args:
        receiver_function: callable taking the decoded payload dict.
    """
    def __init__(self, receiver_function):
        self.receiver_function = receiver_function
        super().__init__()
    def notify(self, args):
        """Method overwritten on parent class that will be executed when the event fires
        Args:
            args: event arguments (additionalInfo carries a JSON string)
        """
        app = adsk.core.Application.get()
        ui = adsk.core.UserInterface.cast(app.userInterface)
        try:
            # Make sure a command isn't running before changes are made.
            if ui.activeCommand != 'SelectCommand':
                ui.commandDefinitions.itemById('SelectCommand').execute()
            # Get the value from the JSON data passed through the event.
            event_dict = json.loads(args.additionalInfo)
            self.receiver_function(event_dict)
        # FIX: was a bare ``except:`` -- don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            ui.messageBox('Thread Handler Failed:\n{}'.format(traceback.format_exc()))
class _FusionThread(threading.Thread):
    """Internal worker thread that delegates all work to a supplied function."""
    def __init__(self, event_id, run_in_thread, stop_event, input_data=None):
        """Prepare (but do not start) a worker thread for the given function
        Args:
            event_id: Unique id, can be used by other functions to trigger the event
            run_in_thread: Function to run in new thread
            stop_event: threading.Event the worker can poll to know when to stop
            input_data: Optional parameter to pass extra data to the thread
        """
        threading.Thread.__init__(self)
        self.stopped = stop_event
        self.event_id = event_id
        self.run_function = run_in_thread
        self.input_data = input_data
    def run(self):
        """Thread entry point: call the supplied function with this thread,
        the event id and any extra input data.
        """
        self.run_function(self, self.event_id, self.input_data)
class Fusion360NewThread:
    """Starts a new thread and runs the given function in it
    Args:
        event_id: Unique id, can be used by other functions to trigger the event
        input_data: Optional parameter to pass extra data to the thread
    """
    def __init__(self, event_id, input_data=None):
        self.event_id = event_id
        self.thread = None
        self.fusion_app = None
        self.input_data = input_data
        try:
            # Create and start the worker immediately; run_in_thread
            # implementations can poll stop_flag to exit cleanly.
            self.stop_flag = threading.Event()
            self.thread = _FusionThread(self.event_id, self.run_in_thread, self.stop_flag, self.input_data)
            self.thread.daemon = True
            self.thread.start()
        except Exception:
            app = adsk.core.Application.get()
            ui = app.userInterface
            # FIX: corrected typo in user-facing message ("Crating" -> "Creating").
            ui.messageBox('Failed Creating New Thread:\n{}'.format(traceback.format_exc()))
    def run_in_thread(self, thread, event_id, input_data=None):
        """Function to run in new thread
        Args:
            thread: Reference to thread that function is running in
            event_id: reference to an event id, not necessarily relevant in this case
            input_data: Optional parameter to pass extra data to the thread
        """
        pass
    def stop_thread(self):
        """Signal the worker thread to stop by setting the shared stop flag."""
        self.stop_flag.set()
class Fusion360CustomEvent:
    """Creates a new Custom Event handler (no companion thread).

    Override custom_event_received() with the reaction to the event;
    trigger it with Application.fireCustomEvent(event_id, json_payload).

    Args:
        event_id: Unique id, can be used by other functions to trigger the event
    """
    def __init__(self, event_id):
        self.event_id = event_id
        self.fusion_app = None
        app = adsk.core.Application.get()
        ui = app.userInterface
        try:
            # Register the custom event and connect the handler.
            # Unregister first in case a stale registration survives a reload.
            app.unregisterCustomEvent(event_id)
            custom_event = app.registerCustomEvent(event_id)
            on_custom_event = _CustomThreadEventHandler(self.custom_event_received)
            custom_event.add(on_custom_event)
            # Keep a module-level reference so the handler is not garbage collected.
            handlers.append(on_custom_event)
        # FIX: dropped the unused ``as e`` binding.
        except Exception:
            ui.messageBox('Failed creating custom event:\n{}'.format(traceback.format_exc()))
    def custom_event_received(self, event_dict: dict):
        """Function that will run when event is triggered
        Args:
            event_dict: Argument passed to event. Decoded JSON as a dict
        """
        pass
    def on_stop(self):
        """Function is run when the addin stops.
        Clean up. If overridden ensure to execute with super().on_stop()
        """
        app = adsk.core.Application.get()
        app.unregisterCustomEvent(self.event_id)
# Wrapper classes for Fusion document and workspace events.
class Fusion360DocumentEvent:
    """Creates a new Document Event handler.
    Subclass and override document_event_received() to react to the event.
    Args:
        event_id: Unique id for this event handler
        event_type: Any document event in the current application
    """
    def __init__(self, event_id: str, event_type):
        self.event_id = event_id
        self.fusion_app = None
        self.event_type = event_type
        self.document_handler = _DocumentHandler(self.document_event_received)
        event_type.add(self.document_handler)
        # Keep a module-level reference so the handler is not garbage collected
        handlers.append(self.document_handler)
    def document_event_received(self, event_args, document):
        """Override with the reaction to the document event
        Args:
            event_args: adsk.core.DocumentEventArgs for the firing event
            document: the document associated with the event
        """
        pass
    def on_stop(self):
        """Function is run when the addin stops.
        Clean up. If overridden ensure to execute with super().on_stop()
        """
        self.event_type.remove(self.document_handler)
class Fusion360WorkspaceEvent:
    """Creates a new Workspace Event handler.
    Subclass and override workspace_event_received() to react to the event.
    """
    def __init__(self, event_id, event_type):
        """Connects the handler to the given workspace event
        Args:
            event_id: Unique id for this event handler
            event_type: a workspace event of the current application
        """
        self.event_id = event_id
        self.fusion_app = None
        self.event_type = event_type
        self.workspace_handler = _WorkspaceHandler(self.workspace_event_received)
        event_type.add(self.workspace_handler)
        # Keep a module-level reference so the handler is not garbage collected
        handlers.append(self.workspace_handler)
    def workspace_event_received(self, event_args, workspace):
        """Override with the reaction to the workspace event
        Args:
            event_args: adsk.core.WorkspaceEventArgs for the firing event
            workspace: the workspace associated with the event
        """
        pass
    def on_stop(self):
        """Function is run when the addin stops.
        Clean up. If overridden ensure to execute with super().on_stop()
        """
        self.event_type.remove(self.workspace_handler)
# Event handler for the documentActivated event.
class _DocumentHandler(adsk.core.DocumentEventHandler):
    """Adapter forwarding document events to a plain callable.

    Args:
        document_event_received: callable taking (event_args, document).
    """
    def __init__(self, document_event_received):
        self.document_function = document_event_received
        super().__init__()
    def notify(self, args):
        """Method overwritten on parent class that will be executed when the event fires
        Args:
            args: event arguments
        """
        try:
            event_args = adsk.core.DocumentEventArgs.cast(args)
            document = event_args.document
            self.document_function(event_args, document)
        # FIX: was a bare ``except:`` -- don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            app = adsk.core.Application.cast(adsk.core.Application.get())
            ui = app.userInterface
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class _WorkspaceHandler(adsk.core.WorkspaceEventHandler):
    """Adapter forwarding workspace events to a plain callable.

    Args:
        workspace_event_received: callable taking (event_args, workspace).
    """
    def __init__(self, workspace_event_received):
        super().__init__()
        self.workspace_function = workspace_event_received
    def notify(self, args):
        """Method overwritten on parent class that will be executed when the event fires
        Args:
            args: event arguments
        """
        try:
            event_args = adsk.core.WorkspaceEventArgs.cast(args)
            workspace = event_args.workspace
            self.workspace_function(event_args, workspace)
        # FIX: was a bare ``except:`` -- don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            app = adsk.core.Application.cast(adsk.core.Application.get())
            ui = app.userInterface
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for web request events (open/insert from URL).
class _WebRequestHandler(adsk.core.WebRequestEventHandler):
    """Adapter that unpacks web request event data and forwards it.

    Args:
        web_request_event_received: callable receiving the unpacked fields.
    """
    def __init__(self, web_request_event_received):
        super().__init__()
        self.web_request_function = web_request_event_received
    def notify(self, args):
        """Method overwritten on parent class that will be executed when the event fires
        Args:
            args: event arguments
        """
        try:
            event_args = adsk.core.WebRequestEventArgs.cast(args)
            file = event_args.file
            fusion_id = event_args.id
            occurrence_or_document = event_args.occurrenceOrDocument
            private_info = event_args.privateInfo
            properties = event_args.properties
            # TODO: privateInfo/properties are forwarded as raw strings.
            # JSON-decoding them here was previously attempted but caused
            # odd errors; add proper error checking and type checks before
            # re-enabling decoding.
            self.web_request_function(event_args, file, fusion_id, occurrence_or_document, private_info, properties)
        # FIX: was a bare ``except:`` -- don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            app = adsk.core.Application.cast(adsk.core.Application.get())
            ui = app.userInterface
            ui.messageBox('Failed to load data in event handler:\n{}'.format(traceback.format_exc()))
class Fusion360WebRequestEvent:
    """Create a new Web Request Event action
    Subclass and override web_request_event_received() to react to the event.
    Args:
        event_id: A unique id for this event
        event_type: One of: [Application.insertedFromURL, Application.insertingFromURL, Application.openedFromURL, Application.openingFromURL]
    """
    def __init__(self, event_id: str, event_type):
        self.event_id = event_id
        self.fusion_app = None
        self.event_type = event_type
        self.web_request_handler = _WebRequestHandler(self.web_request_event_received)
        event_type.add(self.web_request_handler)
        # Keep a module-level reference so the handler is not garbage collected
        handlers.append(self.web_request_handler)
    def web_request_event_received(self, event_args, file, fusion_id, occurrence_or_document, private_info, properties):
        """This function will be executed in response to the web request event
        Args:
            properties: design properties passed with the file (Part Number, Description, Name)
            private_info: Extra info passed as a raw string (typically JSON)
            fusion_id: A unique identifier to help determine whether the component is new or an instance
            occurrence_or_document: If opened, then it is a new document. If it was inserted, it is the created occurrence
            file: Path to the file that was just received
            event_args: adsk.core.WebRequestEventArgs
        """
        pass
    def on_stop(self):
        """Function is run when the addin stops.
        Clean up. If overridden ensure to execute with super().on_stop()
        """
        self.event_type.remove(self.web_request_handler)
class _CommandEventHandler(adsk.core.ApplicationCommandEventHandler):
    """Adapter forwarding application command events to a plain callable.

    Args:
        command_function: callable taking (event_args, command_id, command_definition).
    """
    def __init__(self, command_function):
        super().__init__()
        self.command_function = command_function
    def notify(self, args):
        """Method overwritten on parent class that will be executed when the event fires
        Args:
            args: adsk.core.ApplicationCommandEventArgs
        """
        try:
            event_args = adsk.core.ApplicationCommandEventArgs.cast(args)
            command_id = event_args.commandId
            command_definition = event_args.commandDefinition
            self.command_function(event_args, command_id, command_definition)
        # FIX: was a bare ``except:`` -- don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            app = adsk.core.Application.cast(adsk.core.Application.get())
            ui = app.userInterface
            ui.messageBox('Failed to handle Command Event:\n{}'.format(traceback.format_exc()))
class Fusion360CommandEvent:
    """Create a new Command Event action
    Subclass and override command_event_received() to react to the event.
    Args:
        event_id: A unique id for this event
        event_type: One of: [UserInterface.commandCreated, UserInterface.commandStarting, UserInterface.commandTerminated]
    """
    def __init__(self, event_id, event_type):
        self.event_id = event_id
        self.fusion_app = None
        self.event_type = event_type
        self.command_handler = _CommandEventHandler(self.command_event_received)
        event_type.add(self.command_handler)
        # Keep a module-level reference so the handler is not garbage collected
        handlers.append(self.command_handler)
    def command_event_received(self, event_args, command_id, command_definition):
        """This function will be executed in response to the command event
        Args:
            command_definition: the command definition of the command that was just executed
            command_id: the id of the command that was just executed
            event_args: adsk.core.ApplicationCommandEventArgs
        """
        pass
    def on_stop(self):
        """Function is run when the addin stops.
        Clean up. If overridden ensure to execute with super().on_stop()
        """
        self.event_type.remove(self.command_handler)
class _ActiveSelectionEventHandler(adsk.core.ActiveSelectionEventHandler):
    """Adapter forwarding active selection events to a plain callable.

    Args:
        command_function: callable taking (event_args, current_selection).
    """
    def __init__(self, command_function):
        super().__init__()
        self.command_function = command_function
    def notify(self, args):
        """Method overwritten on parent class that will be executed when the event fires
        Args:
            args: adsk.core.ActiveSelectionEventArgs
        """
        try:
            event_args = adsk.core.ActiveSelectionEventArgs.cast(args)
            # FIX: was ``current_selection: [adsk.core.Selection] = ...`` -- the
            # list literal annotation is evaluated at runtime, allocating a
            # throwaway list; documented as a comment instead.
            current_selection = event_args.currentSelection  # list of adsk.core.Selection
            self.command_function(event_args, current_selection)
        # FIX: was a bare ``except:`` -- don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            app = adsk.core.Application.cast(adsk.core.Application.get())
            ui = app.userInterface
            ui.messageBox('Failed to handle Selection Event:\n{}'.format(traceback.format_exc()))
class Fusion360ActiveSelectionEvent:
    """Create a new Active Selection Event action.

    Args:
        event_id: A unique id for this event
        event_type: the ActiveSelectionEvent collection to attach to
    """

    def __init__(self, event_id, event_type):
        # NOTE: the previous implementation fetched the Application and
        # UserInterface here without ever using them; those dead locals
        # were removed.
        self.event_id = event_id
        self.fusion_app = None
        self.command_handler = _ActiveSelectionEventHandler(self.selection_event_received)
        self.event_type = event_type
        self.event_type.add(self.command_handler)
        handlers.append(self.command_handler)

    def selection_event_received(self, event_args, current_selection):
        """This function will be executed in response to the selection event.

        Args:
            event_args: adsk.core.ActiveSelectionEventArgs
            current_selection: An array of type adsk.core.Selection
        """
        pass

    def on_stop(self):
        """Function is run when the addin stops.

        Clean up. If overridden ensure to execute with super().on_stop()
        """
        self.event_type.remove(self.command_handler)
| 35.091932 | 147 | 0.626176 | import adsk.core
import adsk.fusion
import adsk.cam
import traceback
import threading
import json
handlers = []
class Fusion360CustomThread:
def __init__(self, event_id, auto_start=True):
self.event_id = event_id
self.thread = None
self.fusion_app = None
app = adsk.core.Application.get()
ui = app.userInterface
try:
app.unregisterCustomEvent(event_id)
custom_event = app.registerCustomEvent(event_id)
on_thread_event = _CustomThreadEventHandler(self.custom_event_received)
custom_event.add(on_thread_event)
handlers.append(on_thread_event)
self.stop_flag = threading.Event()
self.thread = _FusionThread(self.event_id, self.run_in_thread, self.stop_flag)
self.thread.daemon = True
if auto_start:
self.thread.start()
except Exception as e:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def custom_event_received(self, event_dict):
pass
def run_in_thread(self, thread, event_id, input_data=None):
pass
def fire_event(self, args: dict):
app = adsk.core.Application.get()
app.fireCustomEvent(self.event_id, json.dumps(args))
def start_thread(self):
if not self.stop_flag:
pass
self.thread.start()
def restart_thread(self):
self.stop_flag.set()
self.stop_flag = threading.Event()
self.thread = _FusionThread(self.event_id, self.run_in_thread, self.stop_flag)
self.thread.daemon = True
self.thread.start()
def on_stop(self):
app = adsk.core.Application.get()
app.unregisterCustomEvent(self.event_id)
self.stop_flag.set()
class _CustomThreadEventHandler(adsk.core.CustomEventHandler):
    """Internal handler that receives custom events fired from worker threads."""
    def __init__(self, receiver_function):
        # receiver_function: callable taking the decoded JSON payload (dict).
        self.receiver_function = receiver_function
        super().__init__()
    def notify(self, args):
        """Executed on the main thread when the custom event fires.

        Args:
            args: event args whose ``additionalInfo`` carries a JSON-encoded
                payload set by the firing thread.
        """
        app = adsk.core.Application.get()
        ui = adsk.core.UserInterface.cast(app.userInterface)
        try:
            # Switch the UI back to the neutral Select command first --
            # presumably to avoid clashing with an active command;
            # TODO(review): confirm this is still required.
            if ui.activeCommand != 'SelectCommand':
                ui.commandDefinitions.itemById('SelectCommand').execute()
            # Get the value from the JSON data passed through the event.
            event_dict = json.loads(args.additionalInfo)
            self.receiver_function(event_dict)
        except:
            ui.messageBox('Thread Handler Failed:\n{}'.format(traceback.format_exc()))
class _FusionThread(threading.Thread):
    """Worker thread that runs a user-supplied function until stopped."""
    def __init__(self, event_id, run_in_thread, stop_event, input_data=None):
        """Store the run function and control state for later use.

        Args:
            event_id: custom-event id the run function can fire results on.
            run_in_thread: callable executed on the new thread.
            stop_event: threading.Event the run function may check to exit.
            input_data: optional payload forwarded to run_in_thread.
        """
        threading.Thread.__init__(self)
        self.stopped = stop_event
        self.event_id = event_id
        self.run_function = run_in_thread
        self.input_data = input_data
    def run(self):
        """Thread entry point: delegate to the supplied run function."""
        self.run_function(self, self.event_id, self.input_data)
class Fusion360NewThread:
def __init__(self, event_id, input_data=None):
self.event_id = event_id
self.thread = None
self.fusion_app = None
self.input_data = input_data
try:
# create and start the new thread
self.stop_flag = threading.Event()
self.thread = _FusionThread(self.event_id, self.run_in_thread, self.stop_flag, self.input_data)
self.thread.daemon = True
self.thread.start()
except Exception as e:
app = adsk.core.Application.get()
ui = app.userInterface
ui.messageBox('Failed Crating New Thread:\n{}'.format(traceback.format_exc()))
def run_in_thread(self, thread, event_id, input_data=None):
pass
def stop_thread(self):
self.stop_flag.set()
class Fusion360CustomEvent:
def __init__(self, event_id):
self.event_id = event_id
self.fusion_app = None
app = adsk.core.Application.get()
ui = app.userInterface
try:
# Register the custom event and connect the handler.
app.unregisterCustomEvent(event_id)
custom_event = app.registerCustomEvent(event_id)
on_custom_event = _CustomThreadEventHandler(self.custom_event_received)
custom_event.add(on_custom_event)
handlers.append(on_custom_event)
except Exception as e:
ui.messageBox('Failed creating custom event:\n{}'.format(traceback.format_exc()))
def custom_event_received(self, event_dict: dict):
pass
def on_stop(self):
app = adsk.core.Application.get()
app.unregisterCustomEvent(self.event_id)
# The class for the new thread.
class Fusion360DocumentEvent:
def __init__(self, event_id: str, event_type):
self.event_id = event_id
self.fusion_app = None
self.event_type = event_type
self.document_handler = _DocumentHandler(self.document_event_received)
event_type.add(self.document_handler)
handlers.append(self.document_handler)
def document_event_received(self, event_args, document):
pass
def on_stop(self):
self.event_type.remove(self.document_handler)
class Fusion360WorkspaceEvent:
def __init__(self, event_id, event_type):
self.event_id = event_id
self.fusion_app = None
self.event_type = event_type
self.workspace_handler = _WorkspaceHandler(self.workspace_event_received)
event_type.add(self.workspace_handler)
handlers.append(self.workspace_handler)
def workspace_event_received(self, event_args, workspace):
pass
def on_stop(self):
self.event_type.remove(self.workspace_handler)
# Event handler for the documentActivated event.
class _DocumentHandler(adsk.core.DocumentEventHandler):
def __init__(self, document_event_received):
self.document_function = document_event_received
super().__init__()
def notify(self, args):
try:
event_args = adsk.core.DocumentEventArgs.cast(args)
document = event_args.document
self.document_function(event_args, document)
except:
app = adsk.core.Application.cast(adsk.core.Application.get())
ui = app.userInterface
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
class _WorkspaceHandler(adsk.core.WorkspaceEventHandler):
def __init__(self, workspace_event_received):
super().__init__()
self.workspace_function = workspace_event_received
def notify(self, args):
try:
event_args = adsk.core.WorkspaceEventArgs.cast(args)
workspace = event_args.workspace
self.workspace_function(event_args, workspace)
except:
app = adsk.core.Application.cast(adsk.core.Application.get())
ui = app.userInterface
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for the workspaceActivated event.
class _WebRequestHandler(adsk.core.WebRequestEventHandler):
def __init__(self, web_request_event_received):
super().__init__()
self.web_request_function = web_request_event_received
def notify(self, args):
try:
event_args = adsk.core.WebRequestEventArgs.cast(args)
file = event_args.file
fusion_id = event_args.id
occurrence_or_document = event_args.occurrenceOrDocument
private_info = event_args.privateInfo
properties = event_args.properties
# TODO implement error checking and type checks here. Was getting weird errors.
# if len(event_args.privateInfo) > 1:
# try:
# private_info = json.loads(event_args.privateInfo)
# except:
# private_info = ""
# if len(event_args.properties) > 1:
# try:
# properties = json.loads(event_args.properties)
# except:
# properties = ""
self.web_request_function(event_args, file, fusion_id, occurrence_or_document, private_info, properties)
except:
app = adsk.core.Application.cast(adsk.core.Application.get())
ui = app.userInterface
ui.messageBox('Failed to load data in event handler:\n{}'.format(traceback.format_exc()))
class Fusion360WebRequestEvent:
def __init__(self, event_id: str, event_type):
self.event_id = event_id
self.fusion_app = None
self.event_type = event_type
self.web_request_handler = _WebRequestHandler(self.web_request_event_received)
event_type.add(self.web_request_handler)
handlers.append(self.web_request_handler)
def web_request_event_received(self, event_args, file, fusion_id, occurrence_or_document, private_info, properties):
pass
def on_stop(self):
self.event_type.remove(self.web_request_handler)
class _CommandEventHandler(adsk.core.ApplicationCommandEventHandler):
def __init__(self, command_function):
super().__init__()
self.command_function = command_function
def notify(self, args):
try:
event_args = adsk.core.ApplicationCommandEventArgs.cast(args)
command_id = event_args.commandId
command_definition = event_args.commandDefinition
self.command_function(event_args, command_id, command_definition)
except:
app = adsk.core.Application.cast(adsk.core.Application.get())
ui = app.userInterface
ui.messageBox('Failed to handle Command Event:\n{}'.format(traceback.format_exc()))
class Fusion360CommandEvent:
def __init__(self, event_id, event_type):
self.event_id = event_id
self.fusion_app = None
self.event_type = event_type
self.command_handler = _CommandEventHandler(self.command_event_received)
event_type.add(self.command_handler)
handlers.append(self.command_handler)
def command_event_received(self, event_args, command_id, command_definition):
pass
def on_stop(self):
self.event_type.remove(self.command_handler)
class _ActiveSelectionEventHandler(adsk.core.ActiveSelectionEventHandler):
def __init__(self, command_function):
super().__init__()
self.command_function = command_function
def notify(self, args):
try:
event_args = adsk.core.ActiveSelectionEventArgs.cast(args)
current_selection: [adsk.core.Selection] = event_args.currentSelection
self.command_function(event_args, current_selection)
except:
app = adsk.core.Application.cast(adsk.core.Application.get())
ui = app.userInterface
ui.messageBox('Failed to handle Selection Event:\n{}'.format(traceback.format_exc()))
class Fusion360ActiveSelectionEvent:
def __init__(self, event_id, event_type):
app = adsk.core.Application.get()
ui = app.userInterface
self.event_id = event_id
self.fusion_app = None
self.command_handler = _ActiveSelectionEventHandler(self.selection_event_received)
self.event_type = event_type
self.event_type.add(self.command_handler)
handlers.append(self.command_handler)
def selection_event_received(self, event_args, current_selection):
pass
def on_stop(self):
self.event_type.remove(self.command_handler)
| true | true |
1c2b65979c2e2af3d50481ec12dfa21dd2dcdfa2 | 12,231 | py | Python | manila/tests/api/v1/test_scheduler_stats.py | gouthampacha/manila | 4b7ba9b99d272663f519b495668715fbf979ffbc | [
"Apache-2.0"
] | null | null | null | manila/tests/api/v1/test_scheduler_stats.py | gouthampacha/manila | 4b7ba9b99d272663f519b495668715fbf979ffbc | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | manila/tests/api/v1/test_scheduler_stats.py | gouthampacha/manila | 4b7ba9b99d272663f519b495668715fbf979ffbc | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import uuidutils
from webob import exc
from manila.api.openstack import api_version_request as api_version
from manila.api.v1 import scheduler_stats
from manila import context
from manila import policy
from manila.scheduler import rpcapi
from manila.share import share_types
from manila import test
from manila.tests.api import fakes
FAKE_POOLS = [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
'capabilities': {
'updated': None,
'total_capacity': 1024,
'free_capacity': 100,
'share_backend_name': 'pool1',
'reserved_percentage': 0,
'driver_version': '1.0.0',
'storage_protocol': 'iSCSI',
'qos': 'False',
},
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
'capabilities': {
'updated': None,
'total_capacity': 512,
'free_capacity': 200,
'share_backend_name': 'pool2',
'reserved_percentage': 0,
'driver_version': '1.0.1',
'storage_protocol': 'iSER',
'qos': 'True',
},
},
]
@ddt.ddt
class SchedulerStatsControllerTestCase(test.TestCase):
def setUp(self):
super(SchedulerStatsControllerTestCase, self).setUp()
self.flags(host='fake')
self.controller = scheduler_stats.SchedulerStatsController()
self.resource_name = self.controller.resource_name
self.ctxt = context.RequestContext('admin', 'fake', True)
self.mock_policy_check = self.mock_object(
policy, 'check_policy', mock.Mock(return_value=True))
def test_pools_index(self):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
req = fakes.HTTPRequest.blank('/v1/fake_project/scheduler_stats/pools')
req.environ['manila.context'] = self.ctxt
result = self.controller.pools_index(req)
expected = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
self.assertDictMatch(result, expected)
mock_get_pools.assert_called_once_with(self.ctxt, filters={},
cached=True)
self.mock_policy_check.assert_called_once_with(
self.ctxt, self.resource_name, 'index')
@ddt.data(('index', False), ('detail', True))
@ddt.unpack
def test_pools_with_share_type_disabled(self, action, detail):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
url = '/v1/fake_project/scheduler-stats/pools/%s' % action
url += '?backend=back1&host=host1&pool=pool1'
req = fakes.HTTPRequest.blank(url)
req.environ['manila.context'] = self.ctxt
expected_filters = {
'host': 'host1',
'pool': 'pool1',
'backend': 'back1',
}
if detail:
expected_result = {"pools": FAKE_POOLS}
else:
expected_result = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
result = self.controller._pools(req, action, False)
self.assertDictMatch(result, expected_result)
mock_get_pools.assert_called_once_with(self.ctxt,
filters=expected_filters,
cached=True)
@ddt.data(('index', False, True),
('index', False, False),
('detail', True, True),
('detail', True, False))
@ddt.unpack
def test_pools_with_share_type_enable(self, action, detail, uuid):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
if uuid:
share_type = uuidutils.generate_uuid()
else:
share_type = 'test_type'
self.mock_object(
share_types, 'get_share_type_by_name_or_id',
mock.Mock(return_value={'extra_specs':
{'snapshot_support': True}}))
url = '/v1/fake_project/scheduler-stats/pools/%s' % action
url += ('?backend=back1&host=host1&pool=pool1&share_type=%s'
% share_type)
req = fakes.HTTPRequest.blank(url)
req.environ['manila.context'] = self.ctxt
expected_filters = {
'host': 'host1',
'pool': 'pool1',
'backend': 'back1',
'capabilities': {
'snapshot_support': True
}
}
if detail:
expected_result = {"pools": FAKE_POOLS}
else:
expected_result = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
result = self.controller._pools(req, action, True)
self.assertDictMatch(result, expected_result)
mock_get_pools.assert_called_once_with(self.ctxt,
filters=expected_filters,
cached=True)
@ddt.data('index', 'detail')
def test_pools_with_share_type_not_found(self, action):
url = '/v1/fake_project/scheduler-stats/pools/%s' % action
url += '?backend=.%2A&host=host1&pool=pool%2A&share_type=fake_name_1'
req = fakes.HTTPRequest.blank(url)
self.assertRaises(exc.HTTPBadRequest,
self.controller._pools,
req, action, True)
@ddt.data("1.0", "2.22", "2.23")
def test_pools_index_with_filters(self, microversion):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
self.mock_object(
share_types, 'get_share_type_by_name',
mock.Mock(return_value={'extra_specs':
{'snapshot_support': True}}))
url = '/v1/fake_project/scheduler-stats/pools/detail'
url += '?backend=.%2A&host=host1&pool=pool%2A&share_type=test_type'
req = fakes.HTTPRequest.blank(url, version=microversion)
req.environ['manila.context'] = self.ctxt
result = self.controller.pools_index(req)
expected = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
expected_filters = {
'host': 'host1',
'pool': 'pool*',
'backend': '.*',
'share_type': 'test_type',
}
if (api_version.APIVersionRequest(microversion) >=
api_version.APIVersionRequest('2.23')):
expected_filters.update(
{'capabilities': {'snapshot_support': True}})
expected_filters.pop('share_type', None)
self.assertDictMatch(result, expected)
mock_get_pools.assert_called_once_with(self.ctxt,
filters=expected_filters,
cached=True)
self.mock_policy_check.assert_called_once_with(
self.ctxt, self.resource_name, 'index')
def test_get_pools_detail(self):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
req = fakes.HTTPRequest.blank(
'/v1/fake_project/scheduler_stats/pools/detail')
req.environ['manila.context'] = self.ctxt
result = self.controller.pools_detail(req)
expected = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
'capabilities': {
'updated': None,
'total_capacity': 1024,
'free_capacity': 100,
'share_backend_name': 'pool1',
'reserved_percentage': 0,
'driver_version': '1.0.0',
'storage_protocol': 'iSCSI',
'qos': 'False',
},
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
'capabilities': {
'updated': None,
'total_capacity': 512,
'free_capacity': 200,
'share_backend_name': 'pool2',
'reserved_percentage': 0,
'driver_version': '1.0.1',
'storage_protocol': 'iSER',
'qos': 'True',
},
},
],
}
self.assertDictMatch(expected, result)
mock_get_pools.assert_called_once_with(self.ctxt, filters={},
cached=True)
self.mock_policy_check.assert_called_once_with(
self.ctxt, self.resource_name, 'detail')
class SchedulerStatsTestCase(test.TestCase):
    """Tests for the scheduler_stats resource factory."""
    def test_create_resource(self):
        # create_resource() must wire a SchedulerStatsController into the
        # returned WSGI resource.
        result = scheduler_stats.create_resource()
        self.assertIsInstance(result.controller,
                              scheduler_stats.SchedulerStatsController)
| 35.763158 | 79 | 0.484425 |
import ddt
import mock
from oslo_utils import uuidutils
from webob import exc
from manila.api.openstack import api_version_request as api_version
from manila.api.v1 import scheduler_stats
from manila import context
from manila import policy
from manila.scheduler import rpcapi
from manila.share import share_types
from manila import test
from manila.tests.api import fakes
FAKE_POOLS = [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
'capabilities': {
'updated': None,
'total_capacity': 1024,
'free_capacity': 100,
'share_backend_name': 'pool1',
'reserved_percentage': 0,
'driver_version': '1.0.0',
'storage_protocol': 'iSCSI',
'qos': 'False',
},
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
'capabilities': {
'updated': None,
'total_capacity': 512,
'free_capacity': 200,
'share_backend_name': 'pool2',
'reserved_percentage': 0,
'driver_version': '1.0.1',
'storage_protocol': 'iSER',
'qos': 'True',
},
},
]
@ddt.ddt
class SchedulerStatsControllerTestCase(test.TestCase):
def setUp(self):
super(SchedulerStatsControllerTestCase, self).setUp()
self.flags(host='fake')
self.controller = scheduler_stats.SchedulerStatsController()
self.resource_name = self.controller.resource_name
self.ctxt = context.RequestContext('admin', 'fake', True)
self.mock_policy_check = self.mock_object(
policy, 'check_policy', mock.Mock(return_value=True))
def test_pools_index(self):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
req = fakes.HTTPRequest.blank('/v1/fake_project/scheduler_stats/pools')
req.environ['manila.context'] = self.ctxt
result = self.controller.pools_index(req)
expected = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
self.assertDictMatch(result, expected)
mock_get_pools.assert_called_once_with(self.ctxt, filters={},
cached=True)
self.mock_policy_check.assert_called_once_with(
self.ctxt, self.resource_name, 'index')
@ddt.data(('index', False), ('detail', True))
@ddt.unpack
def test_pools_with_share_type_disabled(self, action, detail):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
url = '/v1/fake_project/scheduler-stats/pools/%s' % action
url += '?backend=back1&host=host1&pool=pool1'
req = fakes.HTTPRequest.blank(url)
req.environ['manila.context'] = self.ctxt
expected_filters = {
'host': 'host1',
'pool': 'pool1',
'backend': 'back1',
}
if detail:
expected_result = {"pools": FAKE_POOLS}
else:
expected_result = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
result = self.controller._pools(req, action, False)
self.assertDictMatch(result, expected_result)
mock_get_pools.assert_called_once_with(self.ctxt,
filters=expected_filters,
cached=True)
@ddt.data(('index', False, True),
('index', False, False),
('detail', True, True),
('detail', True, False))
@ddt.unpack
def test_pools_with_share_type_enable(self, action, detail, uuid):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
if uuid:
share_type = uuidutils.generate_uuid()
else:
share_type = 'test_type'
self.mock_object(
share_types, 'get_share_type_by_name_or_id',
mock.Mock(return_value={'extra_specs':
{'snapshot_support': True}}))
url = '/v1/fake_project/scheduler-stats/pools/%s' % action
url += ('?backend=back1&host=host1&pool=pool1&share_type=%s'
% share_type)
req = fakes.HTTPRequest.blank(url)
req.environ['manila.context'] = self.ctxt
expected_filters = {
'host': 'host1',
'pool': 'pool1',
'backend': 'back1',
'capabilities': {
'snapshot_support': True
}
}
if detail:
expected_result = {"pools": FAKE_POOLS}
else:
expected_result = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
result = self.controller._pools(req, action, True)
self.assertDictMatch(result, expected_result)
mock_get_pools.assert_called_once_with(self.ctxt,
filters=expected_filters,
cached=True)
@ddt.data('index', 'detail')
def test_pools_with_share_type_not_found(self, action):
url = '/v1/fake_project/scheduler-stats/pools/%s' % action
url += '?backend=.%2A&host=host1&pool=pool%2A&share_type=fake_name_1'
req = fakes.HTTPRequest.blank(url)
self.assertRaises(exc.HTTPBadRequest,
self.controller._pools,
req, action, True)
@ddt.data("1.0", "2.22", "2.23")
def test_pools_index_with_filters(self, microversion):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
self.mock_object(
share_types, 'get_share_type_by_name',
mock.Mock(return_value={'extra_specs':
{'snapshot_support': True}}))
url = '/v1/fake_project/scheduler-stats/pools/detail'
url += '?backend=.%2A&host=host1&pool=pool%2A&share_type=test_type'
req = fakes.HTTPRequest.blank(url, version=microversion)
req.environ['manila.context'] = self.ctxt
result = self.controller.pools_index(req)
expected = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
}
]
}
expected_filters = {
'host': 'host1',
'pool': 'pool*',
'backend': '.*',
'share_type': 'test_type',
}
if (api_version.APIVersionRequest(microversion) >=
api_version.APIVersionRequest('2.23')):
expected_filters.update(
{'capabilities': {'snapshot_support': True}})
expected_filters.pop('share_type', None)
self.assertDictMatch(result, expected)
mock_get_pools.assert_called_once_with(self.ctxt,
filters=expected_filters,
cached=True)
self.mock_policy_check.assert_called_once_with(
self.ctxt, self.resource_name, 'index')
def test_get_pools_detail(self):
mock_get_pools = self.mock_object(rpcapi.SchedulerAPI,
'get_pools',
mock.Mock(return_value=FAKE_POOLS))
req = fakes.HTTPRequest.blank(
'/v1/fake_project/scheduler_stats/pools/detail')
req.environ['manila.context'] = self.ctxt
result = self.controller.pools_detail(req)
expected = {
'pools': [
{
'name': 'host1@backend1#pool1',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool1',
'capabilities': {
'updated': None,
'total_capacity': 1024,
'free_capacity': 100,
'share_backend_name': 'pool1',
'reserved_percentage': 0,
'driver_version': '1.0.0',
'storage_protocol': 'iSCSI',
'qos': 'False',
},
},
{
'name': 'host1@backend1#pool2',
'host': 'host1',
'backend': 'backend1',
'pool': 'pool2',
'capabilities': {
'updated': None,
'total_capacity': 512,
'free_capacity': 200,
'share_backend_name': 'pool2',
'reserved_percentage': 0,
'driver_version': '1.0.1',
'storage_protocol': 'iSER',
'qos': 'True',
},
},
],
}
self.assertDictMatch(expected, result)
mock_get_pools.assert_called_once_with(self.ctxt, filters={},
cached=True)
self.mock_policy_check.assert_called_once_with(
self.ctxt, self.resource_name, 'detail')
class SchedulerStatsTestCase(test.TestCase):
def test_create_resource(self):
result = scheduler_stats.create_resource()
self.assertIsInstance(result.controller,
scheduler_stats.SchedulerStatsController)
| true | true |
1c2b66c2df3cabd37c9bb363673aedd583e96669 | 13,909 | py | Python | spherical_cluster.py | JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features | 61fe5a231a0062d9939d1ccdfc0babcbe9562867 | [
"MIT"
] | 2 | 2021-08-19T14:04:25.000Z | 2022-03-17T11:37:24.000Z | spherical_cluster.py | JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features | 61fe5a231a0062d9939d1ccdfc0babcbe9562867 | [
"MIT"
] | null | null | null | spherical_cluster.py | JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features | 61fe5a231a0062d9939d1ccdfc0babcbe9562867 | [
"MIT"
] | 2 | 2020-04-09T10:50:50.000Z | 2021-09-28T00:50:23.000Z | # @Author: Joey Teng
# @Email: joey.teng.dev@gmail.com
# @Filename: spherical_cluster.py
# @Last modified by: Joey Teng
# @Last modified time: 25-Mar-2018
"""Obtain clusters and calculate meta-features.
Args:
dataset_filename (string): path to the dataset
Predefined types:
Point (dict): {'coordinate': (float, ...), 'label': int}
Dataset (list): list of dict objects:
[Point, ...]
Vertex (tuple): Point['coordinate']
Vertices (list): [Vertex, ...]
Output files:
dataset_filename.output.json: calculated meta-features.
dataset_filename.clusters.json: calculated clusters.
dataset_filename.log: log file
"""
import argparse
import collections
import json
import logging
import logging.handlers
import math
import multiprocessing.pool
import os
import numpy
import meta_features
INFINITESIMAL = 1e-323
PROCESS_COUNT = int(os.cpu_count() / 2)
def initialize_logger(
        name='LOG',
        filename=None,
        level=logging.DEBUG,
        filemode='a'):
    """Create and configure a logger from the standard logging module.

    Args:
        name (string, optional): Name of the logger. Defaults to 'LOG'.
        filename (string, optional): Path of the log file. When None
            (the default), the logger streams to standard output.
        level (logging level, optional): Defaults to logging.DEBUG.
        filemode (string, optional): 'w' or 'a' (overwrite or append).
            Defaults to 'a'. Only used when filename is given.

    Returns:
        (logging.Logger, logging.Handler): the configured logger and the
        handler that was attached to it.
    """
    fmt = ('%(asctime)s %(levelname)s\n'
           '    %(filename)s:%(lineno)s: %(name)s: %(message)s')
    if filename is None:
        sink = logging.StreamHandler()
    else:
        sink = logging.handlers.RotatingFileHandler(
            filename=filename, mode=filemode)
    sink.setFormatter(logging.Formatter(fmt))
    logger = logging.getLogger(name)
    logger.addHandler(sink)
    logger.setLevel(level)
    return logger, sink
def load_dataset(filename):
    """Load data from a csv file.

    Args:
        filename (string): path of input file.
            CSV format, one instance per line:
                [coordinate, ...] + [label]

    Returns:
        Dataset: list of {'coordinate': (float, ...), 'label': int}
    """
    # Use a context manager so the file handle is closed deterministically
    # (the previous version leaked it until garbage collection).
    with open(filename, 'r') as source:
        content = source.read().strip()

    # Robustness: an empty file yields an empty dataset instead of raising.
    if not content:
        return []

    dataset = []
    for line in content.split('\n'):
        fields = line.strip().split(',')
        dataset.append({
            'coordinate': tuple(map(float, fields[:-1])),
            'label': int(fields[-1])})
    return dataset
def initialize_cluster(coordinates):
    """Build a single spherical cluster covering the given coordinates.

    Factory function: the sphere is centred on the mean of the points and
    its radius is the largest distance from that centroid to any point.

    Args:
        coordinates (list): coordinates to include, [Vertex, ...]

    Returns:
        dict: a cluster initialized with the given coordinates
        {
            'centroid' (Vertex): centroid of the sphere,
            'radius' (float): radius of the sphere,
            'points' (list): instances in the cluster [Vertex, ...],
            'size' (int): number of instances covered, len(points),
            'log-volume' (float): log of the volume of the sphere
        }
    """
    vectors = [numpy.array(point) for point in coordinates]
    centroid = sum(vectors) / len(vectors)
    radius = max(numpy.linalg.norm(vector - centroid) for vector in vectors)
    return {
        'centroid': tuple(centroid),
        'radius': radius,
        'points': coordinates,
        'size': len(coordinates),
        'log-volume': calculate_log_volume(len(centroid), radius)
    }
def calculate_distance(lhs, rhs):
    """Return the Euclidean distance between two points.

    Args:
        lhs, rhs (Vertex): coordinates of the two points

    Returns:
        float: Euclidean (L2) distance between them
    """
    difference = numpy.array(lhs) - numpy.array(rhs)
    return numpy.linalg.norm(difference)
def calculate_log_volume(dimension, radius):
    """Return the log-volume of a sphere of the given dimension and radius.

    Args:
        dimension (int): dimension of the space
        radius (float): radius of the sphere

    Returns:
        float: natural log of the sphere's volume.  A radius that is
        (numerically) zero is replaced by INFINITESIMAL so the logarithm
        stays defined.

    Raises:
        ValueError: when the intermediate math fails or yields NaN.
    """
    if math.isclose(radius, 0):
        radius = INFINITESIMAL

    half_dimension = dimension / 2.0
    try:
        log_volume = (half_dimension * math.log(math.pi)
                      + dimension * math.log(radius)
                      - math.lgamma(half_dimension + 1))
    except ValueError as message:
        # Re-raise with the exact expression spelled out for debugging.
        raise ValueError("".join([
            "{0}\n".format(message),
            "(({0} / 2.0) * ln(pi) + ({0} * ln({1})".format(dimension, radius),
            " - ln(gamma({0} / 2.0 + 1)))".format(dimension)]))

    if math.isnan(log_volume):
        raise ValueError(
            "Volume is NaN: pi ^ " +
            "({0} / 2.0) / gamma({0} / 2.0 + 1) * {1} ^ {0}".format(
                dimension, radius))
    return log_volume
def float_less_or_equal(lhs, rhs, **kwargs):
    """Return True when float lhs <= rhs, treating near-equal as equal.

    Equality is decided by numpy.isclose() with its default tolerances
    (overridable via kwargs), so values differing only by floating-point
    noise compare as equal.

    Args:
        lhs, rhs (float): values to compare
        kwargs: forwarded to numpy.isclose()

    Returns:
        bool: result of the comparison
    """
    if numpy.isclose(lhs, rhs, **kwargs):
        return True
    return lhs < rhs
def check_inside_cluster(cluster, point):
    """Return True when the point lies on or inside the cluster's sphere.

    Args:
        cluster (dict): cluster to be checked
            {
                'centroid' (Vertex): centroid of the cluster,
                'radius' (float): radius of the cluster
            }
        point (Vertex): point to be checked

    Returns:
        bool: True when the boundary encompasses the point
    """
    distance = calculate_distance(cluster['centroid'], point)
    return float_less_or_equal(distance, cluster['radius'])
def check_homogeneity(cluster, label, clusters):
    """Check that *cluster* overlaps no cluster carrying a different label.

    A homogeneous cluster may intersect clusters of its own label but
    never one with another label, so no region of the space becomes
    ambiguous during categorisation.

    Args:
        cluster (dict): candidate with 'centroid' (Vertex) and
            'radius' (float) entries.
        label: label attached to *cluster*.
        clusters (dict): mapping of label -> list of clusters.

    Returns:
        bool: True when the candidate cluster is homogeneous.
    """
    for other_label, other_clusters in clusters.items():
        if other_label == label:
            continue
        for other in other_clusters:
            gap = calculate_distance(
                cluster['centroid'], other['centroid'])
            if float_less_or_equal(gap, cluster['radius'] + other['radius']):
                return False
    return True
def clustering(dataset, logger):
    """Calculate all spherical clusters by greedy agglomeration.

    Every instance starts as its own single-point cluster; clusters of
    the same label are repeatedly merged with their nearest neighbour
    as long as the merged sphere stays homogeneous (does not overlap a
    differently-labelled cluster).

    Args:
        dataset (list): instances in the space, each a dict with
            'coordinate' (Vertex) and 'label' entries.
        logger (logging.Logger): progress logger.

    Returns:
        dict: label -> list of cluster dicts, each holding 'centroid',
        'radius', 'points', 'size' and 'log-volume' entries (as built
        by initialize_cluster).
    """
    logger.info('Sorting datasets...')
    dataset.sort(key=lambda x: x['coordinate'])
    logger.info('Initialise clusters...')
    clusters = collections.defaultdict(list)
    # Seed: one single-point cluster per instance, grouped by label.
    for instance in dataset:
        clusters[instance['label']].append(
            initialize_cluster((instance['coordinate'], )))
    logger.info('Merging clusters...')
    logger_count = 0
    for label, homo_clusters in clusters.items():
        index = 0
        while index < len(homo_clusters):
            current = homo_clusters[index]
            merging_index = -1
            distance = float('inf')
            # Search only clusters after `index` for the nearest
            # merge candidate (earlier ones were already visited).
            for j_index, cluster in enumerate(homo_clusters[index + 1:]):
                new_distance = calculate_distance(
                    current['centroid'], cluster['centroid'])
                if new_distance < distance:
                    merging_index = j_index + index + 1
                    distance = new_distance
            if merging_index == -1:
                # No candidate left after this cluster: move on.
                index += 1
                continue
            cluster = initialize_cluster(
                current['points'] + homo_clusters[merging_index]['points'])
            if (check_homogeneity(cluster, label, clusters)):
                # Swap-and-pop removes the merged partner in O(1);
                # `index` is NOT advanced so the grown cluster keeps
                # merging with further neighbours.
                homo_clusters[merging_index], homo_clusters[-1] =\
                    homo_clusters[-1], homo_clusters[merging_index]
                homo_clusters.pop()
                current = cluster
                homo_clusters[index] = current
            else:
                # Merged sphere would overlap another label: keep the
                # current cluster as-is and advance.
                index += 1
        logger_count += 1
        logger.info('{0}/{1} categories completed'.format(
            logger_count, len(clusters.keys())))
    return clusters
def main(args):
    """Dispatch one worker task per dataset file over a process pool.

    Args:
        args (argparse.Namespace): parsed CLI options; uses ``paths``
            (dataset files) and ``log`` (log file path).
    """
    log_file = args.log
    logger, handler = initialize_logger("Parent", log_file)
    logger.info('Start: Version 2.1.1')
    logger.debug('Logger initialized')
    logger.debug('argparse: %r', args)
    # Handler is removed before forking so children attach their own.
    logger.removeHandler(handler)
    _args = []
    for dataset_filename in args.paths:
        # Derived output filenames sit next to the dataset file.
        clusters_filename = dataset_filename + ".clusters.json"
        output_filename = dataset_filename + ".output.json"
        _args.append(tuple([
            dataset_filename,
            clusters_filename,
            output_filename,
            log_file]))
    pool = multiprocessing.pool.Pool(PROCESS_COUNT)
    # list() forces evaluation so all tasks finish before close().
    list(pool.map(task_processing, _args))
    pool.close()
    pool.join()
def task_processing(args):  # Take note here!!!
    """Unwrap the args tuple to adapt a function with multiple args to map.

    Args:
        args (tuple): (dataset_filename, clusters_filename,
            output_filename, log_file), as built by main().
    """
    def worker(
            dataset_filename,
            clusters_filename,
            output_filename,
            log_file):
        """Link the submodules to process one dataset file end-to-end."""
        logger, handler = initialize_logger(dataset_filename, log_file)
        logger.debug('Logger initialized')
        logger.debug('Loading dataset')
        dataset = load_dataset(dataset_filename)
        logger.info('Dataset loaded')
        logger.info('Trying to load clusters from %s', clusters_filename)
        clusters = None
        try:
            # BUG FIX: use 'with' so the handle is closed even when
            # decoding fails (previously every open() leaked).
            with open(clusters_filename, 'r') as clusters_file:
                clusters = json.load(clusters_file)
        except FileNotFoundError:
            logger.warning('Clusters data file not found')
        except json.decoder.JSONDecodeError:
            logger.warning('File broken. Not Json Decodable')
        if not clusters:
            # No usable cache: recompute the clusters and persist them.
            logger.debug('Clustering data points')
            clusters = clustering(dataset, logger)
            logger.debug(
                'Dumping clusters data into json file: %s', clusters_filename)
            with open(clusters_filename, 'w') as clusters_file:
                json.dump(clusters, clusters_file)
        logger.info('Data points clustered')
        logger.debug('Calculating meta-feature indicators')
        features = meta_features.meta_features(clusters)
        # BUG FIX: the log line previously reported clusters_filename
        # although the dump goes to output_filename.
        logger.debug(
            'Dumping meta-feature indicators into json file: %s',
            output_filename)
        with open(output_filename, 'w') as output_file:
            json.dump(features, output_file)
        logger.info('Meta-feature indicators calculated')
        logger.info('Complete')
        logger.removeHandler(handler)
    return worker(*args)
def traverse(paths):
    """Collect all data files reachable from *paths*, recursing into folders.

    Args:
        paths (list): directory paths to scan; treated as a work queue
            (the caller's list is not mutated).

    Returns:
        list: paths of the data files found. Entries whose names contain
        a known non-data marker (.json, .log, .DS_Store, .png, .html)
        are skipped, matching the original substring filter.
    """
    print("Starting Traverse Through", flush=True)
    excluded_markers = ('.json', '.log', '.DS_Store', '.png', '.html')
    files = []
    pending = list(paths)
    while pending:
        path = pending.pop(0)
        for entry in os.listdir(path):
            full_path = '{0}/{1}'.format(path, entry)
            # BUG FIX: directories were only recursed into when their
            # name happened to contain an excluded marker; any other
            # directory was wrongly reported as a data file. Test for
            # directories first, then apply the name filter to files.
            if os.path.isdir(full_path):
                pending.append(full_path)
            elif not any(marker in entry for marker in excluded_markers):
                files.append(full_path)
    print("Traverse Completed.", flush=True)
    return files
def parse_args():
    """Parse the command line arguments for this tool.

    Returns:
        argparse.Namespace: parsed options, extended with a sorted
        ``paths`` attribute combining the recursively discovered (-r)
        and explicitly listed (-i) input files.
    """
    parser = argparse.ArgumentParser(
        description="Obtain clusters and calculate meta-features")
    parser.add_argument('-r', action='store', nargs='+',
                        default=[], metavar='Directory',
                        help='Recursively processing all files in the folder')
    parser.add_argument('-i', action='store', nargs='+',
                        default=[], metavar='File',
                        help='Files that need to be processed')
    parser.add_argument('--log', action='store', type=str,
                        default='spherical_cluster.log', metavar='Log file',
                        help='Path to the log file')
    args = parser.parse_args()
    collected = traverse(args.r) if args.r else []
    collected.extend(args.i)
    collected.sort()
    args.paths = collected
    return args
if __name__ == '__main__':
    # Script entry point: parse CLI options, then fan work out to the pool.
    args = parse_args()
    main(args)
| 30.636564 | 79 | 0.580272 |
import argparse
import collections
import json
import logging
import logging.handlers
import math
import multiprocessing.pool
import os
import numpy
import meta_features
INFINITESIMAL = 1e-323
PROCESS_COUNT = int(os.cpu_count() / 2)
def initialize_logger(
name='LOG',
filename=None,
level=logging.DEBUG,
filemode='a'):
log_format = '%(asctime)s %(levelname)s\n' + \
' %(filename)s:%(lineno)s: %(name)s: %(message)s'
if filename is None:
handler = logging.StreamHandler()
else:
handler = logging.handlers.RotatingFileHandler(
filename=filename, mode=filemode)
handler.setFormatter(logging.Formatter(log_format))
logger = logging.getLogger(name)
logger.addHandler(handler)
logger.setLevel(level)
return logger, handler
def load_dataset(filename):
return [(
lambda point: {
'coordinate': tuple(map(float, point[:-1])),
'label': int(point[-1])})
(string.strip().rstrip().split(','))
for string in open(filename, 'r').read()
.strip().rstrip().split('\n')]
def initialize_cluster(coordinates):
points = coordinates
_points = list(map(numpy.array, coordinates))
centroid = sum(_points) / len(_points)
radius = max(
map(lambda x, y=centroid: numpy.linalg.norm((x - y)), _points))
return {
'centroid': tuple(centroid),
'radius': radius,
'points': points,
'size': len(points),
'log-volume': calculate_log_volume(len(centroid), radius)
}
def calculate_distance(lhs, rhs):
return numpy.linalg.norm((numpy.array(lhs) - numpy.array(rhs)))
def calculate_log_volume(dimension, radius):
if (math.isclose(radius, 0)):
radius = INFINITESIMAL
try:
log_volume = ((dimension / 2.0) * math.log(math.pi) + dimension *
math.log(radius) - math.lgamma(dimension / 2.0 + 1))
except ValueError as message:
raise ValueError("".join([
"{0}\n".format(message),
"(({0} / 2.0) * ln(pi) + ({0} * ln({1})".format(dimension, radius),
" - ln(gamma({0} / 2.0 + 1)))".format(dimension)]))
if math.isnan(log_volume):
raise ValueError(
"Volume is NaN: pi ^ " +
"({0} / 2.0) / gamma({0} / 2.0 + 1) * {1} ^ {0}".format(
dimension, radius))
return log_volume
def float_less_or_equal(lhs, rhs, **kwargs):
return numpy.isclose(lhs, rhs, **kwargs) or (lhs < rhs)
def check_inside_cluster(cluster, point):
return float_less_or_equal(
calculate_distance(cluster['centroid'], point), cluster['radius'])
def check_homogeneity(cluster, label, clusters):
for _label, _clusters in clusters.items():
if _label == label:
continue
for _cluster in _clusters:
if float_less_or_equal(
calculate_distance(
cluster['centroid'], _cluster['centroid']),
(cluster['radius'] + _cluster['radius'])):
return False
return True
def clustering(dataset, logger):
logger.info('Sorting datasets...')
dataset.sort(key=lambda x: x['coordinate'])
logger.info('Initialise clusters...')
clusters = collections.defaultdict(list)
for instance in dataset:
clusters[instance['label']].append(
initialize_cluster((instance['coordinate'], )))
logger.info('Merging clusters...')
logger_count = 0
for label, homo_clusters in clusters.items():
index = 0
while index < len(homo_clusters):
current = homo_clusters[index]
merging_index = -1
distance = float('inf')
for j_index, cluster in enumerate(homo_clusters[index + 1:]):
new_distance = calculate_distance(
current['centroid'], cluster['centroid'])
if new_distance < distance:
merging_index = j_index + index + 1
distance = new_distance
if merging_index == -1:
index += 1
continue
cluster = initialize_cluster(
current['points'] + homo_clusters[merging_index]['points'])
if (check_homogeneity(cluster, label, clusters)):
homo_clusters[merging_index], homo_clusters[-1] =\
homo_clusters[-1], homo_clusters[merging_index]
homo_clusters.pop()
current = cluster
homo_clusters[index] = current
else:
index += 1
logger_count += 1
logger.info('{0}/{1} categories completed'.format(
logger_count, len(clusters.keys())))
return clusters
def main(args):
log_file = args.log
logger, handler = initialize_logger("Parent", log_file)
logger.info('Start: Version 2.1.1')
logger.debug('Logger initialized')
logger.debug('argparse: %r', args)
logger.removeHandler(handler)
_args = []
for dataset_filename in args.paths:
clusters_filename = dataset_filename + ".clusters.json"
output_filename = dataset_filename + ".output.json"
_args.append(tuple([
dataset_filename,
clusters_filename,
output_filename,
log_file]))
pool = multiprocessing.pool.Pool(PROCESS_COUNT)
list(pool.map(task_processing, _args))
pool.close()
pool.join()
def task_processing(args):
def worker(
dataset_filename,
clusters_filename,
output_filename,
log_file):
logger, handler = initialize_logger(dataset_filename, log_file)
logger.debug('Logger initialized')
logger.debug('Loading dataset')
dataset = load_dataset(dataset_filename)
logger.info('Dataset loaded')
logger.info('Trying to load clusters from %s', clusters_filename)
clusters = None
try:
clusters = json.load(open(clusters_filename, 'r'))
except FileNotFoundError:
logger.warning('Clusters data file not found')
except json.decoder.JSONDecodeError:
logger.warning('File broken. Not Json Decodable')
if not clusters:
logger.debug('Clustering data points')
clusters = clustering(dataset, logger)
logger.debug(
'Dumping clusters data into json file: %s', clusters_filename)
json.dump(clusters, open(clusters_filename, 'w'))
logger.info('Data points clustered')
logger.debug('Calculating meta-feature indicators')
features = meta_features.meta_features(clusters)
logger.debug(
'Dumping meta-feature indicators into json file: %s',
clusters_filename)
json.dump(features, open(output_filename, 'w'))
logger.info('Meta-feature indicators calculated')
logger.info('Complete')
logger.removeHandler(handler)
return worker(*args)
def traverse(paths):
print("Starting Traverse Through", flush=True)
files = []
while paths:
path = paths[0]
paths = paths[1:]
for file in os.listdir(path):
if (file.find('.json') == -1
and file.find('.log') == -1
and file.find('.DS_Store') == -1
and file.find('.png') == -1
and file.find('.html') == -1):
files.append('{0}/{1}'.format(path, file))
elif os.path.isdir('{0}/{1}'.format(path, file)):
paths.append('{0}/{1}'.format(path, file))
print("Traverse Completed.", flush=True)
return files
def parse_args():
parser = argparse.ArgumentParser(
description="Obtain clusters and calculate meta-features")
parser.add_argument('-r', action='store', nargs='+',
default=[], metavar='Directory',
help='Recursively processing all files in the folder')
parser.add_argument('-i', action='store', nargs='+',
default=[], metavar='File',
help='Files that need to be processed')
parser.add_argument('--log', action='store', type=str,
default='spherical_cluster.log', metavar='Log file',
help='Path to the log file')
args = parser.parse_args()
paths = []
if (args.r):
paths = traverse(args.r)
paths.extend(args.i)
paths.sort()
args.paths = paths
return args
if __name__ == '__main__':
args = parse_args()
main(args)
| true | true |
1c2b67624d4b2e4333ce0b5b480b99628453e500 | 2,535 | py | Python | docs/conf.py | eriksf/reproducible_python | bd34b17ddf4b9c1eaab5c6bf18750fb53d21ac96 | [
"BSD-3-Clause"
] | 1 | 2020-07-11T03:49:59.000Z | 2020-07-11T03:49:59.000Z | docs/conf.py | eriksf/reproducible_python | bd34b17ddf4b9c1eaab5c6bf18750fb53d21ac96 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | eriksf/reproducible_python | bd34b17ddf4b9c1eaab5c6bf18750fb53d21ac96 | [
"BSD-3-Clause"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- RTD configuration -------------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- Project information -----------------------------------------------------
project = 'Reproducible Science - Python Packaging'
copyright = '2020, Texas Advanced Computing Center'
author = 'Texas Advanced Computing Center'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# TACC logo
html_logo = 'images/TACC-White-No-Mask.png'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 32.922078 | 79 | 0.665483 |
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
project = 'Reproducible Science - Python Packaging'
copyright = '2020, Texas Advanced Computing Center'
author = 'Texas Advanced Computing Center'
version = ''
release = ''
extensions = [
]
templates_path = ['_templates']
master_doc = 'index'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'default'
html_logo = 'images/TACC-White-No-Mask.png'
html_theme = 'default'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
html_static_path = ['_static']
| true | true |
1c2b686b543c717aea4a52684174aa319361d211 | 5,467 | py | Python | fouruc/manager/migrations/0001_initial.py | Alfareiza/4uc-manager-silver | 8a83d2a9e3630d18322c78e0fd632e73bf59a799 | [
"MIT"
] | null | null | null | fouruc/manager/migrations/0001_initial.py | Alfareiza/4uc-manager-silver | 8a83d2a9e3630d18322c78e0fd632e73bf59a799 | [
"MIT"
] | 12 | 2021-05-11T11:18:16.000Z | 2021-09-30T14:13:30.000Z | fouruc/manager/migrations/0001_initial.py | Alfareiza/4uc-manager-silver | 8a83d2a9e3630d18322c78e0fd632e73bf59a799 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.2 on 2021-09-29 14:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the manager app.

    Auto-generated by Django; creates the Account, Category, Client,
    Playlist, Player, Media and Register tables. Do not edit by hand —
    generate a follow-up migration instead.
    """

    initial = True
    dependencies = [
    ]
    operations = [
        # Account: a remote account reachable through url/token; other
        # models hang off it via ForeignKey.
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                ('url', models.CharField(max_length=100, unique=True)),
                ('token', models.CharField(max_length=100)),
                ('slug', models.SlugField(null=True)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'ordering': ['-date_added'],
            },
        ),
        # Category: grouping of media within an account.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category_id', models.IntegerField()),
                ('name', models.CharField(max_length=64)),
                ('description', models.CharField(max_length=128)),
                ('autoShuffle', models.BooleanField()),
                ('updateflow', models.IntegerField()),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='manager.account')),
            ],
        ),
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Playlist',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('playlist_id', models.IntegerField()),
                ('name', models.CharField(max_length=128)),
                ('isSubPlaylist', models.BooleanField()),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='playlists', to='manager.account')),
            ],
        ),
        # Player: playback device; keeps its assigned playlist alive as
        # NULL when the playlist is deleted (SET_NULL).
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('player_id', models.IntegerField()),
                ('name', models.CharField(max_length=128)),
                ('platform', models.CharField(max_length=28)),
                ('lastContactInMinutes', models.IntegerField(null=True)),
                ('group_id', models.IntegerField()),
                ('group_name', models.CharField(max_length=128)),
                ('status_id', models.IntegerField()),
                ('status_name', models.CharField(max_length=128)),
                ('lastLogReceived', models.DateTimeField(null=True)),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='players', to='manager.account')),
                ('playlist', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='players', to='manager.playlist')),
            ],
        ),
        # Media: a piece of content; many-to-many with categories and
        # playlists, optional owning client.
        migrations.CreateModel(
            name='Media',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('media_id', models.IntegerField()),
                ('name', models.CharField(max_length=128)),
                ('file', models.CharField(max_length=13)),
                ('durationInSeconds', models.IntegerField()),
                ('startDate', models.DateField(blank=True, null=True)),
                ('endDate', models.DateField(blank=True, null=True)),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='medias', to='manager.account')),
                ('category', models.ManyToManyField(related_name='medias', to='manager.Category')),
                ('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='medias', to='manager.client')),
                ('playlist', models.ManyToManyField(related_name='medias', to='manager.Playlist')),
            ],
        ),
        # Register: playback log entry; uniqueness prevents duplicate
        # records for the same player/time/nickname.
        migrations.CreateModel(
            name='Register',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nickname', models.CharField(max_length=128)),
                ('date', models.DateField()),
                ('time', models.TimeField()),
                ('player_id', models.IntegerField()),
                ('media_id', models.IntegerField()),
                ('media_type', models.CharField(max_length=2)),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='records', to='manager.account')),
            ],
            options={
                'ordering': ['date', 'time'],
                'unique_together': {('date', 'time', 'player_id', 'nickname')},
            },
        ),
    ]
| 49.7 | 159 | 0.565209 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('url', models.CharField(max_length=100, unique=True)),
('token', models.CharField(max_length=100)),
('slug', models.SlugField(null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['-date_added'],
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_id', models.IntegerField()),
('name', models.CharField(max_length=64)),
('description', models.CharField(max_length=128)),
('autoShuffle', models.BooleanField()),
('updateflow', models.IntegerField()),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='manager.account')),
],
),
migrations.CreateModel(
name='Client',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('date_added', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Playlist',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('playlist_id', models.IntegerField()),
('name', models.CharField(max_length=128)),
('isSubPlaylist', models.BooleanField()),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='playlists', to='manager.account')),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('player_id', models.IntegerField()),
('name', models.CharField(max_length=128)),
('platform', models.CharField(max_length=28)),
('lastContactInMinutes', models.IntegerField(null=True)),
('group_id', models.IntegerField()),
('group_name', models.CharField(max_length=128)),
('status_id', models.IntegerField()),
('status_name', models.CharField(max_length=128)),
('lastLogReceived', models.DateTimeField(null=True)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='players', to='manager.account')),
('playlist', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='players', to='manager.playlist')),
],
),
migrations.CreateModel(
name='Media',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('media_id', models.IntegerField()),
('name', models.CharField(max_length=128)),
('file', models.CharField(max_length=13)),
('durationInSeconds', models.IntegerField()),
('startDate', models.DateField(blank=True, null=True)),
('endDate', models.DateField(blank=True, null=True)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='medias', to='manager.account')),
('category', models.ManyToManyField(related_name='medias', to='manager.Category')),
('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='medias', to='manager.client')),
('playlist', models.ManyToManyField(related_name='medias', to='manager.Playlist')),
],
),
migrations.CreateModel(
name='Register',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nickname', models.CharField(max_length=128)),
('date', models.DateField()),
('time', models.TimeField()),
('player_id', models.IntegerField()),
('media_id', models.IntegerField()),
('media_type', models.CharField(max_length=2)),
('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='records', to='manager.account')),
],
options={
'ordering': ['date', 'time'],
'unique_together': {('date', 'time', 'player_id', 'nickname')},
},
),
]
| true | true |
1c2b687fcbe5edccf9480b9de721b1f6ece5331a | 1,873 | py | Python | peryton/Flow_analysis/list_spy.py | jweinst1/Peryton | 250fba0bf35d27c9d0e9a96d4adfdf92987189e0 | [
"Apache-2.0"
] | 97 | 2016-05-30T08:02:53.000Z | 2022-03-25T05:38:19.000Z | peryton/Flow_analysis/list_spy.py | jweinst1/Peryton | 250fba0bf35d27c9d0e9a96d4adfdf92987189e0 | [
"Apache-2.0"
] | 3 | 2016-12-26T05:18:06.000Z | 2022-03-20T22:52:41.000Z | peryton/Flow_analysis/list_spy.py | jweinst1/Peryton | 250fba0bf35d27c9d0e9a96d4adfdf92987189e0 | [
"Apache-2.0"
] | 30 | 2016-10-13T05:42:02.000Z | 2022-03-05T05:22:55.000Z | #object that allows lists to be spyed on through a process
import sys
import operator
class listspy(list):
    """List-like wrapper that records every operation performed on it.

    The spied-on values live in ``container`` and each intercepted call
    is appended to ``operations`` as a tuple of the calling method name
    plus its arguments, so a process's interactions with the list can be
    inspected afterwards.
    """

    def __init__(self, *values):
        # Data being spied on, plus the audit trail of operations.
        self.container = [elem for elem in values]
        self.operations = []

    def __getattribute__(self, item):
        # 'operations', '__class__' and '__dict__' must bypass logging,
        # otherwise recording an access would itself trigger recording.
        if item == 'operations':
            return object.__getattribute__(self, item)
        elif item == '__class__':
            return object.__getattribute__(self, item)
        elif item == '__dict__':
            return object.__getattribute__(self, item)
        else:
            self.operations.append((sys._getframe().f_code.co_name, item))
            return object.__getattribute__(self, item)

    def __repr__(self):
        # Consistency fix: log a tuple like every other entry (the
        # original parentheses around the name were a no-op).
        self.operations.append((sys._getframe().f_code.co_name,))
        return str(self.container)

    def __str__(self):
        self.operations.append((sys._getframe().f_code.co_name,))
        return str(self.container)

    def __getitem__(self, item):
        # BUG FIX: the log entry was passed as two positional arguments
        # to list.append (TypeError), and 'container' was misspelled
        # 'continer' (AttributeError).
        self.operations.append((sys._getframe().f_code.co_name, item))
        return self.container[item]

    def __setitem__(self, key, value):
        # BUG FIX: list.append accepts a single argument; wrap the log
        # entry in one tuple.
        self.operations.append((sys._getframe().f_code.co_name, key, value))
        self.container[key] = value
| 41.622222 | 81 | 0.658836 |
import sys
import operator
class listspy(list):
def __init__(self, *values):
self.container = [elem for elem in values]
self.operations = []
def __getattribute__(self, item):
if item == 'operations':
return object.__getattribute__(self, item)
elif item == '__class__':
return object.__getattribute__(self, item)
elif item == '__dict__':
return object.__getattribute__(self, item)
else:
self.operations.append((sys._getframe().f_code.co_name, item))
return object.__getattribute__(self, item)
def __repr__(self):
self.operations.append((sys._getframe().f_code.co_name))
return str(self.container)
def __str__(self):
self.operations.append((sys._getframe().f_code.co_name))
return str(self.container)
def __getitem__(self, item):
self.operations.append((sys._getframe().f_code.co_name), item)
return self.continer[item]
def __setitem__(self, key, value):
self.operations.append((sys._getframe().f_code.co_name), key, value)
self.container[key] = value
| true | true |
1c2b68ca21ee50d8f9d052a9e69cdd6fbfe740eb | 1,823 | py | Python | tests/test_typeutils.py | victor-torres/andi | c77a57013ae7d6c0d871d582edf9ab6edbb73fb8 | [
"BSD-3-Clause"
] | 13 | 2019-08-28T23:08:38.000Z | 2022-03-10T14:32:21.000Z | tests/test_typeutils.py | victor-torres/andi | c77a57013ae7d6c0d871d582edf9ab6edbb73fb8 | [
"BSD-3-Clause"
] | 21 | 2020-02-10T15:26:46.000Z | 2021-02-11T18:41:12.000Z | tests/test_typeutils.py | victor-torres/andi | c77a57013ae7d6c0d871d582edf9ab6edbb73fb8 | [
"BSD-3-Clause"
] | 2 | 2020-04-27T22:08:29.000Z | 2021-04-24T02:18:25.000Z | # -*- coding: utf-8 -*-
from typing import Union, Optional
import pytest
from andi.typeutils import get_union_args, get_callable_func_obj
def test_get_union_args():
    # Union members come back as a list, in declaration order.
    assert get_union_args(Union[str, int]) == [str, int]
def test_get_union_args_optional():
    # Optional[...] contributes NoneType as the final union member.
    assert get_union_args(Optional[Union[str, int]]) == [str, int, None.__class__]
def test_get_callable_func_obj_functions():
    # A plain function is returned unchanged (identity, not a copy).
    def foo():
        pass
    assert get_callable_func_obj(foo) is foo
def test_get_callable_func_obj_class():
    class Foo:
        x = 5
        def __init__(self):
            pass
        def meth(self):
            pass
        @staticmethod
        def staticmeth(cls):
            pass
    foo = Foo()
    # happy path: classes resolve to __init__, methods to themselves
    assert get_callable_func_obj(Foo) is Foo.__init__
    assert get_callable_func_obj(Foo.meth) is Foo.meth
    assert get_callable_func_obj(Foo.staticmeth) is Foo.staticmeth
    assert get_callable_func_obj(foo.meth) == foo.meth
    assert get_callable_func_obj(foo.staticmeth) is foo.staticmeth
    # non-callables raise: a class attribute value and an instance of a
    # class without __call__
    with pytest.raises(TypeError):
        get_callable_func_obj(Foo.x)  # type: ignore
    with pytest.raises(TypeError):
        get_callable_func_obj(foo)
def test_get_callable_func_classmethods():
    # Classmethods are bound identically whether accessed on the class
    # or on an instance, so equality (not identity) is asserted.
    class Foo:
        @classmethod
        def clsmeth(cls):
            pass
    foo = Foo()
    assert get_callable_func_obj(Foo.clsmeth) == Foo.clsmeth
    assert get_callable_func_obj(foo.clsmeth) == foo.clsmeth
def test_get_callable_func_obj_call():
    # An instance defining __call__ resolves to its bound __call__,
    # while the class itself still resolves to __init__.
    class Foo:
        def __init__(self):
            pass
        def __call__(self):
            pass
        def meth(self):
            pass
    foo = Foo()
    assert get_callable_func_obj(Foo) is Foo.__init__
    assert get_callable_func_obj(foo.meth) == foo.meth
    assert get_callable_func_obj(foo) == foo.__call__
| 21.963855 | 82 | 0.665387 |
from typing import Union, Optional
import pytest
from andi.typeutils import get_union_args, get_callable_func_obj
def test_get_union_args():
assert get_union_args(Union[str, int]) == [str, int]
def test_get_union_args_optional():
assert get_union_args(Optional[Union[str, int]]) == [str, int, None.__class__]
def test_get_callable_func_obj_functions():
def foo():
pass
assert get_callable_func_obj(foo) is foo
def test_get_callable_func_obj_class():
class Foo:
x = 5
def __init__(self):
pass
def meth(self):
pass
@staticmethod
def staticmeth(cls):
pass
foo = Foo()
assert get_callable_func_obj(Foo) is Foo.__init__
assert get_callable_func_obj(Foo.meth) is Foo.meth
assert get_callable_func_obj(Foo.staticmeth) is Foo.staticmeth
assert get_callable_func_obj(foo.meth) == foo.meth
assert get_callable_func_obj(foo.staticmeth) is foo.staticmeth
with pytest.raises(TypeError):
get_callable_func_obj(Foo.x)
with pytest.raises(TypeError):
get_callable_func_obj(foo)
def test_get_callable_func_classmethods():
class Foo:
@classmethod
def clsmeth(cls):
pass
foo = Foo()
assert get_callable_func_obj(Foo.clsmeth) == Foo.clsmeth
assert get_callable_func_obj(foo.clsmeth) == foo.clsmeth
def test_get_callable_func_obj_call():
    """For callables: a class maps to __init__, an instance to __call__."""
    class Foo:
        def __init__(self):
            pass
        def __call__(self):
            pass
        def meth(self):
            pass
    foo = Foo()
    # Calling the class invokes __init__, so that is the underlying function.
    assert get_callable_func_obj(Foo) is Foo.__init__
    assert get_callable_func_obj(foo.meth) == foo.meth
    # An instance with __call__ resolves to its bound __call__ method.
    assert get_callable_func_obj(foo) == foo.__call__
| true | true |
1c2b68f04b50f0983f33a900ce9f7b937251c3b5 | 1,448 | py | Python | cride/circles/models/circles.py | mdark1001/crideApiRest | 228efec90d7f1ad8a6766b5a8085dd6bbf49fc8a | [
"MIT"
] | null | null | null | cride/circles/models/circles.py | mdark1001/crideApiRest | 228efec90d7f1ad8a6766b5a8085dd6bbf49fc8a | [
"MIT"
] | null | null | null | cride/circles/models/circles.py | mdark1001/crideApiRest | 228efec90d7f1ad8a6766b5a8085dd6bbf49fc8a | [
"MIT"
] | null | null | null | """
@author: Miguel Cabrera R. <miguel.cabrera@oohel.net>
@date: 10/04/21
@name: circles
"""
from cride.utils.models import CrideModel
from django.db import models
class Circle(CrideModel):
    """Circle model.

    A circle groups users (via circles.Membership) and keeps denormalized
    ride statistics plus visibility/verification flags.
    """
    name = models.CharField(
        max_length=150,
    )
    # URL-safe unique identifier for the circle.
    slug = models.SlugField(
        unique=True,
        max_length=40,
    )
    description = models.TextField(
        null=True,
        blank=True,
    )
    picture = models.ImageField(
        upload_to='circles/pictures',
        blank=True,
        null=True
    )
    # Ride statistics, stored denormalized on the circle.
    rides_offered = models.PositiveIntegerField(
        default=0
    )
    rides_taken = models.PositiveIntegerField(
        default=0
    )
    is_verified = models.BooleanField(
        default=False,
        help_text='Circle is a official group'
    )
    is_public = models.BooleanField(
        help_text='Circle is checked as public',
        default=False
    )
    is_limited = models.BooleanField(
        default=False,
        help_text='Check if a circle has limited number of members',
    )
    member_limited = models.PositiveIntegerField(
        default=0,
        help_text='Number of members',
    )
    # Membership data (invitations, etc.) lives on the through model.
    members = models.ManyToManyField(
        'users.User',
        through='circles.Membership',
        through_fields=('circle', 'user')
    )

    def __str__(self):
        return self.name

    class Meta(CrideModel.Meta):
        # Fix: the original listed '-rides_taken' twice; the duplicate is
        # dropped (identical query results).
        # NOTE(review): the intent may have been
        # ['-rides_offered', '-rides_taken'] — confirm with the author.
        ordering = ['-rides_taken']
| 22.625 | 68 | 0.610497 | from cride.utils.models import CrideModel
from django.db import models
class Circle(CrideModel):
    """Circle model.

    A circle groups users (via circles.Membership) and keeps denormalized
    ride statistics plus visibility/verification flags.
    """
    name = models.CharField(
        max_length=150,
    )
    # URL-safe unique identifier for the circle.
    slug = models.SlugField(
        unique=True,
        max_length=40,
    )
    description = models.TextField(
        null=True,
        blank=True,
    )
    picture = models.ImageField(
        upload_to='circles/pictures',
        blank=True,
        null=True
    )
    # Ride statistics, stored denormalized on the circle.
    rides_offered = models.PositiveIntegerField(
        default=0
    )
    rides_taken = models.PositiveIntegerField(
        default=0
    )
    is_verified = models.BooleanField(
        default=False,
        help_text='Circle is a official group'
    )
    is_public = models.BooleanField(
        help_text='Circle is checked as public',
        default=False
    )
    is_limited = models.BooleanField(
        default=False,
        help_text='Check if a circle has limited number of members',
    )
    member_limited = models.PositiveIntegerField(
        default=0,
        help_text='Number of members',
    )
    # Membership data (invitations, etc.) lives on the through model.
    members = models.ManyToManyField(
        'users.User',
        through='circles.Membership',
        through_fields=('circle', 'user')
    )

    def __str__(self):
        return self.name

    class Meta(CrideModel.Meta):
        # Fix: the original listed '-rides_taken' twice; the duplicate is
        # dropped (identical query results).
        # NOTE(review): the intent may have been
        # ['-rides_offered', '-rides_taken'] — confirm with the author.
        ordering = ['-rides_taken']
| true | true |
1c2b69d0cf5b6359b4654c47953a0a04bddff8cf | 609 | py | Python | aioclustermanager/service.py | sunbit/aioclustermanager | f5a2f4ba7936a75c7748cff9f77c3bfff1a3a61d | [
"BSD-3-Clause"
] | null | null | null | aioclustermanager/service.py | sunbit/aioclustermanager | f5a2f4ba7936a75c7748cff9f77c3bfff1a3a61d | [
"BSD-3-Clause"
] | 4 | 2019-07-23T14:46:34.000Z | 2020-08-23T21:59:58.000Z | aioclustermanager/service.py | sunbit/aioclustermanager | f5a2f4ba7936a75c7748cff9f77c3bfff1a3a61d | [
"BSD-3-Clause"
] | 2 | 2020-05-21T17:32:23.000Z | 2021-05-11T12:17:56.000Z | class Service:
    """Generic job class."""
    def __init__(self, namespace=None, name=None, ports=None, selector=None, type=None, data=None, **kw):
        # Wrap a ready-made payload if given; otherwise build one with
        # self.create(), which is not defined on this base class and is
        # presumably supplied by concrete subclasses — TODO confirm.
        if data is not None:
            self._raw = data
        else:
            self._raw = self.create(
                namespace,
                name=name,
                ports=ports,
                selector=selector,
                type=type,
                **kw)
    @property
    def id(self):
        """Identifier of the service; must be implemented by subclasses."""
        raise NotImplementedError()
    def get_payload(self):
        """Serialized payload; must be implemented by subclasses."""
        raise NotImplementedError()
    def payload(self):
        """Return the raw payload data as stored."""
        return self._raw
| 24.36 | 105 | 0.517241 | class Service:
def __init__(self, namespace=None, name=None, ports=None, selector=None, type=None, data=None, **kw):
if data is not None:
self._raw = data
else:
self._raw = self.create(
namespace,
name=name,
ports=ports,
selector=selector,
type=type,
**kw)
@property
def id(self):
raise NotImplementedError()
def get_payload(self):
raise NotImplementedError()
def payload(self):
return self._raw
| true | true |
1c2b6a42b381ee7f947f4e3bd93a598d50e0e046 | 3,004 | py | Python | src/pages/event_volatility.py | PFX-Public/pfx-app | 9bc6421b49356934d1df311fe399d2bc2b37f63b | [
"MIT"
] | null | null | null | src/pages/event_volatility.py | PFX-Public/pfx-app | 9bc6421b49356934d1df311fe399d2bc2b37f63b | [
"MIT"
] | null | null | null | src/pages/event_volatility.py | PFX-Public/pfx-app | 9bc6421b49356934d1df311fe399d2bc2b37f63b | [
"MIT"
] | null | null | null | from typing import List
from pathlib import Path
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .event_utils import *
def render() -> None:
    """Render the "Event Volatility" page.

    The user picks a calendar event, a base currency and an FX pair in the
    sidebar; the page then shows histograms and a table of pip volatility
    before, at and after the event release.
    """
    st.title("Event Volatility")
    # Tradable pairs the pair selector is filtered against.
    ccy_pairs = ['EURUSD', 'EURAUD', 'EURCAD', 'EURCHF', 'EURGBP', 'EURJPY', 'EURNZD',
                 'AUDCAD', 'AUDCHF', 'AUDJPY', 'AUDNZD', 'AUDUSD', 'CADCHF', 'CADJPY',
                 'CHFJPY', 'GBPAUD', 'GBPCAD', 'GBPCHF', 'GBPJPY', 'GBPNZD', 'GBPUSD',
                 'NZDCAD', 'NZDCHF', 'NZDJPY', 'NZDUSD', 'USDCAD', 'USDCHF', 'USDJPY']
    ff_calendar_path = Path("data/forex_calendar_01-2011_04-2021_GMT0.csv")
    calendar_df = pd.read_csv(ff_calendar_path)
    # Rows whose Event contains "Holiday" carry no usable figures; drop them.
    calendar_df = calendar_df[~calendar_df['Event'].astype(str).str.contains("Holiday")]
    events: np.ndarray = np.sort(calendar_df['Event'].unique().astype(str))
    event: str = st.sidebar.selectbox("Event:", events, index=0)
    # Currencies the chosen event is reported for; an 'All' entry means the
    # event applies to every currency in the calendar.
    # (A dead, immediately-overwritten base_ccys assignment was removed.)
    base_ccys: np.ndarray = np.sort(
        calendar_df[calendar_df['Event'] == event]['Currency'].unique())
    if 'All' in base_ccys:
        base_ccys = np.sort(calendar_df['Currency'].unique())
    base_ccy = st.sidebar.selectbox("Base Currency:", base_ccys, index=0)
    pairs: List[str] = [i for i in ccy_pairs if base_ccy in i]
    pair: str = st.sidebar.selectbox("Pair:", pairs, index=0)
    df_calendar_filtered = get_df_calendar_filtered(calendar_df, event, base_ccy)
    # Missing figures become the string '0' (columns are treated as text).
    df_calendar_filtered['Actual'] = df_calendar_filtered['Actual'].fillna('0')
    df_calendar_filtered['Forecast'] = df_calendar_filtered['Forecast'].fillna('0')
    df_calendar_filtered['Previous'] = df_calendar_filtered['Previous'].fillna('0')
    df_price_RT = get_df_price_RT(pair)
    result_df = combine_calendar_with_price_RT(df_calendar_filtered,
                                               df_price_RT,
                                               event,
                                               pair, base_ccy)
    result_df = calc_volatility(result_df, base_ccy, event, pair)
    st.header("Volatility Histogram Charts")
    with st.expander("See charts"):
        fig_par, ax_par = plt.subplots()
        ax_par.set_title("Volatility At Event Release")
        ax_par.hist(result_df['Volatility_pips_intraday'].dropna(), bins=10)
        st.pyplot(fig_par)
        fig_bf, ax_bf = plt.subplots()
        ax_bf.set_title("Volatility Before Event Release")
        ax_bf.hist(result_df['Volatility_pips_bf'].dropna(), bins=10)
        st.pyplot(fig_bf)
        fig_af, ax_af = plt.subplots()
        # Fix: this chart plots the *after*-release series; the original
        # title said "Before" (copy/paste error).
        ax_af.set_title("Volatility After Event Release")
        ax_af.hist(result_df['Volatility_pips_af'].dropna(), bins=10)
        st.pyplot(fig_af)
    st.header("Volatility Table")
    with st.expander("See table"):
        # The blank 'hack' column replaces the index so the table renders
        # without row labels.
        st.write(result_df[['Volatility_pips_bf', 'Volatility_pips_af', 'Volatility_pips_intraday']]
                 .dropna()
                 .assign(hack='')
                 .set_index('hack'))
| 40.053333 | 100 | 0.643475 | from typing import List
from pathlib import Path
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .event_utils import *
def render() -> None:
    """Render the "Event Volatility" page.

    The user picks a calendar event, a base currency and an FX pair in the
    sidebar; the page then shows histograms and a table of pip volatility
    before, at and after the event release.
    """
    st.title("Event Volatility")
    # Tradable pairs the pair selector is filtered against.
    ccy_pairs = ['EURUSD', 'EURAUD', 'EURCAD', 'EURCHF', 'EURGBP', 'EURJPY', 'EURNZD',
                 'AUDCAD', 'AUDCHF', 'AUDJPY', 'AUDNZD', 'AUDUSD', 'CADCHF', 'CADJPY',
                 'CHFJPY', 'GBPAUD', 'GBPCAD', 'GBPCHF', 'GBPJPY', 'GBPNZD', 'GBPUSD',
                 'NZDCAD', 'NZDCHF', 'NZDJPY', 'NZDUSD', 'USDCAD', 'USDCHF', 'USDJPY']
    ff_calendar_path = Path("data/forex_calendar_01-2011_04-2021_GMT0.csv")
    calendar_df = pd.read_csv(ff_calendar_path)
    # Rows whose Event contains "Holiday" carry no usable figures; drop them.
    calendar_df = calendar_df[~calendar_df['Event'].astype(str).str.contains("Holiday")]
    events: np.ndarray = np.sort(calendar_df['Event'].unique().astype(str))
    event: str = st.sidebar.selectbox("Event:", events, index=0)
    # Currencies the chosen event is reported for; an 'All' entry means the
    # event applies to every currency in the calendar.
    # (A dead, immediately-overwritten base_ccys assignment was removed.)
    base_ccys: np.ndarray = np.sort(
        calendar_df[calendar_df['Event'] == event]['Currency'].unique())
    if 'All' in base_ccys:
        base_ccys = np.sort(calendar_df['Currency'].unique())
    base_ccy = st.sidebar.selectbox("Base Currency:", base_ccys, index=0)
    pairs: List[str] = [i for i in ccy_pairs if base_ccy in i]
    pair: str = st.sidebar.selectbox("Pair:", pairs, index=0)
    df_calendar_filtered = get_df_calendar_filtered(calendar_df, event, base_ccy)
    # Missing figures become the string '0' (columns are treated as text).
    df_calendar_filtered['Actual'] = df_calendar_filtered['Actual'].fillna('0')
    df_calendar_filtered['Forecast'] = df_calendar_filtered['Forecast'].fillna('0')
    df_calendar_filtered['Previous'] = df_calendar_filtered['Previous'].fillna('0')
    df_price_RT = get_df_price_RT(pair)
    result_df = combine_calendar_with_price_RT(df_calendar_filtered,
                                               df_price_RT,
                                               event,
                                               pair, base_ccy)
    result_df = calc_volatility(result_df, base_ccy, event, pair)
    st.header("Volatility Histogram Charts")
    with st.expander("See charts"):
        fig_par, ax_par = plt.subplots()
        ax_par.set_title("Volatility At Event Release")
        ax_par.hist(result_df['Volatility_pips_intraday'].dropna(), bins=10)
        st.pyplot(fig_par)
        fig_bf, ax_bf = plt.subplots()
        ax_bf.set_title("Volatility Before Event Release")
        ax_bf.hist(result_df['Volatility_pips_bf'].dropna(), bins=10)
        st.pyplot(fig_bf)
        fig_af, ax_af = plt.subplots()
        # Fix: this chart plots the *after*-release series; the original
        # title said "Before" (copy/paste error).
        ax_af.set_title("Volatility After Event Release")
        ax_af.hist(result_df['Volatility_pips_af'].dropna(), bins=10)
        st.pyplot(fig_af)
    st.header("Volatility Table")
    with st.expander("See table"):
        # The blank 'hack' column replaces the index so the table renders
        # without row labels.
        st.write(result_df[['Volatility_pips_bf', 'Volatility_pips_af', 'Volatility_pips_intraday']]
                 .dropna()
                 .assign(hack='')
                 .set_index('hack'))
1c2b6ab3cb65aef03242cda050e06f529b100e6f | 1,590 | py | Python | src/OTLMOW/OTLModel/Datatypes/KlOntvangerToepassing.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/OTLModel/Datatypes/KlOntvangerToepassing.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/OTLModel/Datatypes/KlOntvangerToepassing.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlOntvangerToepassing(KeuzelijstField):
    """Choice list (keuzelijst) of model names for OntvangerToepassing."""
    # Machine-generated, declarative choice-list definition — extend rather
    # than hand-edit (see the generator note at the top of this file).
    naam = 'KlOntvangerToepassing'
    label = 'Ontvanger toepassing'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlOntvangerToepassing'
    definition = 'Keuzelijst met modelnamen voor OntvangerToepassing.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlOntvangerToepassing'
    # Allowed values: each key maps to a KeuzelijstWaarde carrying the
    # fill-in value, display label and concept URI.
    options = {
        'GPRS': KeuzelijstWaarde(invulwaarde='GPRS',
                                 label='GPRS',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/GPRS'),
        'GSM': KeuzelijstWaarde(invulwaarde='GSM',
                                label='GSM',
                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/GSM'),
        'KAR': KeuzelijstWaarde(invulwaarde='KAR',
                                label='KAR',
                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/KAR'),
        'WIFI': KeuzelijstWaarde(invulwaarde='WIFI',
                                 label='WIFI',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/WIFI')
    }
| 54.827586 | 126 | 0.64717 |
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
class KlOntvangerToepassing(KeuzelijstField):
    """Choice list (keuzelijst) of model names for OntvangerToepassing."""
    # Machine-generated, declarative choice-list definition — extend rather
    # than hand-edit.
    naam = 'KlOntvangerToepassing'
    label = 'Ontvanger toepassing'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlOntvangerToepassing'
    definition = 'Keuzelijst met modelnamen voor OntvangerToepassing.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlOntvangerToepassing'
    # Allowed values: each key maps to a KeuzelijstWaarde carrying the
    # fill-in value, display label and concept URI.
    options = {
        'GPRS': KeuzelijstWaarde(invulwaarde='GPRS',
                                 label='GPRS',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/GPRS'),
        'GSM': KeuzelijstWaarde(invulwaarde='GSM',
                                label='GSM',
                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/GSM'),
        'KAR': KeuzelijstWaarde(invulwaarde='KAR',
                                label='KAR',
                                objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/KAR'),
        'WIFI': KeuzelijstWaarde(invulwaarde='WIFI',
                                 label='WIFI',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOntvangerToepassing/WIFI')
    }
| true | true |
1c2b6b83cc21f4f9cf393e72f0fd8b6d10b253b6 | 493 | py | Python | profiles_api/urls.py | SameyaAlam/profiles-rest-api | d92772cfd53b6a7606cb612b468d4460c7645a55 | [
"MIT"
] | null | null | null | profiles_api/urls.py | SameyaAlam/profiles-rest-api | d92772cfd53b6a7606cb612b468d4460c7645a55 | [
"MIT"
] | null | null | null | profiles_api/urls.py | SameyaAlam/profiles-rest-api | d92772cfd53b6a7606cb612b468d4460c7645a55 | [
"MIT"
] | null | null | null | from django.urls import path,include
from profiles_api import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
router.register('profiles', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-api/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls)),
] | 32.866667 | 78 | 0.768763 | from django.urls import path,include
from profiles_api import views
from rest_framework.routers import DefaultRouter
# DRF router auto-generates the routes for each registered ViewSet.
router = DefaultRouter()
# basename is given explicitly here — presumably because the viewset cannot
# derive one from a queryset; confirm against views.HelloViewSet.
router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
router.register('profiles', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
# Explicit API views first, then everything produced by the router.
urlpatterns = [
    path('hello-api/', views.HelloApiView.as_view()),
    path('login/', views.UserLoginApiView.as_view()),
    path('', include(router.urls)),
] | true | true |
1c2b6ec667cdf8df3798a7119d20da4ae586253f | 15,809 | py | Python | 05-ACGAN/acgan.py | stephenwithav/25-gans-of-04-20 | ae8c475084c95869fc3992a8c6aa5acae693377f | [
"MIT"
] | null | null | null | 05-ACGAN/acgan.py | stephenwithav/25-gans-of-04-20 | ae8c475084c95869fc3992a8c6aa5acae693377f | [
"MIT"
] | null | null | null | 05-ACGAN/acgan.py | stephenwithav/25-gans-of-04-20 | ae8c475084c95869fc3992a8c6aa5acae693377f | [
"MIT"
] | null | null | null | '''Trains ACGAN on MNIST using Keras
This version of ACGAN is similar to DCGAN. The difference mainly
is that the z-vector of geneerator is conditioned by a one-hot label
to produce specific fake images. The discriminator is trained to
discriminate real from fake images and predict the corresponding
one-hot labels.
[1] Radford, Alec, Luke Metz, and Soumith Chintala.
"Unsupervised representation learning with deep convolutional
generative adversarial networks." arXiv preprint arXiv:1511.06434 (2015).
[2] Odena, Augustus, Christopher Olah, and Jonathon Shlens.
"Conditional image synthesis with auxiliary classifier gans."
arXiv preprint arXiv:1610.09585 (2016).
'''
from tensorflow.keras.layers import Activation, Dense, Input
from tensorflow.keras.layers import Conv2D, Flatten
from tensorflow.keras.layers import Reshape, Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.datasets import mnist
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import to_categorical
import math
import matplotlib.pyplot as plt
import numpy as np
import argparse
def build_generator(inputs,
                    image_size,
                    activation='sigmoid',
                    labels=None,
                    codes=None):
    """Build a Generator Model
    Stack of BN-ReLU-Conv2DTranpose to generate fake images.
    Output activation is sigmoid instead of tanh in [1].
    Sigmoid converges easily.
    Arguments:
        inputs (Layer): Input layer of the generator (the z-vector)
        image_size (int): Target size of one side
            (assuming square image; effectively assumed divisible by 4,
            see note below)
        activation (string): Name of output activation layer
        labels (tensor): Input labels
        codes (list): 2-dim disentangled codes for InfoGAN
    Returns:
        Model: Generator Model
    """
    # Two stride-2 transposed convolutions upsample by 4x, so the output
    # side is 4 * (image_size // 4) — exact only when image_size % 4 == 0.
    image_resize = image_size // 4
    # network parameters
    kernel_size = 5
    layer_filters = [128, 64, 32, 1]
    if labels is not None:
        if codes is None:
            # ACGAN labels
            # concatenate z noise vector and one-hot labels
            inputs = [inputs, labels]
        else:
            # infoGAN codes
            # concatenate z noise vector,
            # one-hot labels and codes 1 & 2
            inputs = [inputs, labels] + codes
        x = concatenate(inputs, axis=1)
    elif codes is not None:
        # generator 0 of StackedGAN
        inputs = [inputs, codes]
        x = concatenate(inputs, axis=1)
    else:
        # default input is just 100-dim noise (z-code)
        x = inputs
    x = Dense(image_resize * image_resize * layer_filters[0])(x)
    x = Reshape((image_resize, image_resize, layer_filters[0]))(x)
    for filters in layer_filters:
        # first two convolution layers use strides = 2
        # the last two use strides = 1
        if filters > layer_filters[-2]:
            strides = 2
        else:
            strides = 1
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            strides=strides,
                            padding='same')(x)
    if activation is not None:
        x = Activation(activation)(x)
    # generator output is the synthesized image x
    return Model(inputs, x, name='generator')
def build_discriminator(inputs,
                        activation='sigmoid',
                        num_labels=None,
                        num_codes=None):
    """Build a Discriminator Model
    Stack of LeakyReLU-Conv2D to discriminate real from fake
    The network does not converge with BN so it is not used here
    unlike in [1]
    Arguments:
        inputs (Layer): Input layer of the discriminator (the image)
        activation (string): Name of output activation layer
        num_labels (int): Dimension of one-hot labels for ACGAN & InfoGAN
        num_codes (int): num_codes-dim Q network as output
            if StackedGAN or 2 Q networks if InfoGAN
    Returns:
        Model: Discriminator Model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]
    x = inputs
    for filters in layer_filters:
        # first 3 convolution layers use strides = 2
        # last one uses strides = 1
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)
    x = Flatten()(x)
    # default output is probability that the image is real
    outputs = Dense(1)(x)
    if activation is not None:
        # fix: removed stray debug `print(activation)` left in the original
        outputs = Activation(activation)(outputs)
    if num_labels:
        # ACGAN and InfoGAN have 2nd output
        # 2nd output is 10-dim one-hot vector of label
        layer = Dense(layer_filters[-2])(x)
        labels = Dense(num_labels)(layer)
        labels = Activation('softmax', name='label')(labels)
        if num_codes is None:
            outputs = [outputs, labels]
        else:
            # InfoGAN have 3rd and 4th outputs
            # 3rd output is 1-dim continous Q of 1st c given x
            code1 = Dense(1)(layer)
            code1 = Activation('sigmoid', name='code1')(code1)
            # 4th output is 1-dim continuous Q of 2nd c given x
            code2 = Dense(1)(layer)
            code2 = Activation('sigmoid', name='code2')(code2)
            outputs = [outputs, labels, code1, code2]
    elif num_codes is not None:
        # StackedGAN Q0 output
        # z0_recon is reconstruction of z0 normal distribution
        z0_recon = Dense(num_codes)(x)
        z0_recon = Activation('tanh', name='z0')(z0_recon)
        outputs = [outputs, z0_recon]
    return Model(inputs, outputs, name='discriminator')
def train(models, data, params):
    """Train the discriminator and adversarial Networks
    Alternately train discriminator and adversarial
    networks by batch.
    Discriminator is trained first with real and fake
    images and corresponding one-hot labels.
    Adversarial is trained next with fake images pretending
    to be real and corresponding one-hot labels.
    The generator is saved to '<model_name>.h5' at the end.
    # Arguments
        models (list): Generator, Discriminator,
            Adversarial models
        data (list): x_train, y_train data
        params (list): Network parameters
    """
    # the GAN models
    generator, discriminator, adversarial = models
    # images and their one-hot labels
    x_train, y_train = data
    # network parameters
    batch_size, latent_size, train_steps, num_labels, model_name \
            = params
    # Fixed sample labels (digits 0..num_labels-1 cycled over 16 slots);
    # only used by the informational print below.
    # Note: the original also built an unused `noise_input` z-batch and an
    # unused `save_interval = 500`; both dead locals were removed.
    noise_label = np.eye(num_labels)[np.arange(0, 16) % num_labels]
    # number of elements in train dataset
    train_size = x_train.shape[0]
    print(model_name,
          "Labels for generated images: ",
          np.argmax(noise_label, axis=1))
    for i in range(train_steps):
        # train the discriminator for 1 batch:
        # 1 batch of real (label=1.0) and fake images (label=0.0)
        # randomly pick real images and
        # corresponding labels from dataset
        rand_indexes = np.random.randint(0,
                                         train_size,
                                         size=batch_size)
        real_images = x_train[rand_indexes]
        real_labels = y_train[rand_indexes]
        # generate fake images from noise using generator;
        # noise is drawn from a uniform distribution
        noise = np.random.uniform(-1.0,
                                  1.0,
                                  size=[batch_size, latent_size])
        # randomly pick one-hot labels
        fake_labels = np.eye(num_labels)[np.random.choice(num_labels,
                                                          batch_size)]
        # generate fake images
        fake_images = generator.predict([noise, fake_labels])
        # real + fake images = 1 batch of train data
        x = np.concatenate((real_images, fake_images))
        # real + fake labels = 1 batch of train data labels
        labels = np.concatenate((real_labels, fake_labels))
        # label real and fake images:
        # real images label is 1.0, fake images label is 0.0
        y = np.ones([2 * batch_size, 1])
        y[batch_size:, :] = 0
        # train discriminator network, log the loss and accuracy
        # ['loss', 'activation_1_loss',
        #  'label_loss', 'activation_1_acc', 'label_acc']
        metrics = discriminator.train_on_batch(x, [y, labels])
        fmt = "%d: [disc loss: %f, srcloss: %f,"
        fmt += "lblloss: %f, srcacc: %f, lblacc: %f]"
        log = fmt % (i, metrics[0], metrics[1], \
                metrics[2], metrics[3], metrics[4])
        # train the adversarial network for 1 batch:
        # 1 batch of fake images with label=1.0 and corresponding one-hot
        # labels; the discriminator weights are frozen in the adversarial
        # network, so only the generator is trained
        noise = np.random.uniform(-1.0,
                                  1.0,
                                  size=[batch_size, latent_size])
        # randomly pick one-hot labels
        fake_labels = np.eye(num_labels)[np.random.choice(num_labels,
                                                          batch_size)]
        # label fake images as real
        y = np.ones([batch_size, 1])
        # unlike in discriminator training, the fake images are not kept in
        # a variable: they flow from the generator straight into the
        # discriminator input of the adversarial model for classification
        metrics = adversarial.train_on_batch([noise, fake_labels],
                                             [y, fake_labels])
        fmt = "%s [advr loss: %f, srcloss: %f,"
        fmt += "lblloss: %f, srcacc: %f, lblacc: %f]"
        log = fmt % (log, metrics[0], metrics[1],\
                metrics[2], metrics[3], metrics[4])
        if (i + 1) % 25 == 0:
            # log training progress on a periodic basis
            print(log)
    # save the model after training the generator;
    # the trained generator can be reloaded
    # for future MNIST digit generation
    generator.save(model_name + ".h5")
def build_and_train_models():
    """Load the dataset, build ACGAN discriminator,
    generator, and adversarial models.
    Call the ACGAN train routine.
    Returns:
        tuple: (generator, discriminator, adversarial) trained models
    """
    # load MNIST dataset
    (x_train, y_train), (_, _) = mnist.load_data()
    # reshape data for CNN as (28, 28, 1) and normalize
    image_size = x_train.shape[1]
    x_train = np.reshape(x_train,
                         [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255
    # train labels
    num_labels = len(np.unique(y_train))
    y_train = to_categorical(y_train)
    model_name = "acgan_mnist"
    # network parameters
    latent_size = 100
    batch_size = 64
    train_steps = 40000
    lr = 2e-4
    decay = 6e-8
    input_shape = (image_size, image_size, 1)
    label_shape = (num_labels, )
    # build discriminator Model
    inputs = Input(shape=input_shape,
                   name='discriminator_input')
    # call discriminator builder
    # with 2 outputs, pred source and labels
    discriminator = build_discriminator(inputs,
                                        num_labels=num_labels)
    # [1] uses Adam, but discriminator
    # easily converges with RMSprop
    # NOTE(review): `lr`/`decay` are legacy Keras argument names; newer
    # TF/Keras releases use `learning_rate` — confirm the pinned version.
    optimizer = RMSprop(lr=lr, decay=decay)
    # 2 loss functions: 1) probability image is real
    # 2) class label of the image
    loss = ['binary_crossentropy', 'categorical_crossentropy']
    discriminator.compile(loss=loss,
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()
    # build generator model
    input_shape = (latent_size, )
    inputs = Input(shape=input_shape, name='z_input')
    labels = Input(shape=label_shape, name='labels')
    # call generator builder with input labels
    generator = build_generator(inputs,
                                image_size,
                                labels=labels)
    generator.summary()
    # build adversarial model = generator + discriminator
    optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
    # freeze the weights of discriminator
    # during adversarial training
    discriminator.trainable = False
    adversarial = Model([inputs, labels],
                        discriminator(generator([inputs, labels])),
                        name=model_name)
    # same 2 loss functions: 1) probability image is real
    # 2) class label of the image
    adversarial.compile(loss=loss,
                        optimizer=optimizer,
                        metrics=['accuracy'])
    adversarial.summary()
    # train discriminator and adversarial networks
    models = (generator, discriminator, adversarial)
    data = (x_train, y_train)
    params = (batch_size, latent_size, \
              train_steps, num_labels, model_name)
    train(models, data, params)
    return models
def plot_images(generator,
                noise_input,
                noise_label=None,
                noise_codes=None,
                show=False,
                step=0,
                model_name="gan"):
    """Generate fake images and plot them in a square grid.
    # Arguments
        generator (Model): The Generator Model for
            fake images generation
        noise_input (ndarray): Array of z-vectors
        noise_label (ndarray): Optional one-hot labels (ACGAN/InfoGAN)
        noise_codes (list): Optional disentangled codes (InfoGAN/StackedGAN)
        show (bool): Show the plot (True) or close the figure (False)
        step (int): Unused — NOTE(review): the original docstring claimed
            it is appended to a saved-image filename, but this function
            never writes anything to disk
        model_name (string): Unused (see note for step)
    """
    # grid is rows x rows; assumes noise_input holds a square count (e.g. 16)
    rows = int(math.sqrt(noise_input.shape[0]))
    if noise_label is not None:
        noise_input = [noise_input, noise_label]
        if noise_codes is not None:
            noise_input += noise_codes
    images = generator.predict(noise_input)
    plt.figure(figsize=(2.2, 2.2))
    num_images = images.shape[0]
    image_size = images.shape[1]
    for i in range(num_images):
        plt.subplot(rows, rows, i + 1)
        image = np.reshape(images[i], [image_size, image_size])
        plt.imshow(image, cmap='gray')
        plt.axis('off')
    if show:
        plt.show()
    else:
        plt.close('all')
def test_generator(generator, class_label=None):
    """Plot a grid of 16 generator samples.

    When class_label is None, the 16 digit labels are drawn at random;
    otherwise every sample is conditioned on the given class.
    """
    z = np.random.uniform(-1.0, 1.0, size=[16, 100])
    if class_label is None:
        one_hot = np.eye(10)[np.random.choice(10, 16)]
        step = 0
    else:
        one_hot = np.zeros((16, 10))
        one_hot[:, class_label] = 1
        step = class_label
    plot_images(generator,
                noise_input=z,
                noise_label=one_hot,
                show=True,
                step=step,
                model_name="test_outputs")
(g, d, a) = build_and_train_models()
| 36.594907 | 74 | 0.609906 |
from tensorflow.keras.layers import Activation, Dense, Input
from tensorflow.keras.layers import Conv2D, Flatten
from tensorflow.keras.layers import Reshape, Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.datasets import mnist
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import to_categorical
import math
import matplotlib.pyplot as plt
import numpy as np
import argparse
def build_generator(inputs,
                    image_size,
                    activation='sigmoid',
                    labels=None,
                    codes=None):
    """Build a generator model: Dense -> Reshape -> stack of
    BN-ReLU-Conv2DTranspose blocks ending in `activation`.
    Arguments:
        inputs (Layer): z-vector input layer
        image_size (int): target size of one side (square image; two
            stride-2 deconvolutions upsample 4x, so the output side is
            4 * (image_size // 4))
        activation (string): name of the output activation layer, or None
        labels (tensor): optional one-hot label input (ACGAN/InfoGAN)
        codes (list): optional disentangled code inputs (InfoGAN/StackedGAN)
    Returns:
        Model: generator model
    """
    image_resize = image_size // 4
    kernel_size = 5
    layer_filters = [128, 64, 32, 1]
    if labels is not None:
        if codes is None:
            # ACGAN: concatenate z noise vector and one-hot labels
            inputs = [inputs, labels]
        else:
            # InfoGAN: concatenate z, one-hot labels and the codes
            inputs = [inputs, labels] + codes
        x = concatenate(inputs, axis=1)
    elif codes is not None:
        # StackedGAN generator 0: concatenate z and codes
        inputs = [inputs, codes]
        x = concatenate(inputs, axis=1)
    else:
        # default input is just the noise z-vector
        x = inputs
    x = Dense(image_resize * image_resize * layer_filters[0])(x)
    x = Reshape((image_resize, image_resize, layer_filters[0]))(x)
    for filters in layer_filters:
        # filter counts above layer_filters[-2] upsample with stride 2;
        # the remaining layers keep stride 1
        if filters > layer_filters[-2]:
            strides = 2
        else:
            strides = 1
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            strides=strides,
                            padding='same')(x)
    if activation is not None:
        x = Activation(activation)(x)
    # x is the synthesized image
    return Model(inputs, x, name='generator')
def build_discriminator(inputs,
                        activation='sigmoid',
                        num_labels=None,
                        num_codes=None):
    """Build a discriminator model: stack of LeakyReLU-Conv2D layers.
    Arguments:
        inputs (Layer): image input layer
        activation (string): name of the output activation layer, or None
        num_labels (int): dimension of one-hot labels (ACGAN/InfoGAN)
        num_codes (int): code dimension for the StackedGAN Q0 output, or
            2 Q-network outputs if combined with num_labels (InfoGAN)
    Returns:
        Model: discriminator model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]
    x = inputs
    for filters in layer_filters:
        # the last convolution keeps stride 1; the rest downsample by 2
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)
    x = Flatten()(x)
    # first output: probability the image is real
    outputs = Dense(1)(x)
    if activation is not None:
        # fix: removed stray debug `print(activation)` left in the original
        outputs = Activation(activation)(outputs)
    if num_labels:
        # 2nd output: predicted one-hot class label (ACGAN/InfoGAN)
        layer = Dense(layer_filters[-2])(x)
        labels = Dense(num_labels)(layer)
        labels = Activation('softmax', name='label')(labels)
        if num_codes is None:
            outputs = [outputs, labels]
        else:
            # InfoGAN: 3rd/4th outputs are the continuous code estimates
            code1 = Dense(1)(layer)
            code1 = Activation('sigmoid', name='code1')(code1)
            code2 = Dense(1)(layer)
            code2 = Activation('sigmoid', name='code2')(code2)
            outputs = [outputs, labels, code1, code2]
    elif num_codes is not None:
        # StackedGAN Q0 output: reconstruction of the z0 code
        z0_recon = Dense(num_codes)(x)
        z0_recon = Activation('tanh', name='z0')(z0_recon)
        outputs = [outputs, z0_recon]
    return Model(inputs, outputs, name='discriminator')
def train(models, data, params):
    """Alternately train the discriminator and adversarial networks by batch.
    The discriminator is trained on one batch of real (label 1.0) and fake
    (label 0.0) images with their one-hot class labels; the adversarial
    network is then trained on fake images labelled as real.  The generator
    is saved to '<model_name>.h5' at the end.
    # Arguments
        models (tuple): generator, discriminator, adversarial models
        data (tuple): x_train, y_train
        params (tuple): batch_size, latent_size, train_steps, num_labels,
            model_name
    """
    generator, discriminator, adversarial = models
    x_train, y_train = data
    batch_size, latent_size, train_steps, num_labels, model_name \
            = params
    # Fixed sample labels (digits cycled over 16 slots); only used by the
    # informational print below.  The original also built an unused
    # `noise_input` z-batch and an unused `save_interval`; both removed.
    noise_label = np.eye(num_labels)[np.arange(0, 16) % num_labels]
    train_size = x_train.shape[0]
    print(model_name,
          "Labels for generated images: ",
          np.argmax(noise_label, axis=1))
    for i in range(train_steps):
        # --- discriminator step: one real batch + one fake batch ---
        rand_indexes = np.random.randint(0,
                                         train_size,
                                         size=batch_size)
        real_images = x_train[rand_indexes]
        real_labels = y_train[rand_indexes]
        noise = np.random.uniform(-1.0,
                                  1.0,
                                  size=[batch_size, latent_size])
        fake_labels = np.eye(num_labels)[np.random.choice(num_labels,
                                                          batch_size)]
        fake_images = generator.predict([noise, fake_labels])
        x = np.concatenate((real_images, fake_images))
        labels = np.concatenate((real_labels, fake_labels))
        # source labels: real images 1.0, fakes 0.0
        y = np.ones([2 * batch_size, 1])
        y[batch_size:, :] = 0
        metrics = discriminator.train_on_batch(x, [y, labels])
        fmt = "%d: [disc loss: %f, srcloss: %f,"
        fmt += "lblloss: %f, srcacc: %f, lblacc: %f]"
        log = fmt % (i, metrics[0], metrics[1], \
                metrics[2], metrics[3], metrics[4])
        # --- adversarial step: fake images labelled as real; discriminator
        # weights are frozen, so only the generator learns ---
        noise = np.random.uniform(-1.0,
                                  1.0,
                                  size=[batch_size, latent_size])
        fake_labels = np.eye(num_labels)[np.random.choice(num_labels,
                                                          batch_size)]
        y = np.ones([batch_size, 1])
        metrics = adversarial.train_on_batch([noise, fake_labels],
                                             [y, fake_labels])
        fmt = "%s [advr loss: %f, srcloss: %f,"
        fmt += "lblloss: %f, srcacc: %f, lblacc: %f]"
        log = fmt % (log, metrics[0], metrics[1],\
                metrics[2], metrics[3], metrics[4])
        if (i + 1) % 25 == 0:
            # periodic progress log
            print(log)
    # persist the trained generator for later digit generation
    generator.save(model_name + ".h5")
def build_and_train_models():
    """Build the ACGAN generator/discriminator/adversarial models on MNIST,
    run the full training loop, and return the three Keras models."""
    (x_train, y_train), (_, _) = mnist.load_data()
    image_size = x_train.shape[1]
    # reshape to NHWC with a single channel and scale pixels to [0, 1]
    x_train = np.reshape(x_train,
                         [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255
    num_labels = len(np.unique(y_train))
    y_train = to_categorical(y_train)
    model_name = "acgan_mnist"
    # network / training hyper-parameters
    latent_size = 100
    batch_size = 64
    train_steps = 40000
    lr = 2e-4
    decay = 6e-8
    input_shape = (image_size, image_size, 1)
    label_shape = (num_labels, )
    # discriminator: two heads -> real/fake source and class label,
    # hence two losses below (binary + categorical crossentropy)
    inputs = Input(shape=input_shape,
                   name='discriminator_input')
    discriminator = build_discriminator(inputs,
                                        num_labels=num_labels)
    optimizer = RMSprop(lr=lr, decay=decay)
    loss = ['binary_crossentropy', 'categorical_crossentropy']
    discriminator.compile(loss=loss,
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()
    # generator: maps (noise vector, one-hot label) to an image
    input_shape = (latent_size, )
    inputs = Input(shape=input_shape, name='z_input')
    labels = Input(shape=label_shape, name='labels')
    generator = build_generator(inputs,
                                image_size,
                                labels=labels)
    generator.summary()
    # adversarial model: generator stacked under a frozen discriminator,
    # trained at half the learning rate
    optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
    discriminator.trainable = False
    adversarial = Model([inputs, labels],
                        discriminator(generator([inputs, labels])),
                        name=model_name)
    adversarial.compile(loss=loss,
                        optimizer=optimizer,
                        metrics=['accuracy'])
    adversarial.summary()
    models = (generator, discriminator, adversarial)
    data = (x_train, y_train)
    params = (batch_size, latent_size, \
            train_steps, num_labels, model_name)
    train(models, data, params)
    return models
def plot_images(generator,
                noise_input,
                noise_label=None,
                noise_codes=None,
                show=False,
                step=0,
                model_name="gan"):
    """Generate images from `generator` and lay them out on a square grid.

    Arguments:
        generator: Keras model mapping noise (and optional labels/codes)
            to images.
        noise_input: latent vectors, shape (num_images, latent_size).
        noise_label: optional one-hot labels appended to the model inputs.
        noise_codes: optional extra latent codes appended as well (only
            used when noise_label is also given).
        show (bool): display the figure; otherwise it is closed unsaved.
        step, model_name: NOTE(review): currently unused -- presumably
            intended for a savefig filename that is missing from this
            version; confirm against the upstream script.
    """
    rows = int(math.sqrt(noise_input.shape[0]))
    if noise_label is not None:
        noise_input = [noise_input, noise_label]
        if noise_codes is not None:
            noise_input += noise_codes
    images = generator.predict(noise_input)
    plt.figure(figsize=(2.2, 2.2))
    num_images = images.shape[0]
    image_size = images.shape[1]
    for i in range(num_images):
        plt.subplot(rows, rows, i + 1)
        # drop the channel axis so imshow gets a 2-D grayscale array
        image = np.reshape(images[i], [image_size, image_size])
        plt.imshow(image, cmap='gray')
        plt.axis('off')
    if show:
        plt.show()
    else:
        plt.close('all')
def test_generator(generator, class_label=None):
    """Sample 16 images from a trained generator and display them.

    Arguments:
        generator: trained ACGAN generator model.
        class_label (int | None): digit class to condition every sample on;
            when None, classes are drawn at random.
    """
    noise_input = np.random.uniform(-1.0, 1.0, size=[16, 100])
    step = 0
    if class_label is None:
        num_labels = 10
        noise_label = np.eye(num_labels)[np.random.choice(num_labels, 16)]
    else:
        # one-hot encode the requested class for all 16 samples
        noise_label = np.zeros((16, 10))
        noise_label[:,class_label] = 1
        step = class_label
    plot_images(generator,
                noise_input=noise_input,
                noise_label=noise_label,
                show=True,
                step=step,
                model_name="test_outputs")
# Build the ACGAN and run the full 40k-step training loop at script run time.
(g, d, a) = build_and_train_models()
| true | true |
1c2b6ed4432e6ada01769e62bcdbd23019e41bea | 242 | py | Python | BOJ/08000~08999/8700~8799/8716.py | shinkeonkim/today-ps | f3e5e38c5215f19579bb0422f303a9c18c626afa | [
"Apache-2.0"
] | 2 | 2020-01-29T06:54:41.000Z | 2021-11-07T13:23:27.000Z | BOJ/08000~08999/8700~8799/8716.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | BOJ/08000~08999/8700~8799/8716.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | A = list(map(int,input().split()))
B = list(map(int,input().split()))
if (min(A[2],B[2])-max(A[0],B[0])) < 0 or (min(A[1],B[1])-max(A[3],B[3])) < 0:
print(0)
else:
print((min(A[2],B[2])-max(A[0],B[0]))*(min(A[1],B[1])-max(A[3],B[3]))) | 40.333333 | 78 | 0.491736 | A = list(map(int,input().split()))
B = list(map(int,input().split()))
if (min(A[2],B[2])-max(A[0],B[0])) < 0 or (min(A[1],B[1])-max(A[3],B[3])) < 0:
print(0)
else:
print((min(A[2],B[2])-max(A[0],B[0]))*(min(A[1],B[1])-max(A[3],B[3]))) | true | true |
1c2b6fb7c8ea9bb292916e0effb3a81bc4e7e8fa | 3,153 | py | Python | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy208.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy208.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy208.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=12
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the bit-flip oracle O_f for a boolean function f on n qubits.

    For every n-bit string on which f returns "1", a multi-controlled-X
    flips the target qubit; X gates around the controls select that
    specific bit pattern (fire on 0-bits).

    Arguments:
        n: number of control (input) qubits.
        f: callable mapping an n-character bit string to "0" or "1".

    Returns:
        QuantumCircuit named "Of" over n control qubits plus one target.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # surround the MCX with X so it triggers exactly on pattern `rep`
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble a Deutsch-Jozsa-style circuit for f, plus mutated gates.

    The `# number=N` gates were injected by an automated mutation process
    (file header: "total number=12"); several pairs (h/h, x/x) cancel but
    are kept verbatim since the mutation sequence is the point of the file.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[1]) # number=6
    prog.cz(input_qubit[0],input_qubit[1]) # number=7
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=8
    prog.h(target)
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])
    prog.y(input_qubit[1]) # number=2
    prog.cx(input_qubit[0],input_qubit[1]) # number=4
    prog.y(input_qubit[1]) # number=3
    prog.x(input_qubit[0]) # number=10
    prog.x(input_qubit[0]) # number=11
    # circuit end
    return prog
if __name__ == '__main__':
    # Run the 2-qubit circuit on the noisy FakeVigo model and dump counts.
    n = 2
    f = lambda rep: rep[-1]  # balanced function: value of the last bit
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    backend = FakeVigo()
    circuit1 = transpile(prog,FakeVigo())
    # mutation artifact: the two X gates on qubit 3 cancel each other
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # write counts, circuit depth and an ASCII drawing to the CSV path
    writefile = open("../data/startQiskit_noisy208.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 28.151786 | 82 | 0.627022 |
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
prog.x(target)
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1])
prog.h(input_qubit[1])
prog.cz(input_qubit[0],input_qubit[1])
prog.h(input_qubit[1])
prog.h(input_qubit[1])
prog.h(target)
prog.barrier()
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
prog.y(input_qubit[1])
prog.cx(input_qubit[0],input_qubit[1])
prog.y(input_qubit[1])
prog.x(input_qubit[0])
prog.x(input_qubit[0])
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
prog = make_circuit(n, f)
sample_shot =2800
backend = FakeVigo()
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_noisy208.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
1c2b6fce8b515def8f4278b4af8f986068125807 | 8,930 | py | Python | dynaconf/utils/__init__.py | Bernardoow/dynaconf | bb6282cf04214f13c0bcbacdb4cee65d4c9ddafb | [
"MIT"
] | null | null | null | dynaconf/utils/__init__.py | Bernardoow/dynaconf | bb6282cf04214f13c0bcbacdb4cee65d4c9ddafb | [
"MIT"
] | null | null | null | dynaconf/utils/__init__.py | Bernardoow/dynaconf | bb6282cf04214f13c0bcbacdb4cee65d4c9ddafb | [
"MIT"
] | null | null | null | import functools
import os
import warnings
from json import JSONDecoder
BANNER = """
██████╗ ██╗ ██╗███╗ ██╗ █████╗ ██████╗ ██████╗ ███╗ ██╗███████╗
██╔══██╗╚██╗ ██╔╝████╗ ██║██╔══██╗██╔════╝██╔═══██╗████╗ ██║██╔════╝
██║ ██║ ╚████╔╝ ██╔██╗ ██║███████║██║ ██║ ██║██╔██╗ ██║█████╗
██║ ██║ ╚██╔╝ ██║╚██╗██║██╔══██║██║ ██║ ██║██║╚██╗██║██╔══╝
██████╔╝ ██║ ██║ ╚████║██║ ██║╚██████╗╚██████╔╝██║ ╚████║██║
╚═════╝ ╚═╝ ╚═╝ ╚═══╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝
"""
if os.name == "nt": # pragma: no cover
# windows can't handle the above charmap
BANNER = "DYNACONF"
def object_merge(old, new, unique=False, tail=None):
    """Recursively fold values from ``old`` into ``new`` (mutated in place).

    :param old: Existing data to merge from.
    :param new: Target data; receives values missing from it.
    :param unique: If True, list items already in ``new`` are not prepended.
    :param tail: Key marking the last element of a tree; never merged.
    """
    if old is None or new is None or old == new:
        # absent or identical -> nothing to merge
        return
    if isinstance(old, list) and isinstance(new, list):
        # prepend old items, keeping their original relative order
        for element in reversed(old):
            if not (unique and element in new):
                new.insert(0, element)
    if isinstance(old, dict) and isinstance(new, dict):
        for key, old_value in old.items():
            if key == tail:
                continue
            if key in new:
                object_merge(old_value, new[key], tail=tail)
            else:
                new[key] = old_value
        handle_metavalues(old, new)
def handle_metavalues(old, new):
    """Cleanup of MetaValues on the ``new`` dict.

    Resolves @reset/@merge/@del markers found on values of ``new``,
    merging against or deleting from ``old`` as required. The value is
    re-read from ``new[key]`` after each unwrap, so a later marker check
    sees the already-unwrapped value.
    """
    for key in list(new.keys()):
        if getattr(new[key], "_dynaconf_reset", False):  # pragma: no cover
            # a Reset on new triggers reasign of existing data
            # @reset is deprecated on v3.0.0
            new[key] = new[key].unwrap()
        if getattr(new[key], "_dynaconf_merge", False):
            # a Merge on new triggers merge with existing data
            unique = new[key].unique
            new[key] = new[key].unwrap()
            object_merge(old.get(key), new[key], unique=unique)
        if getattr(new[key], "_dynaconf_del", False):
            # a Del on new triggers deletion of existing data
            new.pop(key, None)
            old.pop(key, None)
class DynaconfDict(dict):
    """A dict representing an empty Dynaconf object.

    Useful for running loaders into a plain dict for testing.
    """

    def __init__(self, *args, **kwargs):
        # files consumed by loaders are recorded here
        self._loaded_files = []
        super(DynaconfDict, self).__init__(*args, **kwargs)

    @property
    def logger(self):
        """The shared internal dynaconf logger."""
        return raw_logger()

    def set(self, key, value, *args, **kwargs):
        """Store ``value`` under ``key``; extra args accepted and ignored."""
        self[key] = value

    @staticmethod
    def get_environ(key, default=None):  # pragma: no cover
        """Read ``key`` from the process environment."""
        return os.environ.get(key, default)

    def exists(self, key, **kwargs):
        """True if ``key`` is present, even when its value is falsy/None."""
        return self.get(key, missing) is not missing
@functools.lru_cache()
def _logger(level):
import logging
formatter = logging.Formatter(
fmt=(
"%(asctime)s,%(msecs)d %(levelname)-8s "
"[%(filename)s:%(lineno)d - %(funcName)s] %(message)s"
),
datefmt="%Y-%m-%d:%H:%M:%S",
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger("dynaconf")
logger.addHandler(handler)
logger.setLevel(level=getattr(logging, level, "DEBUG"))
return logger
def raw_logger(level=None):
    """Return the inner dynaconf logger; when no level is given, fall back
    to the DEBUG_LEVEL_FOR_DYNACONF environment variable (default ERROR)."""
    chosen = level or os.environ.get("DEBUG_LEVEL_FOR_DYNACONF", "ERROR")
    return _logger(chosen)
# Mapping of deprecated setting names (keys) to their current replacements
# (values); consumed by compat_kwargs() and warn_deprecations() in this
# module. Several old names may map to the same new name.
RENAMED_VARS = {
    # old: new
    "DYNACONF_NAMESPACE": "ENV_FOR_DYNACONF",
    "NAMESPACE_FOR_DYNACONF": "ENV_FOR_DYNACONF",
    "DYNACONF_SETTINGS_MODULE": "SETTINGS_FILE_FOR_DYNACONF",
    "DYNACONF_SETTINGS": "SETTINGS_FILE_FOR_DYNACONF",
    "SETTINGS_MODULE": "SETTINGS_FILE_FOR_DYNACONF",
    "SETTINGS_MODULE_FOR_DYNACONF": "SETTINGS_FILE_FOR_DYNACONF",
    "PROJECT_ROOT": "ROOT_PATH_FOR_DYNACONF",
    "PROJECT_ROOT_FOR_DYNACONF": "ROOT_PATH_FOR_DYNACONF",
    "DYNACONF_SILENT_ERRORS": "SILENT_ERRORS_FOR_DYNACONF",
    "DYNACONF_ALWAYS_FRESH_VARS": "FRESH_VARS_FOR_DYNACONF",
    "BASE_NAMESPACE_FOR_DYNACONF": "DEFAULT_ENV_FOR_DYNACONF",
    "GLOBAL_ENV_FOR_DYNACONF": "ENVVAR_PREFIX_FOR_DYNACONF",
}
def compat_kwargs(kwargs):
    """Backfill new-style keys from deprecated ones (mutates ``kwargs``)."""
    warn_deprecations(kwargs)
    for legacy, current in RENAMED_VARS.items():
        if legacy not in kwargs:
            continue
        kwargs[current] = kwargs[legacy]
        # keep every legacy alias of `current` in sync as well
        for alias, alias_target in RENAMED_VARS.items():
            if alias_target == current:
                kwargs[alias] = kwargs[current]
class Missing(object):
    """
    Sentinel value object/singleton used to differentiate between ambiguous
    situations where `None` is a valid value.
    """

    def __bool__(self):
        """Respond to boolean duck-typing: always falsy."""
        return False

    def __eq__(self, other):
        """Any two Missing instances compare equal; nothing else does."""
        return isinstance(other, self.__class__)

    # Ensure compatibility with Python 2.x
    __nonzero__ = __bool__

    def __repr__(self):
        """
        Unambiguously identify this string-based representation of Missing,
        used as a singleton.
        """
        return "<dynaconf.missing>"


# Module-level singleton: compare with ``is missing`` / ``is not missing``.
missing = Missing()
def deduplicate(list_object):
    """Return a copy of ``list_object`` with duplicates removed, order kept.

    Uses list membership (not a set) so unhashable items are supported,
    matching the original behavior.
    """
    result = []
    for element in list_object:
        if element in result:
            continue
        result.append(element)
    return result
def warn_deprecations(data):
    """Emit a DeprecationWarning for every legacy setting name in ``data``."""
    found = [(legacy, current)
             for legacy, current in RENAMED_VARS.items()
             if legacy in data]
    for legacy, current in found:
        warnings.warn(
            "You are using %s which is a deprecated settings "
            "replace it with %s" % (legacy, current),
            DeprecationWarning,
        )
def trimmed_split(s, seps=(";", ",")):
    """Split ``s`` on the first separator from ``seps`` found in it,
    stripping whitespace from each piece; return ``[s]`` if none occurs."""
    for candidate in seps:
        if candidate in s:
            return [piece.strip() for piece in s.strip().split(candidate)]
    return [s]  # no separator present: keep the raw, un-split string
def ensure_a_list(data):
    """Normalize ``data`` to a list: falsy -> [], sequences are copied,
    strings are split on ;/, and anything else is wrapped in a list."""
    if not data:
        return []
    if isinstance(data, str):
        # e.g. "settings.toml,other.yaml" -> ["settings.toml", "other.yaml"]
        return trimmed_split(data)
    if isinstance(data, (list, tuple, set)):
        return list(data)
    return [data]
def build_env_list(obj, env):
    """Build the ordered env list for loaders to iterate.

    Arguments:
        obj {LazySettings} -- A Dynaconf settings instance
        env {str} -- The current env to be loaded

    Returns:
        [str] -- Lower-cased env names: [default], the envvar prefix
        (legacy [dynaconf]), the current env, any manually passed env,
        then [global].
    """
    env_list = [obj.get("DEFAULT_ENV_FOR_DYNACONF")]
    # older versions still used [dynaconf] as the [default] env
    prefix = obj.get("ENVVAR_PREFIX_FOR_DYNACONF") or "DYNACONF"
    for candidate in (prefix, obj.current_env, env):
        if candidate and candidate not in env_list:
            env_list.append(candidate)
    # the [global] env always comes last
    env_list.append("GLOBAL")
    # loaders are responsible to change to lower/upper cases
    return [name.lower() for name in env_list]
def upperfy(key):
    """Upper-case ``key``; for dunder keys only the first segment changes.

    Example:
        foo          -> FOO
        foo_bar      -> FOO_BAR
        foo__bar__ZAZ -> FOO__bar__ZAZ (nested parts keep their case)

    Arguments:
        key {str} -- A string key that may contain dunders `__`

    Returns:
        The upper-cased key, with nested elements preserved.
    """
    if "__" not in key:
        return key.upper()
    head, _, rest = key.partition("__")
    return "{}__{}".format(head.upper(), rest)
def multi_replace(text, patterns):
    """Apply every {"old": "new"} substitution in ``patterns`` to ``text``.

    Replacements run sequentially in the dict's iteration order, so a
    later pattern can match text produced by an earlier one.
    """
    result = text
    for needle, replacement in patterns.items():
        result = result.replace(needle, replacement)
    return result
def extract_json_objects(text, decoder=JSONDecoder()):
    """Yield each decoded JSON object found inside ``text``.

    Only searches for objects ({...}); JSON arrays, bare scalars or other
    top-level JSON types outside an object are ignored. Braces that do not
    start valid JSON are skipped one character at a time.
    """
    cursor = 0
    while True:
        start = text.find("{", cursor)
        if start < 0:
            return
        try:
            obj, consumed = decoder.raw_decode(text[start:])
        except ValueError:
            # not valid JSON here; resume scanning just past this brace
            cursor = start + 1
        else:
            yield obj
            cursor = start + consumed
| 28.530351 | 79 | 0.586786 | import functools
import os
import warnings
from json import JSONDecoder
BANNER = """
██████╗ ██╗ ██╗███╗ ██╗ █████╗ ██████╗ ██████╗ ███╗ ██╗███████╗
██╔══██╗╚██╗ ██╔╝████╗ ██║██╔══██╗██╔════╝██╔═══██╗████╗ ██║██╔════╝
██║ ██║ ╚████╔╝ ██╔██╗ ██║███████║██║ ██║ ██║██╔██╗ ██║█████╗
██║ ██║ ╚██╔╝ ██║╚██╗██║██╔══██║██║ ██║ ██║██║╚██╗██║██╔══╝
██████╔╝ ██║ ██║ ╚████║██║ ██║╚██████╗╚██████╔╝██║ ╚████║██║
╚═════╝ ╚═╝ ╚═╝ ╚═══╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝
"""
if os.name == "nt":
BANNER = "DYNACONF"
def object_merge(old, new, unique=False, tail=None):
if old == new or old is None or new is None:
# Nothing to merge
return
if isinstance(old, list) and isinstance(new, list):
for item in old[::-1]:
if unique and item in new:
continue
new.insert(0, item)
if isinstance(old, dict) and isinstance(new, dict):
for key, value in old.items():
if key == tail:
continue
if key not in new:
new[key] = value
else:
object_merge(value, new[key], tail=tail)
handle_metavalues(old, new)
def handle_metavalues(old, new):
for key in list(new.keys()):
if getattr(new[key], "_dynaconf_reset", False): # pragma: no cover
# a Reset on new triggers reasign of existing data
# @reset is deprecated on v3.0.0
new[key] = new[key].unwrap()
if getattr(new[key], "_dynaconf_merge", False):
# a Merge on new triggers merge with existing data
unique = new[key].unique
new[key] = new[key].unwrap()
object_merge(old.get(key), new[key], unique=unique)
if getattr(new[key], "_dynaconf_del", False):
# a Del on new triggers deletion of existing data
new.pop(key, None)
old.pop(key, None)
class DynaconfDict(dict):
def __init__(self, *args, **kwargs):
self._loaded_files = []
super(DynaconfDict, self).__init__(*args, **kwargs)
@property
def logger(self):
return raw_logger()
def set(self, key, value, *args, **kwargs):
self[key] = value
@staticmethod
def get_environ(key, default=None): # pragma: no cover
return os.environ.get(key, default)
def exists(self, key, **kwargs):
return self.get(key, missing) is not missing
@functools.lru_cache()
def _logger(level):
import logging
formatter = logging.Formatter(
fmt=(
"%(asctime)s,%(msecs)d %(levelname)-8s "
"[%(filename)s:%(lineno)d - %(funcName)s] %(message)s"
),
datefmt="%Y-%m-%d:%H:%M:%S",
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger("dynaconf")
logger.addHandler(handler)
logger.setLevel(level=getattr(logging, level, "DEBUG"))
return logger
def raw_logger(level=None):
level = level or os.environ.get("DEBUG_LEVEL_FOR_DYNACONF", "ERROR")
return _logger(level)
RENAMED_VARS = {
# old: new
"DYNACONF_NAMESPACE": "ENV_FOR_DYNACONF",
"NAMESPACE_FOR_DYNACONF": "ENV_FOR_DYNACONF",
"DYNACONF_SETTINGS_MODULE": "SETTINGS_FILE_FOR_DYNACONF",
"DYNACONF_SETTINGS": "SETTINGS_FILE_FOR_DYNACONF",
"SETTINGS_MODULE": "SETTINGS_FILE_FOR_DYNACONF",
"SETTINGS_MODULE_FOR_DYNACONF": "SETTINGS_FILE_FOR_DYNACONF",
"PROJECT_ROOT": "ROOT_PATH_FOR_DYNACONF",
"PROJECT_ROOT_FOR_DYNACONF": "ROOT_PATH_FOR_DYNACONF",
"DYNACONF_SILENT_ERRORS": "SILENT_ERRORS_FOR_DYNACONF",
"DYNACONF_ALWAYS_FRESH_VARS": "FRESH_VARS_FOR_DYNACONF",
"BASE_NAMESPACE_FOR_DYNACONF": "DEFAULT_ENV_FOR_DYNACONF",
"GLOBAL_ENV_FOR_DYNACONF": "ENVVAR_PREFIX_FOR_DYNACONF",
}
def compat_kwargs(kwargs):
warn_deprecations(kwargs)
for old, new in RENAMED_VARS.items():
if old in kwargs:
kwargs[new] = kwargs[old]
# update cross references
for c_old, c_new in RENAMED_VARS.items():
if c_new == new:
kwargs[c_old] = kwargs[new]
class Missing(object):
def __bool__(self):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
# Ensure compatibility with Python 2.x
__nonzero__ = __bool__
def __repr__(self):
return "<dynaconf.missing>"
missing = Missing()
def deduplicate(list_object):
new = []
for item in list_object:
if item not in new:
new.append(item)
return new
def warn_deprecations(data):
for old, new in RENAMED_VARS.items():
if old in data:
warnings.warn(
"You are using %s which is a deprecated settings "
"replace it with %s" % (old, new),
DeprecationWarning,
)
def trimmed_split(s, seps=(";", ",")):
for sep in seps:
if sep not in s:
continue
data = [item.strip() for item in s.strip().split(sep)]
return data
return [s] # raw un-splitted
def ensure_a_list(data):
if not data:
return []
if isinstance(data, (list, tuple, set)):
return list(data)
if isinstance(data, str):
data = trimmed_split(data) # settings.toml,other.yaml
return data
return [data]
def build_env_list(obj, env):
# add the [default] env
env_list = [obj.get("DEFAULT_ENV_FOR_DYNACONF")]
# compatibility with older versions that still uses [dynaconf] as
# [default] env
global_env = obj.get("ENVVAR_PREFIX_FOR_DYNACONF") or "DYNACONF"
if global_env not in env_list:
env_list.append(global_env)
# add the current env
if obj.current_env and obj.current_env not in env_list:
env_list.append(obj.current_env)
# add a manually set env
if env and env not in env_list:
env_list.append(env)
# add the [global] env
env_list.append("GLOBAL")
# loaders are responsible to change to lower/upper cases
return [env.lower() for env in env_list]
def upperfy(key):
if "__" in key:
parts = key.split("__")
return "__".join([parts[0].upper()] + parts[1:])
return key.upper()
def multi_replace(text, patterns):
for old, new in patterns.items():
text = text.replace(old, new)
return text
def extract_json_objects(text, decoder=JSONDecoder()):
pos = 0
while True:
match = text.find("{", pos)
if match == -1:
break
try:
result, index = decoder.raw_decode(text[match:])
yield result
pos = match + index
except ValueError:
pos = match + 1
| true | true |
1c2b7048d9f5521d56350cccb5d7d760eff311ae | 6,678 | py | Python | nsff_scripts/flow_utils.py | k-washi/Neural-Scene-Flow-Fields | 7a954cf817cd8272e91f3438bed8114bcef7cc0a | [
"MIT"
] | null | null | null | nsff_scripts/flow_utils.py | k-washi/Neural-Scene-Flow-Fields | 7a954cf817cd8272e91f3438bed8114bcef7cc0a | [
"MIT"
] | null | null | null | nsff_scripts/flow_utils.py | k-washi/Neural-Scene-Flow-Fields | 7a954cf817cd8272e91f3438bed8114bcef7cc0a | [
"MIT"
] | null | null | null | import numpy as np
import os
import sys
import glob
import cv2
import scipy.io
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def read_img(img_dir, img1_name, img2_name):
    """Load the two named .png frames from ``img_dir`` via cv2.imread."""
    first_path = os.path.join(img_dir, img1_name + '.png')
    second_path = os.path.join(img_dir, img2_name + '.png')
    return cv2.imread(first_path), cv2.imread(second_path)
def refinement_flow(fwd_flow, img1, img2):
    """Refine an optical-flow field with OpenCV's variational refinement.

    Arguments:
        fwd_flow: initial flow estimate of shape (H, W, 2) -- presumably
            float32, as cv2 requires; confirm at the call site.
        img1, img2: the two BGR frames; converted to grayscale here.

    Returns:
        The refined flow field.
    """
    flow_refine = cv2.VariationalRefinement.create()
    refine_flow = flow_refine.calc(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY),
                                   cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY),
                                   fwd_flow)
    return refine_flow
def make_color_wheel():
    """
    Generate the Middlebury optical-flow color wheel.

    :return: (55, 3) array of RGB values; six hue segments (RY, YG, GC,
        CB, BM, MR) each hold one channel at 255 while another ramps
        linearly up or down.
    """
    # (segment length, channel held at 255, ramped channel, ramp ascending?)
    segments = [
        (15, 0, 1, True),   # RY
        (6, 1, 0, False),   # YG
        (4, 1, 2, True),    # GC
        (11, 2, 1, False),  # CB
        (13, 2, 0, True),   # BM
        (6, 0, 2, False),   # MR
    ]
    ncols = sum(length for length, _, _, _ in segments)
    colorwheel = np.zeros([ncols, 3])
    col = 0
    for length, const_ch, ramp_ch, ascending in segments:
        ramp = np.floor(255 * np.arange(0, length) / length)
        colorwheel[col:col + length, const_ch] = 255
        colorwheel[col:col + length, ramp_ch] = ramp if ascending else 255 - ramp
        col += length
    return colorwheel
def compute_color(u, v):
    """
    Map flow components (u, v) to the Middlebury optical-flow color code.

    :param u: horizontal flow map (mutated: NaN entries are zeroed)
    :param v: vertical flow map (mutated: NaN entries are zeroed)
    :return: (h, w, 3) color image; NaN pixels come out black
    """
    height, width = u.shape
    img = np.zeros([height, width, 3])
    # zero out NaNs but remember their positions for the final blanking
    invalid = np.isnan(u) | np.isnan(v)
    u[invalid] = 0
    v[invalid] = 0
    colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)
    magnitude = np.sqrt(u ** 2 + v ** 2)
    # angle in (-1, 1], then a fractional index into the wheel
    angle = np.arctan2(-v, -u) / np.pi
    fk = (angle + 1) / 2 * (ncols - 1) + 1
    k0 = np.floor(fk).astype(int)
    k1 = k0 + 1
    k1[k1 == ncols + 1] = 1
    frac = fk - k0
    for channel in range(0, np.size(colorwheel, 1)):
        wheel_channel = colorwheel[:, channel]
        col0 = wheel_channel[k0 - 1] / 255
        col1 = wheel_channel[k1 - 1] / 255
        col = (1 - frac) * col0 + frac * col1
        # small flows desaturate toward white; out-of-range flows darken
        in_range = magnitude <= 1
        col[in_range] = 1 - magnitude[in_range] * (1 - col[in_range])
        col[np.logical_not(in_range)] *= 0.75
        img[:, :, channel] = np.uint8(np.floor(255 * col * (1 - invalid)))
    return img
def flow_to_image(flow, display=False):
    """
    Convert a flow field into a Middlebury color-coded image.

    :param flow: (h, w, 2) optical flow map; "unknown" entries (magnitude
        above 100) are zeroed in place via the u/v views
    :param display: if True, print the flow statistics
    :return: uint8 color image with unknown-flow pixels set to black
    """
    UNKNOWN_FLOW_THRESH = 100
    u = flow[:, :, 0]
    v = flow[:, :, 1]
    unknown = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
    u[unknown] = 0
    v[unknown] = 0
    # statistics only feed the optional debug print below
    maxu = max(-999., np.max(u))
    minu = min(999., np.min(u))
    maxv = max(-999., np.max(v))
    minv = min(999., np.min(v))
    rad = np.sqrt(u ** 2 + v ** 2)
    maxrad = max(-1, np.max(rad))
    if display:
        print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu, maxu, minv, maxv))
    # normalize by the largest magnitude before color mapping
    u = u / (maxrad + np.finfo(float).eps)
    v = v / (maxrad + np.finfo(float).eps)
    img = compute_color(u, v)
    # blank out the unknown-flow pixels in all three channels
    img[np.repeat(unknown[:, :, np.newaxis], 3, axis=2)] = 0
    return np.uint8(img)
def warp_flow(img, flow):
    """Backward-warp ``img`` by ``flow`` using bicubic remapping.

    Arguments:
        img: source image.
        flow: (h, w, 2) displacement field; the pixel grid coordinates are
            added to turn it into an absolute sampling map for cv2.remap.

    Returns:
        The warped image (constant/zero border outside the source).
    """
    h, w = flow.shape[:2]
    flow_new = flow.copy()
    # relative displacements -> absolute sample coordinates
    flow_new[:,:,0] += np.arange(w)
    flow_new[:,:,1] += np.arange(h)[:,np.newaxis]
    res = cv2.remap(img, flow_new, None, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
    return res
def resize_flow(flow, img_h, img_w):
    """Resize a flow field to (img_h, img_w), rescaling the flow vectors.

    Fixes two defects in the previous version: ``flow_h``/``flow_w`` were
    referenced but never defined after the file-loading lines were
    commented out (NameError on every call), and the interpolation flag
    was passed positionally into cv2.resize's ``dst`` slot instead of the
    ``interpolation`` keyword.

    Arguments:
        flow: (flow_h, flow_w, 2) flow field; the vector rescaling mutates
            it in place before resizing.
        img_h, img_w: target height and width.

    Returns:
        The resized flow field of shape (img_h, img_w, 2).
    """
    flow_h, flow_w = flow.shape[0], flow.shape[1]
    # scale vector magnitudes by the per-axis resize ratio
    flow[:, :, 0] *= float(img_w) / float(flow_w)
    flow[:, :, 1] *= float(img_h) / float(flow_h)
    flow = cv2.resize(flow, (img_w, img_h), interpolation=cv2.INTER_LINEAR)
    return flow
def extract_poses(im):
    """Build a 4x4 homogeneous world-to-camera matrix from a COLMAP image.

    Arguments:
        im: COLMAP image record providing ``qvec2rotmat()`` and a
            3-element ``tvec``.

    Returns:
        (4, 4) array [[R, t], [0, 0, 0, 1]].
    """
    rotation = im.qvec2rotmat()
    translation = im.tvec.reshape([3, 1])
    upper = np.concatenate([rotation, translation], 1)
    bottom_row = np.array([0, 0, 0, 1.]).reshape([1, 4])
    return np.concatenate([upper, bottom_row], 0)
def load_colmap_data(realdir):
    """Read COLMAP sparse-reconstruction metadata from ``realdir``.

    Arguments:
        realdir: scene directory containing sparse/cameras.bin and
            sparse/images.bin.

    Returns:
        (imdata, perm, img_keys, hwf): the image records, the permutation
        sorting them by file name, their dict keys, and a (3, 1) array of
        [height, width, focal] taken from the first camera.
    """
    import colmap_read_model as read_model
    camerasfile = os.path.join(realdir, 'sparse/cameras.bin')
    camdata = read_model.read_cameras_binary(camerasfile)
    list_of_keys = list(camdata.keys())
    # assumes all images share the first camera's intrinsics -- TODO confirm
    cam = camdata[list_of_keys[0]]
    print( 'Cameras', len(cam))
    h, w, f = cam.height, cam.width, cam.params[0]
    # w, h, f = factor * w, factor * h, factor * f
    hwf = np.array([h,w,f]).reshape([3,1])
    imagesfile = os.path.join(realdir, 'sparse/images.bin')
    imdata = read_model.read_images_binary(imagesfile)
    w2c_mats = []
    # bottom = np.array([0,0,0,1.]).reshape([1,4])
    names = [imdata[k].name for k in imdata]
    img_keys = [k for k in imdata]
    print( 'Images #', len(names))
    # sort by file name so the pose order follows capture order
    perm = np.argsort(names)
    return imdata, perm, img_keys, hwf
def skew(x):
    """Return the 3x3 skew-symmetric (cross-product) matrix of vector x,
    so that skew(x) @ v == np.cross(x, v)."""
    a, b, c = x[0], x[1], x[2]
    return np.array([[0, -c, b],
                     [c, 0, -a],
                     [-b, a, 0]])
def compute_epipolar_distance(T_21, K, p_1, p_2):
    """One-sided geometric epipolar distance of point correspondences.

    Arguments:
        T_21: (4, 4) relative pose taking frame 1 into frame 2.
        K: (3, 3) camera intrinsics (shared by both views).
        p_1, p_2: (3, N) homogeneous pixel coordinates in frames 1 and 2.

    Returns:
        (N,) absolute distances of p_2 from the epipolar lines F @ p_1.
    """
    rotation = T_21[:3, :3]
    translation = T_21[:3, 3]
    essential = np.dot(skew(translation), rotation)
    # fundamental matrix from the essential matrix and the intrinsics
    K_inv = np.linalg.inv(K)
    fundamental = np.dot(np.dot(K_inv.T, essential), K_inv)
    epi_lines = np.dot(fundamental, p_1)
    algebraic = np.sum(p_2 * epi_lines, axis=0)
    # normalize by the line gradient; epsilon guards division by zero
    norm = np.sqrt(epi_lines[0, :] ** 2 + epi_lines[1, :] ** 2) + 1e-8
    return np.abs(algebraic / norm)
def read_optical_flow(basedir, img_i_name, read_fwd):
    """Load a precomputed flow field for one frame from <basedir>/flow_i1.

    Arguments:
        basedir: scene directory containing the ``flow_i1`` folder.
        img_i_name: frame file name; the last four characters (the file
            extension) are stripped to form the .npz prefix.
        read_fwd: True loads "<name>_fwd.npz", False loads "<name>_bwd.npz".

    Returns:
        The 'flow' array stored in the chosen .npz archive (the stored
        'mask' array is left untouched).
    """
    flow_dir = os.path.join(basedir, 'flow_i1')
    stem = img_i_name[:-4]
    suffix = 'fwd' if read_fwd else 'bwd'
    archive = np.load(os.path.join(flow_dir, '%s_%s.npz' % (stem, suffix)))
    return archive['flow']
| 25.48855 | 117 | 0.608715 | import numpy as np
import os
import sys
import glob
import cv2
import scipy.io
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def read_img(img_dir, img1_name, img2_name):
return cv2.imread(os.path.join(img_dir, img1_name + '.png')), cv2.imread(os.path.join(img_dir, img2_name + '.png'))
def refinement_flow(fwd_flow, img1, img2):
flow_refine = cv2.VariationalRefinement.create()
refine_flow = flow_refine.calc(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY),
cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY),
fwd_flow)
return refine_flow
def make_color_wheel():
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
col += + BM
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
def compute_color(u, v):
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def flow_to_image(flow, display=False):
UNKNOWN_FLOW_THRESH = 100
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u**2 + v**2)
maxrad = max(-1, np.max(rad))
if display:
print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu,maxu, minv, maxv))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow_new = flow.copy()
flow_new[:,:,0] += np.arange(w)
flow_new[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow_new, None, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
return res
def resize_flow(flow, img_h, img_w):
    """Resize a (H, W, 2) flow field to (img_h, img_w).

    The u/v components are rescaled in proportion to the size change so the
    vectors remain correct at the new resolution. NOTE: the component
    scaling happens in place, so the caller's array is modified.
    """
    # Bug fix: flow_h/flow_w were previously undefined names (NameError on
    # every call) -- derive them from the input's shape.
    flow_h, flow_w = flow.shape[:2]
    flow[:, :, 0] *= float(img_w)/float(flow_w)
    flow[:, :, 1] *= float(img_h)/float(flow_h)
    # Bug fix: interpolation must be passed by keyword; the third positional
    # argument of cv2.resize is `dst`, not `interpolation`.
    flow = cv2.resize(flow, (img_w, img_h), interpolation=cv2.INTER_LINEAR)
    return flow
def extract_poses(im):
    """Build the 4x4 homogeneous world-to-camera matrix [R | t; 0 0 0 1]
    from a COLMAP image record (needs .qvec2rotmat() and .tvec)."""
    rot = im.qvec2rotmat()
    trans = im.tvec.reshape([3, 1])
    upper = np.concatenate([rot, trans], 1)
    lower = np.array([0, 0, 0, 1.]).reshape([1, 4])
    return np.concatenate([upper, lower], 0)
def load_colmap_data(realdir):
    """Read a COLMAP sparse reconstruction from `realdir`.

    Returns (image records dict, name-sorted permutation, image keys,
    [height; width; focal] column vector from the first camera).
    """
    import colmap_read_model as read_model
    cameras_path = os.path.join(realdir, 'sparse/cameras.bin')
    camdata = read_model.read_cameras_binary(cameras_path)
    # Intrinsics come from the first camera only.
    cam = camdata[list(camdata.keys())[0]]
    print('Cameras', len(cam))
    hwf = np.array([cam.height, cam.width, cam.params[0]]).reshape([3, 1])
    images_path = os.path.join(realdir, 'sparse/images.bin')
    imdata = read_model.read_images_binary(images_path)
    names = [imdata[k].name for k in imdata]
    img_keys = list(imdata)
    print('Images #', len(names))
    # Permutation that orders images by filename.
    perm = np.argsort(names)
    return imdata, perm, img_keys, hwf
def skew(x):
    """3x3 cross-product matrix [x]_x of a 3-vector, so skew(x).dot(v) == cross(x, v)."""
    row0 = [0, -x[2], x[1]]
    row1 = [x[2], 0, -x[0]]
    row2 = [-x[1], x[0], 0]
    return np.array([row0, row1, row2])


def compute_epipolar_distance(T_21, K, p_1, p_2):
    """Geometric distance of points p_2 from the epipolar lines induced by
    p_1 under relative pose T_21 and shared intrinsics K.

    p_1, p_2: homogeneous pixel coordinates, shape (3, N). Returns (N,).
    """
    rot = T_21[:3, :3]
    trans = T_21[:3, 3]
    essential = np.dot(skew(trans), rot)
    K_inv = np.linalg.inv(K)
    fundamental = np.dot(np.dot(K_inv.T, essential), K_inv)
    # One epipolar line (a, b, c) per column of p_1.
    epi_lines = np.dot(fundamental, p_1)
    algebraic = np.sum(p_2 * epi_lines, axis=0)
    # Normalize by the line-direction magnitude; epsilon avoids divide-by-zero.
    scale = np.sqrt(epi_lines[0, :]**2 + epi_lines[1, :]**2) + 1e-8
    return np.abs(algebraic / scale)
def read_optical_flow(basedir, img_i_name, read_fwd):
    """Load the precomputed forward or backward optical flow for one image.

    Expects '<stem>_fwd.npz' / '<stem>_bwd.npz' archives (keys 'flow' and
    'mask') under <basedir>/flow_i1; only the flow array is returned.
    """
    flow_dir = os.path.join(basedir, 'flow_i1')
    if read_fwd:
        flow_path = os.path.join(flow_dir, '%s_fwd.npz'%img_i_name[:-4])
    else:
        flow_path = os.path.join(flow_dir, '%s_bwd.npz'%img_i_name[:-4])
    archive = np.load(flow_path)
    flow, _mask = archive['flow'], archive['mask']
    return flow
| true | true |
1c2b7048e14c09b0f05388ac43fd85007e596a93 | 7,140 | py | Python | extra/gen_stubs.py | henry4k/dummy | 25710b38774e04a1ef9570baabd9f02b7a14e17e | [
"Unlicense"
] | null | null | null | extra/gen_stubs.py | henry4k/dummy | 25710b38774e04a1ef9570baabd9f02b7a14e17e | [
"Unlicense"
] | null | null | null | extra/gen_stubs.py | henry4k/dummy | 25710b38774e04a1ef9570baabd9f02b7a14e17e | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3.3
import re
import sys
import os.path
import argparse
class CParameter():
    """A single C function parameter (its type and its name)."""

    def __init__(self, name, type):
        self.name = name
        self.type = type

    def __str__(self):
        # Rendered as it appears in a declaration, e.g. "int count".
        return '%s %s' % (self.type, self.name)

    def __repr__(self):
        return "CParameter(name='%s', type='%s')" % (self.name, self.type)
class CFunction():
    """A parsed C function signature: name, return type and parameter list."""

    def __init__(self, name, return_type, parameters):
        self.name = name
        self.return_type = return_type
        self.parameters = parameters

    def __str__(self):
        params = ', '.join(str(p) for p in self.parameters)
        return '%s %s(%s)' % (self.return_type, self.name, params)

    def __repr__(self):
        params = ', '.join(repr(p) for p in self.parameters)
        return "CFunction(name='%s', return_type='%s', parameters=[%s])" % (
            self.name, self.return_type, params)
parameter_pattern = re.compile(r'''
(?P<type> [a-zA-Z_:][a-zA-Z_:*0-9 ]+)
\s+
(?P<name> [a-zA-Z_:]+)
''', re.VERBOSE)
function_pattern = re.compile(r'''
^\s*
(?P<return_type> [a-zA-Z_:][a-zA-Z_:*0-9 ]+)
\s+
(?P<name> [a-zA-Z_:0-9]+)
\s*
\(
(?P<parameters> [^()]*)
\)
\s*
;\s*$
''', re.VERBOSE)
#typedef void (*LogHandler)( LogLevel level, const char* line );
class_pattern = re.compile(r'^\s*class\s*')
# Message types present as keys here are suppressed by log().
log_filter = {}


def log(type, format, *args):
    """Print a categorized message; INFO goes to stdout, everything else to
    stderr. Types listed in log_filter are silently dropped.

    (Parameter names shadow the builtins `type`/`format`; kept for
    backward compatibility with existing callers.)
    """
    if type in log_filter:
        return
    message = format.format(*args)
    stream = sys.stdout if type == 'INFO' else sys.stderr
    print('{}: {}'.format(type, message), file=stream)
def parse_cparameters(parameters):
    """Yield a CParameter for each comma-separated "type name" entry.

    Raises RuntimeError for '...' (variadics are unsupported) or for any
    entry parameter_pattern cannot parse.
    """
    for raw_entry in parameters.split(','):
        entry = raw_entry.strip()
        if entry == '':
            # Empty parameter list (or stray comma) -- nothing to emit.
            continue
        if entry == '...':
            raise RuntimeError('Functions with variadic arguments can\'t be stubbed.')
        match = parameter_pattern.search(entry)
        if not match:
            raise RuntimeError('Can\'t parse parameter: "'+entry+'" (This is probably a bug.)')
        yield CParameter(name=match.group('name'), type=match.group('type'))
def try_parse_cfunction(function_string):
    """Parse a prototype line into a CFunction, or return None if it is not one.

    Raises RuntimeError when the line opens a class definition (methods
    cannot be stubbed).
    """
    match = function_pattern.search(function_string)
    if match is None:
        # Not a prototype -- but flag class definitions explicitly.
        if class_pattern.match(function_string):
            raise RuntimeError('Found a class definition. Methods can\'t be stubbed yet.')
        return None
    params = list(parse_cparameters(match.group('parameters')))
    return CFunction(name=match.group('name'),
                     return_type=match.group('return_type'),
                     parameters=params)
def get_cfunction_stub_pointer_name(function):
    """Name of the global function pointer backing the stub for *function*."""
    return '%s_stub' % function.name


def write_cfunction_stub_pointer(function, file, extern):
    """Emit the stub pointer: an extern declaration (for the header) or a
    NULL-initialized definition (for the implementation file)."""
    if extern:
        template = 'extern {return_type} (*{pointer_name})({parameters});\n'
    else:
        template = '{return_type} (*{pointer_name})({parameters}) = NULL;\n'
    file.write(template.format(
        return_type=function.return_type,
        pointer_name=get_cfunction_stub_pointer_name(function),
        parameters=', '.join(p.type for p in function.parameters)))


def write_cfunction_stub_implementation(function, file):
    """Emit a C function that forwards to the stub pointer, aborting the
    running test if no stub callback was installed."""
    body_template = \
'''{return_type} {name}({parameter_declarations})
{{
    if(!{pointer_name})
        dummyAbortTest(DUMMY_FAIL_TEST, "Called {name} without stub callback.");
    return {pointer_name}({parameter_names});
}}
'''
    file.write(body_template.format(
        return_type=function.return_type,
        name=function.name,
        parameter_declarations=', '.join(str(p) for p in function.parameters),
        parameter_names=', '.join(p.name for p in function.parameters),
        pointer_name=get_cfunction_stub_pointer_name(function)))
def get_stub_header_name(language, name):
    """Filename of the generated stub header (`language` is unused here but
    kept for symmetry with get_stub_implementation_name)."""
    return '%s_stub.h' % name
def get_stub_implementation_name(language, name):
    """Filename of the generated stub source; `language` becomes the extension."""
    return '%s_stub.%s' % (name, language)
def write_stub_header(language, name, header_name, functions):
    """Generate the stub header: it includes the original header and declares
    one extern stub pointer per parsed function."""
    out_name = get_stub_header_name(language, name)
    with open(out_name, 'w', encoding='UTF-8') as out:
        out.write('#include "{}"\n'.format(header_name))
        out.write('\n')
        for fn in functions:
            write_cfunction_stub_pointer(fn, out, extern=True)
def write_stub_implementation(language, name, functions):
    """Generate the stub source file: for each function, a NULL-initialized
    stub pointer plus a forwarding implementation behind it."""
    out_name = get_stub_implementation_name(language, name)
    with open(out_name, 'w', encoding='UTF-8') as out:
        out.write('#include <stddef.h> // NULL\n')
        out.write('#include <dummy/core.h> // dummyAbortTest\n')
        out.write('#include "{}"\n'.format(get_stub_header_name(language, name)))
        out.write('\n')
        for fn in functions:
            write_cfunction_stub_pointer(fn, out, extern=False)
            write_cfunction_stub_implementation(fn, out)
            out.write('\n')
# Command-line entry point: scan each header given on the command line for
# one-line C prototypes and generate a matching stub header + source file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate stubs for C functions.')
    parser.add_argument('-q', '--quiet',
                        action='store_true')
    parser.add_argument('--lang',
                        default='c',
                        choices=['c','cpp'])
    parser.add_argument('headers',
                        metavar='header',
                        nargs='+')
    args = parser.parse_args()
    # --quiet suppresses INFO-level messages (see log()/log_filter).
    if args.quiet:
        log_filter['INFO'] = True
    lang = args.lang
    headers = args.headers
    for header in headers:
        # Output files are named after the header, e.g. foo.h -> foo_stub.h/.c
        module_name = os.path.splitext(os.path.basename(header))[0]
        with open(header, 'r', encoding='UTF-8') as file:
            functions = []
            for line_number, line in enumerate(file):
                # 1-based line numbers for human-readable diagnostics.
                location = str.format('{}:{}', header, line_number+1)
                try:
                    function = try_parse_cfunction(line)
                    if function:
                        functions.append(function)
                        log('INFO', '{} Found {}', location, function)
                except RuntimeError as error:
                    # Unsupported constructs (variadics, classes) are
                    # reported but do not abort the run.
                    log('WARN', '{} {}', location, error)
        write_stub_header(language=lang,
                          name=module_name,
                          header_name=header,
                          functions=functions)
        write_stub_implementation(language=lang,
                                  name=module_name,
                                  functions=functions)
import re
import sys
import os.path
import argparse
class CParameter():
def __init__(self, name, type):
self.name = name
self.type = type
def __str__(self):
return '{} {}'.format(
self.type,
self.name)
def __repr__(self):
return "CParameter(name='{}', type='{}')".format(
self.name,
self.type)
class CFunction():
def __init__(self, name, return_type, parameters):
self.name = name
self.return_type = return_type
self.parameters = parameters
def __str__(self):
return '{} {}({})'.format(
self.return_type,
self.name,
', '.join(str(p) for p in self.parameters))
def __repr__(self):
return "CFunction(name='{}', return_type='{}', parameters=[{}])".format(
self.name,
self.return_type,
', '.join(repr(p) for p in self.parameters))
parameter_pattern = re.compile(r'''
(?P<type> [a-zA-Z_:][a-zA-Z_:*0-9 ]+)
\s+
(?P<name> [a-zA-Z_:]+)
''', re.VERBOSE)
function_pattern = re.compile(r'''
^\s*
(?P<return_type> [a-zA-Z_:][a-zA-Z_:*0-9 ]+)
\s+
(?P<name> [a-zA-Z_:0-9]+)
\s*
\(
(?P<parameters> [^()]*)
\)
\s*
;\s*$
''', re.VERBOSE)
class_pattern = re.compile(r'^\s*class\s*')
log_filter = {}
def log(type, format, *args):
if type in log_filter:
return
else:
message = str.format(format, *args)
output = sys.stdout
if type != 'INFO':
output = sys.stderr
print(str.format('{}: {}', type, message), file=output)
def parse_cparameters(parameters):
for parameter_string in parameters.split(','):
parameter_string = parameter_string.strip()
parameter_match = parameter_pattern.search(parameter_string)
if parameter_match:
name = parameter_match.group('name')
type = parameter_match.group('type')
yield CParameter(name=name, type=type)
elif parameter_string == '':
continue
elif parameter_string == '...':
raise RuntimeError('Functions with variadic arguments can\'t be stubbed.')
else:
raise RuntimeError('Can\'t parse parameter: "'+parameter_string+'" (This is probably a bug.)')
def try_parse_cfunction(function_string):
function_match = function_pattern.search(function_string)
if function_match:
name = function_match.group('name')
return_type = function_match.group('return_type')
parameters = list(parse_cparameters(function_match.group('parameters')))
return CFunction(name=name,
return_type=return_type,
parameters=parameters)
else:
if class_pattern.match(function_string):
raise RuntimeError('Found a class definition. Methods can\'t be stubbed yet.')
return None
def get_cfunction_stub_pointer_name(function):
return function.name+'_stub'
def write_cfunction_stub_pointer(function, file, extern):
stub_pointer_template = None
if extern:
stub_pointer_template = \
'extern {return_type} (*{pointer_name})({parameters});\n'
else:
stub_pointer_template = \
'{return_type} (*{pointer_name})({parameters}) = NULL;\n'
pointer_name = get_cfunction_stub_pointer_name(function)
parameters = ', '.join(p.type for p in function.parameters)
file.write(stub_pointer_template.format(
return_type=function.return_type,
pointer_name=pointer_name,
parameters=parameters))
def write_cfunction_stub_implementation(function, file):
implementation_template = \
'''{return_type} {name}({parameter_declarations})
{{
if(!{pointer_name})
dummyAbortTest(DUMMY_FAIL_TEST, "Called {name} without stub callback.");
return {pointer_name}({parameter_names});
}}
'''
pointer_name = get_cfunction_stub_pointer_name(function)
parameter_declarations = ', '.join(str(p) for p in function.parameters)
parameter_names = ', '.join(p.name for p in function.parameters)
file.write(implementation_template.format(
return_type=function.return_type,
name=function.name,
parameter_declarations=parameter_declarations,
parameter_names=parameter_names,
pointer_name=pointer_name))
def get_stub_header_name(language, name):
return name+'_stub.h'
def get_stub_implementation_name(language, name):
return name+'_stub.'+language
def write_stub_header(language, name, header_name, functions):
file_name = get_stub_header_name(language, name)
with open(file_name, 'w', encoding='UTF-8') as file:
file.write('
file.write('\n')
for function in functions:
write_cfunction_stub_pointer(function, file, extern=True)
def write_stub_implementation(language, name, functions):
file_name = get_stub_implementation_name(language, name)
with open(file_name, 'w', encoding='UTF-8') as file:
file.write('
file.write('
file.write('
file.write('\n')
for function in functions:
write_cfunction_stub_pointer(function, file, extern=False)
write_cfunction_stub_implementation(function, file)
file.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate stubs for C functions.')
parser.add_argument('-q', '--quiet',
action='store_true')
parser.add_argument('--lang',
default='c',
choices=['c','cpp'])
parser.add_argument('headers',
metavar='header',
nargs='+')
args = parser.parse_args()
if args.quiet:
log_filter['INFO'] = True
lang = args.lang
headers = args.headers
for header in headers:
module_name = os.path.splitext(os.path.basename(header))[0]
with open(header, 'r', encoding='UTF-8') as file:
functions = []
for line_number, line in enumerate(file):
location = str.format('{}:{}', header, line_number+1)
try:
function = try_parse_cfunction(line)
if function:
functions.append(function)
log('INFO', '{} Found {}', location, function)
except RuntimeError as error:
log('WARN', '{} {}', location, error)
write_stub_header(language=lang,
name=module_name,
header_name=header,
functions=functions)
write_stub_implementation(language=lang,
name=module_name,
functions=functions) | true | true |
1c2b71b4e3dc14512b5483ec779dc897e2647111 | 4,854 | py | Python | LIP_model.py | julijanjug/lip2dense_v2 | 8a1147f7da1949908b703ba13cbb4dc454d22161 | [
"MIT"
] | null | null | null | LIP_model.py | julijanjug/lip2dense_v2 | 8a1147f7da1949908b703ba13cbb4dc454d22161 | [
"MIT"
] | null | null | null | LIP_model.py | julijanjug/lip2dense_v2 | 8a1147f7da1949908b703ba13cbb4dc454d22161 | [
"MIT"
] | null | null | null | import tensorflow as tf
from utils.ops import *
#------------------------network setting---------------------
#################################################
## refine net version 4. 07.17
def pose_net(image, name):
    """Pose head: a stack of convolutions ending in a 16-channel output
    (presumably one heatmap per joint -- confirm against the training code).

    Returns (final 16-channel output, the conv6 features consumed by
    pose_refine).
    """
    with tf.variable_scope(name) as scope:
        is_BN = False
        # (filters, kernel, relu) per layer; scope names match the originals
        # so existing checkpoints keep loading.
        specs = [(512, 3, True), (512, 3, True), (256, 3, True), (256, 3, True),
                 (256, 3, True), (256, 3, True), (512, 1, True), (16, 1, False)]
        net = image
        taps = []
        for idx, (filters, kernel, use_relu) in enumerate(specs, start=1):
            net = conv2d(net, filters, kernel, 1, relu=use_relu, bn=is_BN,
                         name='pose_conv%d' % idx)
            taps.append(net)
        return taps[7], taps[5]
def pose_refine(pose, parsing, pose_fea, name):
    """Refinement stage for the 16-channel pose output.

    Fuses the current pose output, the parsing logits and the intermediate
    pose features, then regresses a refined 16-channel result.
    Returns (refined output conv6, intermediate features conv4).
    """
    with tf.variable_scope(name) as scope:
        is_BN = False
        # 1x1 convolutions remap both inputs to 128 channels so they match
        # before fusion with the intermediate features.
        pose = conv2d(pose, 128, 1, 1, relu=True, bn=is_BN, name='pose_remap')
        parsing = conv2d(parsing, 128, 1, 1, relu=True, bn=is_BN, name='parsing_remap')
        # Channel-wise concatenation of the three streams.
        pos_par = tf.concat([pose, parsing, pose_fea], 3)
        # Growing receptive field: kernels 3 -> 5 -> 7 -> 9, then two 1x1s.
        conv1 = conv2d(pos_par, 512, 3, 1, relu=True, bn=is_BN, name='conv1')
        conv2 = conv2d(conv1, 256, 5, 1, relu=True, bn=is_BN, name='conv2')
        conv3 = conv2d(conv2, 256, 7, 1, relu=True, bn=is_BN, name='conv3')
        conv4 = conv2d(conv3, 256, 9, 1, relu=True, bn=is_BN, name='conv4')
        conv5 = conv2d(conv4, 256, 1, 1, relu=True, bn=is_BN, name='conv5')
        conv6 = conv2d(conv5, 16, 1, 1, relu=False, bn=is_BN, name='conv6')
        return conv6, conv4
def parsing_refine(parsing, pose, parsing_fea, name):
    """Refinement stage for the 20-channel parsing logits, conditioned on the
    pose output and intermediate parsing features.

    Returns (refined parsing logits, intermediate features parsing_conv4).
    """
    with tf.variable_scope(name) as scope:
        is_BN = False
        # Remap both inputs to 128 channels before fusing.
        pose = conv2d(pose, 128, 1, 1, relu=True, bn=is_BN, name='pose_remap')
        parsing = conv2d(parsing, 128, 1, 1, relu=True, bn=is_BN, name='parsing_remap')
        net = tf.concat([parsing, pose, parsing_fea], 3)
        # Growing receptive field (3 -> 5 -> 7 -> 9 -> 1); scope names match
        # the original layer names so checkpoints keep loading.
        feats = []
        for idx, (filters, kernel) in enumerate(
                [(512, 3), (256, 5), (256, 7), (256, 9), (256, 1)], start=1):
            net = conv2d(net, filters, kernel, 1, relu=True, bn=is_BN,
                         name='parsing_conv%d' % idx)
            feats.append(net)
        # ASPP-style atrous branches over the final features, summed.
        branches = [atrous_conv2d(net, 20, 3, rate=rate, relu=False,
                                  name='parsing_human%d' % idx)
                    for idx, rate in enumerate([6, 12, 18, 24], start=1)]
        parsing_human = tf.add_n(branches, name='parsing_human')
        return parsing_human, feats[3]
#################################################
# My code for custom models
def parsing_refine_no_pose(parsing, parsing_fea, name):
    """Variant of parsing_refine that fuses only the parsing logits and the
    intermediate parsing features (no pose stream).

    Returns (refined parsing logits, intermediate features parsing_conv4).
    """
    with tf.variable_scope(name) as scope:
        is_BN = False
        parsing = conv2d(parsing, 128, 1, 1, relu=True, bn=is_BN, name='parsing_remap')
        net = tf.concat([parsing, parsing_fea], 3)
        # Same trunk as parsing_refine; scope names preserved for checkpoints.
        feats = []
        for idx, (filters, kernel) in enumerate(
                [(512, 3), (256, 5), (256, 7), (256, 9), (256, 1)], start=1):
            net = conv2d(net, filters, kernel, 1, relu=True, bn=is_BN,
                         name='parsing_conv%d' % idx)
            feats.append(net)
        # ASPP-style atrous branches over the final features, summed.
        branches = [atrous_conv2d(net, 20, 3, rate=rate, relu=False,
                                  name='parsing_human%d' % idx)
                    for idx, rate in enumerate([6, 12, 18, 24], start=1)]
        parsing_human = tf.add_n(branches, name='parsing_human')
        return parsing_human, feats[3]
| 53.933333 | 118 | 0.666873 | import tensorflow as tf
from utils.ops import *
r, 512, 3, 1, relu=True, bn=is_BN, name='conv1')
conv2 = conv2d(conv1, 256, 5, 1, relu=True, bn=is_BN, name='conv2')
conv3 = conv2d(conv2, 256, 7, 1, relu=True, bn=is_BN, name='conv3')
conv4 = conv2d(conv3, 256, 9, 1, relu=True, bn=is_BN, name='conv4')
conv5 = conv2d(conv4, 256, 1, 1, relu=True, bn=is_BN, name='conv5')
conv6 = conv2d(conv5, 16, 1, 1, relu=False, bn=is_BN, name='conv6')
return conv6, conv4
def parsing_refine(parsing, pose, parsing_fea, name):
with tf.variable_scope(name) as scope:
is_BN = False
pose = conv2d(pose, 128, 1, 1, relu=True, bn=is_BN, name='pose_remap')
parsing = conv2d(parsing, 128, 1, 1, relu=True, bn=is_BN, name='parsing_remap')
par_pos = tf.concat([parsing, pose, parsing_fea], 3)
parsing_conv1 = conv2d(par_pos, 512, 3, 1, relu=True, bn=is_BN, name='parsing_conv1')
parsing_conv2 = conv2d(parsing_conv1, 256, 5, 1, relu=True, bn=is_BN, name='parsing_conv2')
parsing_conv3 = conv2d(parsing_conv2, 256, 7, 1, relu=True, bn=is_BN, name='parsing_conv3')
parsing_conv4 = conv2d(parsing_conv3, 256, 9, 1, relu=True, bn=is_BN, name='parsing_conv4')
parsing_conv5 = conv2d(parsing_conv4, 256, 1, 1, relu=True, bn=is_BN, name='parsing_conv5')
parsing_human1 = atrous_conv2d(parsing_conv5, 20, 3, rate=6, relu=False, name='parsing_human1')
parsing_human2 = atrous_conv2d(parsing_conv5, 20, 3, rate=12, relu=False, name='parsing_human2')
parsing_human3 = atrous_conv2d(parsing_conv5, 20, 3, rate=18, relu=False, name='parsing_human3')
parsing_human4 = atrous_conv2d(parsing_conv5, 20, 3, rate=24, relu=False, name='parsing_human4')
parsing_human = tf.add_n([parsing_human1, parsing_human2, parsing_human3, parsing_human4], name='parsing_human')
return parsing_human, parsing_conv4
_human = tf.add_n([parsing_human1, parsing_human2, parsing_human3, parsing_human4], name='parsing_human')
return parsing_human, parsing_conv4
| true | true |
1c2b73c0082d64b6808b7729e0139460fbb9f8af | 137,915 | py | Python | api_server/switch_api/services/osemo/Code/OSESMO_SC2019.py | cliftbar/switch_suncode2019 | cad8fcca50a4848ba946f39aeaa624a230af679d | [
"MIT"
] | null | null | null | api_server/switch_api/services/osemo/Code/OSESMO_SC2019.py | cliftbar/switch_suncode2019 | cad8fcca50a4848ba946f39aeaa624a230af679d | [
"MIT"
] | null | null | null | api_server/switch_api/services/osemo/Code/OSESMO_SC2019.py | cliftbar/switch_suncode2019 | cad8fcca50a4848ba946f39aeaa624a230af679d | [
"MIT"
] | 1 | 2019-08-31T01:10:10.000Z | 2019-08-31T01:10:10.000Z | ## Script Description Header
# File Name: OSESMO.py
# File Location: "~/Desktop/OSESMO Git Repository"
# Project: Open-Source Energy Storage Model (OSESMO)
# Description: Simulates operation of energy storage system.
# Calculates customer savings, GHG reduction, and battery cycling.
import os
import math as math
import time as time
import datetime as datetime
import numpy as np
import pandas as pd
from cvxopt import matrix, sparse, solvers
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def OSESMO(Modeling_Team_Input=None, Model_Run_Number_Input=None, Model_Type_Input=None,
Model_Timestep_Resolution=None, Customer_Class_Input=None, Load_Profile_Name_Input=None,
Retail_Rate_Name_Input=None, Solar_Profile_Name_Input=None, Solar_Size_Input=None,
Storage_Type_Input=None, Storage_Power_Rating_Input=None, Usable_Storage_Capacity_Input=None,
Single_Cycle_RTE_Input=None, Parasitic_Storage_Load_Input=None,
Storage_Control_Algorithm_Name=None, GHG_Reduction_Solution_Input=None, Equivalent_Cycling_Constraint_Input=None,
Annual_RTE_Constraint_Input=None, ITC_Constraint_Input=None,
Carbon_Adder_Incentive_Value_Input=None, Emissions_Forecast_Signal_Input=None,
OSESMO_Git_Repo_Directory=None, Input_Output_Data_Directory_Location=None, Start_Time_Input=None,
Show_Plots=None, Export_Plots=None, Export_Data=None,
Solar_Installed_Cost_per_kW=None, Storage_Installed_Cost_per_kWh=None, Estimated_Future_Lithium_Ion_Battery_Installed_Cost_per_kWh=None,
Cycle_Life=None, Storage_Depth_of_Discharge=None, Initial_Final_SOC=None, End_of_Month_Padding_Days=None):
## Calculate Model Variable Values from User-Specified Input Values
# Convert model timestep resolution input from minutes to hours.
# This is a more useful format for the model to use.
delta_t = (Model_Timestep_Resolution / 60) # Model timestep resolution, in hours.
# Convert storage efficiency from round-trip efficiency to charge and discharge efficiency.
# Charge efficiency and discharge efficiency assumed to be square root of round-trip efficiency (Eff_c = Eff_d).
# Round-trip efficiency taken from Lazard's Levelized Cost of Storage report (2017), pg. 130
# https://www.lazard.com/media/450338/lazard-levelized-cost-of-storage-version-30.pdf
Eff_c = math.sqrt(Single_Cycle_RTE_Input)
Eff_d = math.sqrt(Single_Cycle_RTE_Input)
# Parasitic storage load (kW) calculated based on input value, which is
# given as a percentage of Storage Power Rating.
Parasitic_Storage_Load = Storage_Power_Rating_Input * Parasitic_Storage_Load_Input
# Set Carbon Adder to $0/metric ton if GHG Reduction Solution is not GHG Signal Co-Optimization.
# This serves as error-handling in case the user sets the Carbon Adder to a
# non-zero value, and sets the GHG Reduction Solution to something other
# than GHG Signal Co-Optimization.
if GHG_Reduction_Solution_Input != "GHG Signal Co-Optimization":
Carbon_Adder_Incentive_Value_Input = 0 # Value of carbon adder, in $ per metric ton.
Emissions_Forecast_Signal_Input = "No Emissions Forecast Signal" # Ensures consistent outputs.
# Set Solar Profile Name Input to "No Solar", set Solar Size Input to 0 kW,
# and set ITC Constraint to 0 if Model Type Input is Storage Only.
# This serves as error handling.
if Model_Type_Input == "Storage Only":
Solar_Profile_Name_Input = "No Solar"
Solar_Size_Input = 0
ITC_Constraint_Input = 0
# Throw an error if Model Type Input is set to Solar Plus Storage
# and Solar Profile Name Input is set to "No Solar",
# or if Solar Size Input is set to 0 kW.
if Model_Type_Input == "Solar Plus Storage":
if Solar_Profile_Name_Input == "No Solar":
print("Solar Plus Storage Model selected, but No Solar Profile Name Input selected.")
if Solar_Size_Input == 0:
print("Solar Plus Storage Model selected, but Solar Size Input set to 0 kW.")
# Throw an error if Storage Control Algorithm set to OSESMO Non-Economic
# Solar Self-Supply, and Model Type Input is set to Storage Only,
# or if Solar Profile Name Input is set to "No Solar",
# or if Solar Size Input is set to 0 kW.
if Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
if Model_Type_Input == "Storage Only":
print("OSESMO Non-Economic Solar Self-Supply control algorithm selected, but Model Type set to Storage Only.")
if Solar_Profile_Name_Input == "No Solar":
print("OSESMO Non-Economic Solar Self-Supply control algorithm selected, but No Solar Profile Name Input selected.")
if Solar_Size_Input == 0:
print("OSESMO Non-Economic Solar Self-Supply control algorithm selected, but Solar Size Input set to 0 kW.")
# Emissions Evaluation Signal
# Real-time five-minute marginal emissions signal used to evaluate emission impacts.
# Available for both NP15 (Northern California congestion zone)
# and SP15 (Southern California congestion zone).
# Mapped based on load profile site location (Northern or Southern CA).
if Load_Profile_Name_Input == "WattTime GreenButton Residential Berkeley" or \
Load_Profile_Name_Input == "WattTime GreenButton Residential Coulterville" or \
Load_Profile_Name_Input == "PG&E GreenButton E-6 Residential" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential CARE" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential Non-CARE" or \
Load_Profile_Name_Input == "Custom Power Solar GreenButton PG&E Albany Residential with EV" or \
Load_Profile_Name_Input == "Custom Power Solar GreenButton PG&E Crockett Residential with EV" or \
Load_Profile_Name_Input == "Avalon GreenButton East Bay Light Industrial" or \
Load_Profile_Name_Input == "Avalon GreenButton South Bay Education" or \
Load_Profile_Name_Input == "EnerNOC GreenButton San Francisco Office" or \
Load_Profile_Name_Input == "EnerNOC GreenButton San Francisco Industrial" or \
Load_Profile_Name_Input == "PG&E GreenButton A-6 SMB" or \
Load_Profile_Name_Input == "PG&E GreenButton A-10S MLB" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential Non-CARE" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential CARE":
Emissions_Evaluation_Signal_Input = "NP15 RT5M"
elif Load_Profile_Name_Input == "WattTime GreenButton Residential Long Beach" or\
Load_Profile_Name_Input == "Stem GreenButton SCE TOU-8B Office" or\
Load_Profile_Name_Input == "Stem GreenButton SDG&E G-16 Manufacturing" or\
Load_Profile_Name_Input == "Stem GreenButton SCE GS-3B Food Processing" or\
Load_Profile_Name_Input == "EnerNOC GreenButton Los Angeles Grocery" or\
Load_Profile_Name_Input == "EnerNOC GreenButton Los Angeles Industrial" or\
Load_Profile_Name_Input == "EnerNOC GreenButton San Diego Office":
Emissions_Evaluation_Signal_Input = "SP15 RT5M"
else:
print("This load profile name input has not been mapped to an emissions evaluation signal (NP15 or SP15).")
# Total Storage Capacity
# Total storage capacity is the total chemical capacity of the battery.
# The usable storage capacity is equal to the total storage capacity
# multiplied by storage depth of discharge. This means that the total
# storage capacity is equal to the usable storage capacity divided by
# storage depth of discharge. Total storage capacity is used to
# calculate battery cost, whereas usable battery capacity is used
# as an input to operational simulation portion of model.
Total_Storage_Capacity = Usable_Storage_Capacity_Input / Storage_Depth_of_Discharge
# Usable Storage Capacity
# Usable storage capacity is equal to the original usable storage capacity
# input, degraded every month based on the number of cycles performed in
# that month. Initialized at the usable storage capacity input value.
Usable_Storage_Capacity = Usable_Storage_Capacity_Input
# Cycling Penalty
# Cycling penalty for lithium-ion battery is equal to estimated replacement cell cost
# in 10 years divided by expected cycle life. Cycling penalty for flow batteries is $0/cycle.
if Storage_Type_Input == "Lithium-Ion Battery":
cycle_pen = (Total_Storage_Capacity * Estimated_Future_Lithium_Ion_Battery_Installed_Cost_per_kWh) / Cycle_Life
elif Storage_Type_Input == "Flow Battery":
cycle_pen = 0
## Import Data from CSV Files
# Begin script runtime timer
tstart = time.time()
# Import Load Profile Data
# Call Import_Load_Profile_Data function.
from switch_api.services.osemo.Code.Import_Load_Profile_Data_SC2019 import Import_Load_Profile_Data
[Load_Profile_Data, Load_Profile_Master_Index] = Import_Load_Profile_Data(Input_Output_Data_Directory_Location, OSESMO_Git_Repo_Directory,
delta_t, Load_Profile_Name_Input)
Annual_Peak_Demand_Baseline = np.max(Load_Profile_Data)
Annual_Total_Energy_Consumption_Baseline = np.sum(Load_Profile_Data) * delta_t
# Import Solar PV Generation Profile Data
# Scale base 10-kW or 100-kW profile to match user-input PV system size
if Model_Type_Input == "Solar Plus Storage":
from switch_api.services.osemo.Code.Import_Solar_PV_Profile_Data_SC2019 import Import_Solar_PV_Profile_Data
[Solar_Profile_Master_Index, Solar_Profile_Description, Solar_PV_Profile_Data] = Import_Solar_PV_Profile_Data(
Input_Output_Data_Directory_Location,
OSESMO_Git_Repo_Directory, delta_t,
Solar_Profile_Name_Input, Solar_Size_Input)
elif Model_Type_Input == "Storage Only" or Solar_Profile_Name_Input == "No Solar":
Solar_PV_Profile_Data = np.zeros(shape=Load_Profile_Data.shape)
# Import Retail Rate Data
# Call Import_Retail_Rate_Data function.
from switch_api.services.osemo.Code.Import_Retail_Rate_Data_SC2019 import Import_Retail_Rate_Data
[Retail_Rate_Master_Index, Retail_Rate_Effective_Date,
Volumetric_Rate_Data, Summer_Peak_DC, Summer_Part_Peak_DC, Summer_Noncoincident_DC,
Winter_Peak_DC, Winter_Part_Peak_DC, Winter_Noncoincident_DC,
Fixed_Per_Meter_Day_Charge, Fixed_Per_Meter_Month_Charge,
First_Summer_Month, Last_Summer_Month, Month_Data,
Summer_Peak_Binary_Data, Summer_Part_Peak_Binary_Data,
Winter_Peak_Binary_Data, Winter_Part_Peak_Binary_Data] = Import_Retail_Rate_Data(
Input_Output_Data_Directory_Location, OSESMO_Git_Repo_Directory,
delta_t, Retail_Rate_Name_Input)
Month_Data = Month_Data.astype(int)
Summer_Peak_Binary_Data = Summer_Peak_Binary_Data.astype(int)
Summer_Part_Peak_Binary_Data = Summer_Part_Peak_Binary_Data.astype(int)
Winter_Peak_Binary_Data = Winter_Peak_Binary_Data.astype(int)
Winter_Part_Peak_Binary_Data = Winter_Part_Peak_Binary_Data.astype(int)
# Import Marginal Emissions Rate Data Used as Forecast
# Call Import_Marginal_Emissions_Rate_Forecast_Data function.
# from Import_Marginal_Emissions_Rate_Forecast_Data import Import_Marginal_Emissions_Rate_Forecast_Data
Marginal_Emissions_Rate_Forecast_Data = np.zeros(shape=Load_Profile_Data.shape)
# Import Marginal Emissions Rate Data Used for Evaluation
# Call Import_Marginal_Emissions_Rate_Forecast_Data function.
# from Import_Marginal_Emissions_Rate_Evaluation_Data import Import_Marginal_Emissions_Rate_Evaluation_Data
Marginal_Emissions_Rate_Evaluation_Data = np.zeros(shape=Load_Profile_Data.shape)
# Import Carbon Adder Data
# Carbon Adder ($/kWh) = Marginal Emissions Rate (metric tons CO2/MWh) *
# Carbon Adder ($/metric ton) * (1 MWh/1000 kWh)
Carbon_Adder_Data = (Marginal_Emissions_Rate_Forecast_Data *
Carbon_Adder_Incentive_Value_Input) / 1000
# Import IOU-Proposed Charge and Discharge Hour Flag Vectors
# Only loaded when the corresponding GHG-reduction solution is selected;
# the variables are undefined otherwise, and later code guards on the same
# GHG_Reduction_Solution_Input string before using them.
if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
    from switch_api.services.osemo.Code.Import_IOU_Time_Constraint_Binary_Data import Import_IOU_Time_Constraint_Binary_Data
    [IOU_Charge_Hour_Binary_Data, IOU_Discharge_Hour_Binary_Data] = Import_IOU_Time_Constraint_Binary_Data(
        Input_Output_Data_Directory_Location,
        OSESMO_Git_Repo_Directory, delta_t)
# Import PG&E-Proposed Charge, No-Charge, and Discharge Hour Flag Vectors
if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
    from switch_api.services.osemo.Code.Import_PGE_Time_Constraint_Binary_Data import Import_PGE_Time_Constraint_Binary_Data
    [PGE_Charge_Hour_Binary_Data, PGE_No_Charge_Hour_Binary_Data, PGE_Discharge_Hour_Binary_Data] = Import_PGE_Time_Constraint_Binary_Data(
        Input_Output_Data_Directory_Location, OSESMO_Git_Repo_Directory, delta_t)
# Import Utility Marginal Cost Data
# Marginal Costs are mapped to load profile location
# from Import_Utility_Marginal_Cost_Data import Import_Utility_Marginal_Cost_Data
# NOTE(review): like the emissions data, marginal-cost inputs are stubbed to
# zeros here (the import is commented out) — confirm this is intentional.
Generation_Cost_Data = np.zeros(shape=Load_Profile_Data.shape)
Representative_Distribution_Cost_Data = np.zeros(shape=Load_Profile_Data.shape)
# Set Directory to Box Sync Folder
os.chdir(Input_Output_Data_Directory_Location)
## Iterate Through Months & Filter Data to Selected Month
# Initialize Blank Variables to store optimal decision variable values for
# all months. Each vector starts empty and is appended to as the monthly
# optimizations complete, yielding full-year series.
# Initialize Decision Variable Vectors
P_ES_in = np.array([])
P_ES_out = np.array([])
Ene_Lvl = np.array([])
P_max_NC = np.array([])
P_max_peak = np.array([])
P_max_part_peak = np.array([])
# Initialize Monthly Cost Variable Vectors (one entry appended per month:
# fixed charges, the three demand-charge types under baseline / solar-only /
# solar-plus-storage scenarios, energy charges, and cycling results).
Fixed_Charge_Vector = np.array([])
NC_DC_Baseline_Vector = np.array([])
NC_DC_with_Solar_Only_Vector = np.array([])
NC_DC_with_Solar_and_Storage_Vector = np.array([])
CPK_DC_Baseline_Vector = np.array([])
CPK_DC_with_Solar_Only_Vector = np.array([])
CPK_DC_with_Solar_and_Storage_Vector = np.array([])
CPP_DC_Baseline_Vector = np.array([])
CPP_DC_with_Solar_Only_Vector = np.array([])
CPP_DC_with_Solar_and_Storage_Vector = np.array([])
Energy_Charge_Baseline_Vector = np.array([])
Energy_Charge_with_Solar_Only_Vector = np.array([])
Energy_Charge_with_Solar_and_Storage_Vector = np.array([])
Cycles_Vector = np.array([])
Cycling_Penalty_Vector = np.array([])
for Month_Iter in range(1,13): # Iterate through all months
    # Filter Load Profile Data to Selected Month
    # (boolean-mask indexing returns a copy, so monthly slices are
    # independent of the full-year arrays)
    Load_Profile_Data_Month = Load_Profile_Data[Month_Data == Month_Iter]
    # Filter PV Production Profile Data to Selected Month
    Solar_PV_Profile_Data_Month = Solar_PV_Profile_Data[Month_Data == Month_Iter]
    # Filter Volumetric Rate Data to Selected Month
    Volumetric_Rate_Data_Month = Volumetric_Rate_Data[Month_Data == Month_Iter]
    # Filter Marginal Emissions Data to Selected Month
    Marginal_Emissions_Rate_Forecast_Data_Month = Marginal_Emissions_Rate_Forecast_Data[Month_Data == Month_Iter]
    # Filter Carbon Adder Data to Selected Month
    Carbon_Adder_Data_Month = Carbon_Adder_Data[Month_Data == Month_Iter]
    # Set Demand Charge Values Based on Month (summer vs. winter season)
    if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
        Peak_DC = Summer_Peak_DC
        Part_Peak_DC = Summer_Part_Peak_DC
        Noncoincident_DC = Summer_Noncoincident_DC
    else:
        Peak_DC = Winter_Peak_DC
        Part_Peak_DC = Winter_Part_Peak_DC
        Noncoincident_DC = Winter_Noncoincident_DC
    # Filter Peak and Part-Peak Binary Data to Selected Month.
    # Only filtered when the corresponding demand charge is nonzero;
    # downstream constraint blocks guard on the same conditions.
    if Summer_Peak_DC > 0:
        Summer_Peak_Binary_Data_Month = Summer_Peak_Binary_Data[Month_Data == Month_Iter]
    if Summer_Part_Peak_DC > 0:
        Summer_Part_Peak_Binary_Data_Month = Summer_Part_Peak_Binary_Data[Month_Data == Month_Iter]
    if Winter_Peak_DC > 0:
        Winter_Peak_Binary_Data_Month = Winter_Peak_Binary_Data[Month_Data == Month_Iter]
    if Winter_Part_Peak_DC > 0:
        Winter_Part_Peak_Binary_Data_Month = Winter_Part_Peak_Binary_Data[Month_Data == Month_Iter]
    # Filter PG&E-Proposed Charge and Discharge Hour Binary Data to Selected Month
    if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or \
            GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
        PGE_Charge_Hour_Binary_Data_Month = PGE_Charge_Hour_Binary_Data[Month_Data == Month_Iter]
        PGE_No_Charge_Hour_Binary_Data_Month = PGE_No_Charge_Hour_Binary_Data[Month_Data == Month_Iter]
        PGE_Discharge_Hour_Binary_Data_Month = PGE_Discharge_Hour_Binary_Data[Month_Data == Month_Iter]
    # Filter IOU-Proposed Charge and Discharge Hour Binary Data to Selected Month
    if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
        IOU_Charge_Hour_Binary_Data_Month = IOU_Charge_Hour_Binary_Data[Month_Data == Month_Iter]
        IOU_Discharge_Hour_Binary_Data_Month = IOU_Discharge_Hour_Binary_Data[Month_Data == Month_Iter]
    ## Add "Padding" to Every Month of Data
    # The padding repeats the last End_of_Month_Padding_Days days of each
    # input series so the optimizer's horizon extends past the month
    # boundary (avoiding end-of-horizon artifacts in the dispatch).
    # Don't pad Month 12, because the final state of charge is constrained
    # to equal the original state of charge.
    if Month_Iter in range(1, 12): # 1 through 11
        # Pad Load Profile Data
        Load_Profile_Data_Month_Padded = np.concatenate((Load_Profile_Data_Month,
            Load_Profile_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        # Pad PV Production Profile Data
        Solar_PV_Profile_Data_Month_Padded = np.concatenate((Solar_PV_Profile_Data_Month,
            Solar_PV_Profile_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        # Pad Volumetric Energy Rate Data
        Volumetric_Rate_Data_Month_Padded = np.concatenate((Volumetric_Rate_Data_Month,
            Volumetric_Rate_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        # Pad Marginal Emissions Data
        Marginal_Emissions_Rate_Data_Month_Padded = np.concatenate((Marginal_Emissions_Rate_Forecast_Data_Month,
            Marginal_Emissions_Rate_Forecast_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        # Pad Carbon Adder Data
        Carbon_Adder_Data_Month_Padded = np.concatenate((Carbon_Adder_Data_Month,
            Carbon_Adder_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        # Pad Peak and Part-Peak Binary Data
        if Summer_Peak_DC > 0:
            Summer_Peak_Binary_Data_Month_Padded = np.concatenate((Summer_Peak_Binary_Data_Month,
                Summer_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        if Summer_Part_Peak_DC > 0:
            Summer_Part_Peak_Binary_Data_Month_Padded = np.concatenate((Summer_Part_Peak_Binary_Data_Month,
                Summer_Part_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        if Winter_Peak_DC > 0:
            Winter_Peak_Binary_Data_Month_Padded = np.concatenate((Winter_Peak_Binary_Data_Month,
                Winter_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        if Winter_Part_Peak_DC > 0:
            Winter_Part_Peak_Binary_Data_Month_Padded = np.concatenate((Winter_Part_Peak_Binary_Data_Month,
                Winter_Part_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        # Pad PG&E-Proposed Charge and Discharge Hour Binary Data
        if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or \
                GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
            PGE_Charge_Hour_Binary_Data_Month_Padded = np.concatenate((PGE_Charge_Hour_Binary_Data_Month,
                PGE_Charge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
            PGE_No_Charge_Hour_Binary_Data_Month_Padded = np.concatenate((PGE_No_Charge_Hour_Binary_Data_Month,
                PGE_No_Charge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
            PGE_Discharge_Hour_Binary_Data_Month_Padded = np.concatenate((PGE_Discharge_Hour_Binary_Data_Month,
                PGE_Discharge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
        # Pad IOU-Proposed Charge and Discharge Hour Binary Data
        if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
            IOU_Charge_Hour_Binary_Data_Month_Padded = np.concatenate((IOU_Charge_Hour_Binary_Data_Month,
                IOU_Charge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
            IOU_Discharge_Hour_Binary_Data_Month_Padded = np.concatenate((IOU_Discharge_Hour_Binary_Data_Month,
                IOU_Discharge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
    elif Month_Iter == 12:
        # Month 12 is unpadded: the "_Padded" names are bound directly to the
        # (unpadded) monthly arrays so downstream code can use one name.
        # Don't Pad Load Profile Data
        Load_Profile_Data_Month_Padded = Load_Profile_Data_Month
        # Don't Pad PV Production Profile Data
        Solar_PV_Profile_Data_Month_Padded = Solar_PV_Profile_Data_Month
        # Don't Pad Volumetric Rate Data
        Volumetric_Rate_Data_Month_Padded = Volumetric_Rate_Data_Month
        # Don't Pad Marginal Emissions Data
        Marginal_Emissions_Rate_Data_Month_Padded = Marginal_Emissions_Rate_Forecast_Data_Month
        # Don't Pad Carbon Adder Data
        Carbon_Adder_Data_Month_Padded = Carbon_Adder_Data_Month
        # Don't Pad Peak and Part-Peak Binary Data
        if Summer_Peak_DC > 0:
            Summer_Peak_Binary_Data_Month_Padded = Summer_Peak_Binary_Data_Month
        if Summer_Part_Peak_DC > 0:
            Summer_Part_Peak_Binary_Data_Month_Padded = Summer_Part_Peak_Binary_Data_Month
        if Winter_Peak_DC > 0:
            Winter_Peak_Binary_Data_Month_Padded = Winter_Peak_Binary_Data_Month
        if Winter_Part_Peak_DC > 0:
            Winter_Part_Peak_Binary_Data_Month_Padded = Winter_Part_Peak_Binary_Data_Month
        # Don't Pad PG&E-Proposed Charge and Discharge Hour Binary Data
        if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or \
                GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
            PGE_Charge_Hour_Binary_Data_Month_Padded = PGE_Charge_Hour_Binary_Data_Month
            PGE_No_Charge_Hour_Binary_Data_Month_Padded = PGE_No_Charge_Hour_Binary_Data_Month
            PGE_Discharge_Hour_Binary_Data_Month_Padded = PGE_Discharge_Hour_Binary_Data_Month
        # Don't Pad IOU-Proposed Charge and Discharge Hour Binary Data
        if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
            IOU_Charge_Hour_Binary_Data_Month_Padded = IOU_Charge_Hour_Binary_Data_Month
            IOU_Discharge_Hour_Binary_Data_Month_Padded = IOU_Discharge_Hour_Binary_Data_Month
    ## Initialize Cost Vector "c"
    # nts = numtsteps = number of timesteps
    numtsteps = len(Load_Profile_Data_Month_Padded)
    all_tsteps = np.array(list(range(0, numtsteps)))
    # Decision-variable layout of x (and therefore of c):
    # x = np.concatenate((P_ES_in_grid(size nts), P_ES_out(size nts), Ene_Lvl(size nts)
    # [P_max_NC (size 1)], [P_max_peak (size 1)], [P_max_part_peak (size 1)]))
    # Even if the system is charging from solar, it still has a relative cost
    # equal to the cost of grid power (Volumetric Rate).
    # This is because every amount of PV power going into the battery is
    # not used to offset load or export to the grid.
    c_Month_Bill_Only = np.concatenate(((Volumetric_Rate_Data_Month_Padded * delta_t),
                                        (-Volumetric_Rate_Data_Month_Padded * delta_t),
                                        np.zeros((numtsteps,)),
                                        [Noncoincident_DC],
                                        [Peak_DC],
                                        [Part_Peak_DC]))
    # The same is true of carbon emissions. Every amount of PV power going into the battery is
    # not used at that time to offset emissions from the load or from the grid.
    c_Month_Carbon_Only = np.concatenate(((Carbon_Adder_Data_Month_Padded * delta_t),
                                          (-Carbon_Adder_Data_Month_Padded * delta_t),
                                          np.zeros(numtsteps,),
                                          [0.],
                                          [0.],
                                          [0.]))
    # Cycling-degradation penalty: charging and discharging each contribute
    # half a cycle's worth of throughput, scaled by round-trip efficiency.
    c_Month_Degradation_Only = np.concatenate((
        (((Eff_c * cycle_pen) / (2. * Total_Storage_Capacity)) * delta_t) * np.ones(numtsteps,),
        ((cycle_pen / (Eff_d * 2. * Total_Storage_Capacity)) * delta_t) * np.ones(numtsteps,),
        np.zeros(numtsteps,),
        [0.],
        [0.],
        [0.]))
    # c_Month_Solar_Self_Supply is an additional cost term used in the
    # OSESMO Non-Economic Solar Self-Supply dispatch algorithm. This dispatch mode adds
    # additional cost terms (P_PV(t) - P_ES_in(t)) to be minimized, which
    # represent all power produced by the PV system that is not stored in the
    # battery. Because P_PV is not controllable (not a decision variable),
    # this can be simplified to adding -P_ES_in(t) cost terms to the cost function.
    # NOTE(review): if Storage_Control_Algorithm_Name is neither of the two
    # strings below, c_Month_Solar_Self_Supply is never bound and the sum
    # that follows raises NameError — confirm the input is validated upstream.
    if Storage_Control_Algorithm_Name == "OSESMO Economic Dispatch":
        c_Month_Solar_Self_Supply = np.concatenate((np.zeros(numtsteps,),
                                                    np.zeros(numtsteps,),
                                                    np.zeros(numtsteps,),
                                                    [0.],
                                                    [0.],
                                                    [0.]))
    elif Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
        c_Month_Solar_Self_Supply = np.concatenate((-np.ones(numtsteps,),
                                                    np.zeros(numtsteps,),
                                                    np.zeros(numtsteps,),
                                                    [0.],
                                                    [0.],
                                                    [0.]))
    c_Month = c_Month_Bill_Only + c_Month_Carbon_Only + c_Month_Degradation_Only + c_Month_Solar_Self_Supply
    # This is the length of the vectors c and x, or the total number of decision variables.
    length_x = len(c_Month)
    # Convert from numpy array to cvxopt matrix format
    c_Month = matrix(c_Month, tc = 'd')
    ## Decision Variable Indices
    # P_ES_in = x(1:numtsteps)
    # P_ES_out = x(numtsteps+1:2*numtsteps)
    # Ene_Lvl = x(2*numtsteps+1:3*numtsteps)
    # P_max_NC = x(3*numtsteps+1)
    # P_max_peak = x(3*numtsteps+2)
    # P_max_part_peak = x(3*numtsteps+3)
    ## State of Charge Constraint
    # This constraint represents conservation of energy as it flows into and out of the
    # energy storage system, while accounting for efficiency losses.
    # For t in [0, numsteps-1]:
    # E[t+1] = E[t] + [Eff_c * P_ES_in[t] - (1/Eff_d) * P_ES_out[t]] * delta_t
    # E[t] - E[t+1] + Eff_c * P_ES_in[t] * delta_t - (1/Eff_d) * P_ES_out[t] * delta_t = 0
    # An equality constraint can be transformed into two inequality constraints
    # Ax = 0 -> Ax <=0 , -Ax <=0
    # Number of rows in each inequality constraint matrix = (numtsteps - 1)
    # Number of columns in each inequality constraint matrix = number of
    # decision variables = length_x
    A_E = sparse(matrix(0., (numtsteps - 1, length_x), tc = 'd'), tc = 'd')
    b_E = sparse(matrix(0., (numtsteps - 1, 1), tc = 'd'), tc = 'd')
    for n in range(0, numtsteps - 1): # Iterates from Index 0 to Index (numtsteps-2) - equivalent to Timesteps 1 to (numtsteps-1)
        A_E[n, n + (2 * numtsteps)] = 1. # E[t]
        A_E[n, n + (2 * numtsteps) + 1] = -1. # -E[t+1]
        A_E[n, n] = Eff_c * delta_t # Eff_c * P_ES_in[t] * delta_t
        A_E[n, n + numtsteps] = (-1 / Eff_d) * delta_t # - (1/Eff_d) * P_ES_out[t] * delta_t
    # A_Month/b_Month accumulate the month's full constraint system; every
    # later constraint block stacks onto these two sparse matrices.
    A_Month = sparse([A_E,
                      -A_E], tc = 'd')
    b_Month = sparse([b_E,
                      -b_E], tc = 'd')
    ## Energy Storage Charging Power Constraint
    # This constraint sets maximum and minimum values for P_ES_in.
    # The minimum is 0 kW, and the maximum is Storage_Power_Rating_Input.
    # P_ES_in >= 0 -> -P_ES_in <= 0
    # P_ES_in <= Storage_Power_Rating_Input
    # Number of rows in inequality constraint matrix = numtsteps
    # Number of columns in inequality constraint matrix = length_x
    A_P_ES_in = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
    for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
        A_P_ES_in[n, n] = -1.
    # Stack both directions: A_P_ES_in gives the lower bound (>= 0),
    # -A_P_ES_in gives the upper bound (<= power rating).
    A_Month = sparse([A_Month,
                      A_P_ES_in,
                      -A_P_ES_in], tc = 'd')
    b_Month = sparse([b_Month,
                      sparse(matrix(0., (numtsteps, 1), tc = 'd'), tc = 'd'),
                      sparse(matrix(Storage_Power_Rating_Input, (numtsteps, 1), tc = 'd'), tc = 'd')], tc = 'd')
    ## Energy Storage Discharging Power Constraint
    # This constraint sets maximum and minimum values for P_ES_out.
    # The minimum is 0 kW, and the maximum is Storage_Power_Rating_Input.
    # P_ES_out >= 0 -> -P_ES_out <= 0
    # P_ES_out <= Storage_Power_Rating_Input
    A_P_ES_out = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
    for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
        A_P_ES_out[n, n + numtsteps] = -1.
    A_Month = sparse([A_Month,
                      A_P_ES_out,
                      -A_P_ES_out], tc = 'd')
    b_Month = sparse([b_Month,
                      sparse(matrix(0., (numtsteps, 1), tc = 'd'), tc = 'd'),
                      sparse(matrix(Storage_Power_Rating_Input, (numtsteps, 1), tc = 'd'), tc = 'd')], tc = 'd')
    ## State of Charge Minimum/Maximum Constraints
    # This constraint sets maximum and minimum values on the Energy Level.
    # The minimum value is 0, and the maximum value is Usable_Storage_Capacity, the size of the
    # battery. Note: this optimization defines the range [0, Usable_Storage_Capacity] as the
    # effective storage capacity of the battery, without accounting for
    # depth of discharge.
    # Ene_Lvl(t) >= 0 -> -Ene_Lvl(t) <=0
    A_Ene_Lvl_min = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
    b_Ene_Lvl_min = sparse(matrix(0., (numtsteps, 1), tc = 'd'), tc = 'd')
    for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
        A_Ene_Lvl_min[n, n + (2 * numtsteps)] = -1.
    A_Month = sparse([A_Month,
                      A_Ene_Lvl_min], tc = 'd')
    b_Month = sparse([b_Month,
                      b_Ene_Lvl_min], tc = 'd')
    # Ene_Lvl(t) <= Size_ES
    A_Ene_Lvl_max = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
    b_Ene_Lvl_max = matrix(Usable_Storage_Capacity * np.ones((numtsteps,1)), tc = 'd')
    for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
        A_Ene_Lvl_max[n, n + (2 * numtsteps)] = 1.
    A_Month = sparse([A_Month,
                      A_Ene_Lvl_max], tc = 'd')
    b_Month = sparse([b_Month,
                      b_Ene_Lvl_max], tc = 'd')
    ## Initial State of Charge Constraint
    # In the first month, this constraint initializes the energy level of the battery at
    # a user-defined percentage of the original battery capacity.
    # In all other months, this constraint initializes the energy level of
    # the battery at the final battery level from the previous month
    # (Next_Month_Initial_Energy_Level, carried over between loop iterations).
    # E(0) = Initial_Final_SOC * Usable_Storage_Capacity_Input
    # E(0) <= Initial_Final_SOC * Usable_Storage_Capacity_Input, -E(0) <= Initial_Final_SOC * Usable_Storage_Capacity_Input
    # E(0) = Previous_Month_Final_Energy_Level
    # E(0) <= Previous_Month_Final_Energy_Level, -E(0) <= Previous_Month_Final_Energy_Level
    A_Ene_Lvl_0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
    A_Ene_Lvl_0[0, (2 * numtsteps)] = 1.
    if Month_Iter == 1:
        b_Ene_Lvl_0 = matrix(Initial_Final_SOC * Usable_Storage_Capacity_Input, tc = 'd')
    elif Month_Iter in range(2, (12 + 1)):
        b_Ene_Lvl_0 = matrix(Next_Month_Initial_Energy_Level, tc = 'd')
    # Equality encoded as the pair of inequalities Ax <= b and -Ax <= -b.
    A_Month = sparse([A_Month,
                      A_Ene_Lvl_0,
                      -A_Ene_Lvl_0], tc = 'd')
    b_Month = sparse([b_Month,
                      b_Ene_Lvl_0,
                      -b_Ene_Lvl_0], tc = 'd')
    ## Final State of Charge Constraints
    # This constraint fixes the final state of charge of the battery at a user-defined percentage
    # of the original battery capacity,
    # to prevent it from discharging completely in the final timesteps.
    # E(N) = Initial_Final_SOC * Usable_Storage_Capacity_Input
    # E(N) <= Initial_Final_SOC * Usable_Storage_Capacity_Input, -E(N) <= Initial_Final_SOC * Usable_Storage_Capacity_Input
    A_Ene_Lvl_N = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
    A_Ene_Lvl_N[0, (3 * numtsteps) - 1] = 1.
    b_Ene_Lvl_N = matrix(Initial_Final_SOC * Usable_Storage_Capacity_Input, tc = 'd')
    A_Month = sparse([A_Month,
                      A_Ene_Lvl_N,
                      -A_Ene_Lvl_N], tc = 'd')
    b_Month = sparse([b_Month,
                      b_Ene_Lvl_N,
                      -b_Ene_Lvl_N], tc = 'd')
    ## Noncoincident Demand Charge Constraint
    # This constraint linearizes the noncoincident demand charge constraint.
    # Setting the demand charge value as a decision variable incentivizes
    # "demand capping" to reduce the value of max(P_load(t)) to an optimal
    # level without using the nonlinear max() operator.
    # The noncoincident demand charge applies across all 15-minute intervals.
    # P_load(t) - P_PV(t) + P_ES_in(t) - P_ES_out(t) <= P_max_NC for all t
    # P_ES_in(t) - P_ES_out(t) - P_max_NC <= - P_load(t) + P_PV(t) for all t
    if Noncoincident_DC > 0:
        A_NC_DC = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
        b_NC_DC = matrix(-Load_Profile_Data_Month_Padded + Solar_PV_Profile_Data_Month_Padded, tc = 'd')
        for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
            A_NC_DC[n, n] = 1.
            A_NC_DC[n, n + numtsteps] = -1.
            A_NC_DC[n, (3 * numtsteps)] = -1.
        A_Month = sparse([A_Month,
                          A_NC_DC], tc = 'd')
        b_Month = sparse([b_Month,
                          b_NC_DC], tc = 'd')
    # Add P_max_NC >=0 Constraint
    # -P_max_NC <= 0
    # Note: this non-negativity constraint is added even if the noncoincident
    # demand charge is $0/kW for this tariff. This ensures that the
    # decision variable P_max_NC goes to zero, and is not negative.
    A_NC_DC_gt0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
    A_NC_DC_gt0[0, (3 * numtsteps)] = -1.
    b_NC_DC_gt0 = matrix(0., tc = 'd')
    A_Month = sparse([A_Month,
                      A_NC_DC_gt0], tc = 'd')
    b_Month = sparse([b_Month,
                      b_NC_DC_gt0], tc = 'd')
    ## Coincident Peak Demand Charge Constraint
    # This constraint linearizes the coincident peak demand charge constraint.
    # This demand charge only applies for peak hours (selected by the
    # season-appropriate peak binary flag vector).
    # P_load(t) - P_PV(t) + P_ES_in(t) - P_ES_out(t) <= P_max_peak for Peak t only
    # P_ES_in(t) - P_ES_out(t) - P_max_peak <= - P_load(t) + P_PV(t) for Peak t only
    if Peak_DC > 0:
        if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
            Peak_Indices = all_tsteps[Summer_Peak_Binary_Data_Month_Padded == 1]
            A_CPK_DC = sparse(matrix(0., (sum(Summer_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
            b_CPK_DC = matrix(-Load_Profile_Data_Month_Padded[Summer_Peak_Binary_Data_Month_Padded == 1] + \
                              Solar_PV_Profile_Data_Month_Padded[Summer_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
        else:
            Peak_Indices = all_tsteps[Winter_Peak_Binary_Data_Month_Padded == 1]
            A_CPK_DC = sparse(matrix(0., (sum(Winter_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
            b_CPK_DC = matrix(-Load_Profile_Data_Month_Padded[Winter_Peak_Binary_Data_Month_Padded == 1] + \
                              Solar_PV_Profile_Data_Month_Padded[Winter_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
        for n in range(0, len(Peak_Indices)): # Iterates from Index 0 to Index (len(Peak_Indices)-1) - equivalent to Timesteps 1 to len(Peak_Indices)
            Peak_Index_n = int(Peak_Indices[n])
            A_CPK_DC[n, Peak_Index_n] = 1.
            A_CPK_DC[n, numtsteps + Peak_Index_n] = -1.
            A_CPK_DC[n, (3 * numtsteps) + 1] = -1.
        A_Month = sparse([A_Month,
                          A_CPK_DC], tc = 'd')
        b_Month = sparse([b_Month,
                          b_CPK_DC], tc = 'd')
    # Add P_max_peak >=0 Constraint
    # -P_max_peak <= 0
    # Note: this non-negativity constraint is added even if the coincident peak
    # demand charge is $0/kW for this tariff. This ensures that the
    # decision variable P_max_peak goes to zero, and is not negative.
    A_CPK_DC_gt0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
    A_CPK_DC_gt0[0, (3 * numtsteps) + 1] = -1.
    b_CPK_DC_gt0 = matrix(0., tc = 'd')
    A_Month = sparse([A_Month,
                      A_CPK_DC_gt0], tc = 'd')
    b_Month = sparse([b_Month,
                      b_CPK_DC_gt0], tc = 'd')
    ## Coincident Part-Peak Demand Charge Constraint
    # This constraint linearizes the coincident part-peak demand charge
    # constraint. It mirrors the peak-hour block above, using the
    # part-peak binary flag vector for the current season.
    # This demand charge only applies for part-peak hours.
    # P_load(t) - P_PV(t) + P_ES_in(t) - P_ES_out(t) <= P_max_part_peak for Part-Peak t only
    # P_ES_in(t) - P_ES_out(t) - P_max_part_peak <= - P_load(t) + P_PV(t) for Part-Peak t only
    if Part_Peak_DC > 0:
        if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
            Part_Peak_Indices = all_tsteps[Summer_Part_Peak_Binary_Data_Month_Padded == 1]
            A_CPP_DC = sparse(matrix(0., (sum(Summer_Part_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
            b_CPP_DC = matrix(-Load_Profile_Data_Month_Padded[Summer_Part_Peak_Binary_Data_Month_Padded == 1] + \
                              Solar_PV_Profile_Data_Month_Padded[Summer_Part_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
        else:
            Part_Peak_Indices = all_tsteps[Winter_Part_Peak_Binary_Data_Month_Padded == 1]
            A_CPP_DC = sparse(matrix(0., (sum(Winter_Part_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
            b_CPP_DC = matrix(-Load_Profile_Data_Month_Padded[Winter_Part_Peak_Binary_Data_Month_Padded == 1] + \
                              Solar_PV_Profile_Data_Month_Padded[Winter_Part_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
        for n in range(0, len(Part_Peak_Indices)): # Iterates from Index 0 to Index (len(Part_Peak_Indices)-1) - equivalent to Timesteps 1 to len(Part_Peak_Indices)
            Part_Peak_Index_n = int(Part_Peak_Indices[n])
            A_CPP_DC[n, Part_Peak_Index_n] = 1.
            A_CPP_DC[n, numtsteps + Part_Peak_Index_n] = -1.
            A_CPP_DC[n, (3 * numtsteps) + 2] = -1.
        A_Month = sparse([A_Month,
                          A_CPP_DC], tc = 'd')
        b_Month = sparse([b_Month,
                          b_CPP_DC], tc = 'd')
    # Add P_max_part_peak >=0 Constraint
    # -P_max_part_peak <= 0
    # Note: this non-negativity constraint is added even if the coincident part-peak
    # demand charge is $0/kW for this tariff. This ensures that the
    # decision variable P_max_part_peak goes to zero, and is not negative.
    A_CPP_DC_gt0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
    A_CPP_DC_gt0[0, (3 * numtsteps) + 2] = -1.
    b_CPP_DC_gt0 = matrix(0., tc = 'd')
    A_Month = sparse([A_Month,
                      A_CPP_DC_gt0], tc = 'd')
    b_Month = sparse([b_Month,
                      b_CPP_DC_gt0], tc = 'd')
## Optional Constraint - Solar ITC Charging Constraint
# This constraint requires that the storage system be charged 100% from
# solar. This ensures that the customer receives 100% of the
# solar Incentive Tax Credit. The ITC amount is prorated by the amount
# of energy entering into the battery that comes from solar
# (ex. a storage system charged 90% from solar receives 90% of the ITC).
# As a result, the optimal amount of solar charging is likely higher
# than the minimum requirement of 75%, and likely very close to 100%.
# P_ES_in(t) <= P_PV(t)
# Note that P_PV(t) can sometimes be negative for some PV profiles, if
# the solar inverter is consuming energy at night. As a result, P_PV(t)
# here refers to a modified version of the solar profile where all
# negative values are set to 0. Otherwise, the model would break
# because P_ES_in must be >= 0, and can't also be <= P_PV(t) if P_PV(t)
# <= 0.
if Model_Type_Input == "Solar Plus Storage" and Solar_Profile_Name_Input != "No Solar" and \
Solar_Size_Input > 0 and ITC_Constraint_Input == 1:
Solar_PV_Profile_Data_Month_Padded_Nonnegative = Solar_PV_Profile_Data_Month_Padded
Solar_PV_Profile_Data_Month_Padded_Nonnegative[Solar_PV_Profile_Data_Month_Padded_Nonnegative < 0] = 0.
A_ITC = sparse(matrix(0., (numtsteps, length_x)))
b_ITC = matrix(Solar_PV_Profile_Data_Month_Padded_Nonnegative, tc = 'd')
for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
A_ITC[n, n] = 1.
A_Month = sparse([A_Month,
A_ITC])
b_Month = sparse([b_Month,
b_ITC], tc = 'd')
    ## Optional Constraint - No-Charging Time Constraint
    if GHG_Reduction_Solution_Input == "No-Charging Time Constraint":
        # PG&E has suggested a set of time-based constraints on storage charging.
        # One of these constraints is that storage would not be allowed to charge between 4:00 pm and 9:00 pm.
        # No-Charging Constraint
        # Charging power in each timestep is set equal to 0 between 4:00 pm and 9:00 pm.
        # Because charging power is constrained to be greater than
        # zero, setting the sum of all charging power timesteps to 0 (a
        # single constraint across all timesteps) ensures that all values will be zero
        # without needing to set a constraint for each timestep.
        # Sum of all P_ES_in(t) between 4:00 and 9:00 = 0
        # Because of nonnegative constraint on P_ES_in(t), this is
        # equivalent to a set of numtsteps constraints stating that
        # all P_ES_in(t) between 4:00 and 9:00 = 0 for each timestep.
        A_PGE_No_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
        PGE_No_Charge_Hour_Indices = all_tsteps[PGE_No_Charge_Hour_Binary_Data_Month_Padded == 1]
        # Sum of all P_ES_in(t) between 4:00 and 9:00
        A_PGE_No_Charge[0, PGE_No_Charge_Hour_Indices] = 1.
        b_PGE_No_Charge = matrix(0., tc = 'd')
        A_Month = sparse([A_Month,
                          A_PGE_No_Charge], tc = 'd')
        b_Month = sparse([b_Month,
                          b_PGE_No_Charge], tc = 'd')
    ## Optional Constraint - Charging and Discharging Time Constraints
    if GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
        # PG&E has suggested a set of time-based constraints on storage charging.
        # At least 50% of total charging would need to occur between 9:00 am and 2:00 pm,
        # and at least 50% of total discharging would need to occur between 4:00 pm and 9:00 pm.
        # In addition, storage would not be allowed to charge between 4:00 pm and 9:00 pm.
        # Derivation of charging constraint in standard linear form Ax <= 0:
        # Sum of all P_ES_in(t) between 9:00 and 2:00/sum of all P_ES_in(t) >= 0.5
        # Sum of all P_ES_in(t) between 9:00 and 2:00 >= 0.5 * sum of all P_ES_in(t)
        # 0 >= 0.5 * sum of all P_ES_in(t) - sum of all P_ES_in(t) between 9:00 and 2:00
        # 0.5 * sum of all P_ES_in(t) - sum of all P_ES_in(t) between 9:00 and 2:00 <= 0
        # 0.5 * sum of all P_ES_in(t) not between 9:00 and 2:00 - 0.5 * sum of all P_ES_in(t)
        # between 9:00 and 2:00 <= 0.
        # Charging Constraint
        A_PGE_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
        # 0.5 * sum of all P_ES_in(t)
        A_PGE_Charge[0, range(0, numtsteps)] = 0.5
        PGE_Charge_Hour_Indices = all_tsteps[PGE_Charge_Hour_Binary_Data_Month_Padded == 1]
        # -0.5 * sum of all P_ES_in(t) between 9:00 and 2:00
        # (assignment overwrites the 0.5 set above for charge-hour columns)
        A_PGE_Charge[0, PGE_Charge_Hour_Indices] = -0.5
        b_PGE_Charge = matrix(0., tc = 'd')
        A_Month = sparse([A_Month, A_PGE_Charge], tc = 'd')
        b_Month = sparse([b_Month, b_PGE_Charge], tc = 'd')
        # No-Charging Constraint
        # Charging power in each timestep is set equal to 0 between 4:00 pm and 9:00 pm.
        # Because charging power is constrained to be greater than
        # zero, setting the sum of all charging power timesteps to 0 (a
        # single constraint across all timesteps) ensures that all values will be zero
        # without needing to set a constraint for each timestep.
        # Sum of all P_ES_in(t) between 4:00 and 9:00 = 0
        # Because of nonnegative constraint on P_ES_in(t), this is
        # equivalent to a set of numtsteps constraints stating that
        # all P_ES_in(t) between 4:00 and 9:00 = 0 for each timestep.
        A_PGE_No_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
        PGE_No_Charge_Hour_Indices = all_tsteps[PGE_No_Charge_Hour_Binary_Data_Month_Padded == 1]
        # Sum of all P_ES_in(t) between 4:00 and 9:00
        A_PGE_No_Charge[0, PGE_No_Charge_Hour_Indices] = 1.
        b_PGE_No_Charge = matrix(0., tc = 'd')
        A_Month = sparse([A_Month,
                          A_PGE_No_Charge], tc = 'd')
        b_Month = sparse([b_Month,
                          b_PGE_No_Charge], tc = 'd')
        # Derivation of discharging constraint in standard linear form Ax <= 0:
        # Sum of all P_ES_out(t) between 4:00 and 9:00/sum of all P_ES_out(t) >= 0.5
        # Sum of all P_ES_out(t) between 4:00 and 9:00 >= 0.5 * sum of all P_ES_out(t)
        # 0 >= 0.5 * sum of all P_ES_out(t) - sum of all P_ES_out(t) between 4:00 and 9:00
        # 0.5 * sum of all P_ES_out(t) - sum of all P_ES_out(t) between 4:00 and 9:00 <= 0
        # 0.5 * sum of all P_ES_out(t) not between 4:00 and 9:00 - 0.5 * sum of all P_ES_out(t)
        # between 4:00 and 9:00 <= 0.
        # Discharging Constraint
        A_PGE_Discharge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
        # 0.5 * sum of all P_ES_out(t)
        A_PGE_Discharge[0, range(numtsteps, 2 * numtsteps)] = 0.5
        PGE_Discharge_Hour_Indices = all_tsteps[PGE_Discharge_Hour_Binary_Data_Month_Padded == 1]
        # -0.5 * sum of all P_ES_out(t) between 4:00 and 9:00
        A_PGE_Discharge[0, numtsteps + PGE_Discharge_Hour_Indices] = -0.5
        b_PGE_Discharge = matrix(0., tc = 'd')
        A_Month = sparse([A_Month,
                          A_PGE_Discharge], tc = 'd')
        b_Month = sparse([b_Month,
                          b_PGE_Discharge], tc = 'd')
## Optional Constraint - Investor-Owned-Utility-Proposed Charge-Discharge Hours
if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
    # The Investor-Owned Utilities have suggested constraints on charging in
    # particular hours as a proposed method for reducing greenhouse gas
    # emissions associated with storage dispatch.  Specifically, at least 50%
    # of total charging must occur between 12:00 noon and 4:00 pm, and at
    # least 50% of total discharging must occur between 4:00 pm and 9:00 pm.
    #
    # Charging constraint in standard linear form Ax <= 0:
    #   sum(P_ES_in(t) between 12:00 and 4:00) / sum(P_ES_in(t)) >= 0.5
    #   => 0.5 * sum(P_ES_in(t) not in window)
    #      - 0.5 * sum(P_ES_in(t) in window) <= 0
    A_IOU_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
    # 0.5 * sum of all P_ES_in(t) (decision-vector columns 0 .. numtsteps-1).
    # Bug fix: the original assignment used row index 1, but A_IOU_Charge is a
    # single-row matrix (only row 0 exists), so this branch raised an
    # out-of-range error.  Row 0 matches the -0.5 assignment below and the
    # analogous PGE/IOU discharge constraints.
    A_IOU_Charge[0, range(0, numtsteps)] = 0.5
    IOU_Charge_Hour_Indices = all_tsteps[IOU_Charge_Hour_Binary_Data_Month_Padded == 1]
    # Overwrite the in-window columns with -0.5 (net: +0.5 outside the
    # 12:00-4:00 window, -0.5 inside it).
    A_IOU_Charge[0, IOU_Charge_Hour_Indices] = -0.5
    b_IOU_Charge = matrix(0., tc = 'd')
    A_Month = sparse([A_Month,
                      A_IOU_Charge], tc = 'd')
    b_Month = sparse([b_Month,
                      b_IOU_Charge], tc = 'd')
    # Discharging constraint in standard linear form Ax <= 0:
    #   sum(P_ES_out(t) between 4:00 and 9:00) / sum(P_ES_out(t)) >= 0.5
    #   => 0.5 * sum(P_ES_out(t) not in window)
    #      - 0.5 * sum(P_ES_out(t) in window) <= 0
    # Consistency fix: pass tc = 'd' explicitly, as every other constraint
    # matrix in this section does (behavior-neutral; 'd' is already the
    # typecode for a float-initialized cvxopt matrix).
    A_IOU_Discharge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
    # 0.5 * sum of all P_ES_out(t) (columns numtsteps .. 2*numtsteps-1).
    A_IOU_Discharge[0, range(numtsteps, 2 * numtsteps)] = 0.5
    IOU_Discharge_Hour_Indices = all_tsteps[IOU_Discharge_Hour_Binary_Data_Month_Padded == 1]
    # Overwrite the in-window columns with -0.5.
    A_IOU_Discharge[0, numtsteps + IOU_Discharge_Hour_Indices] = -0.5
    b_IOU_Discharge = matrix(0., tc = 'd')
    A_Month = sparse([A_Month,
                      A_IOU_Discharge], tc = 'd')
    b_Month = sparse([b_Month,
                      b_IOU_Discharge], tc = 'd')
## Optional Constraint - Non-Positive GHG Emissions Impact
# Note - the system is following the forecast signal to obey
# this constraint, not the evaluation signal. It may be necessary
# to adjust this constraint to aim for a negative GHG impact
# based on the forecast signal, in order to achieve a non-positive
# GHG impact as measured by the evaluation signal.
if GHG_Reduction_Solution_Input == "Non-Positive GHG Constraint":
# The sum of the net battery charge/discharge load in each
# timestep, multiplied by the marginal emissions rate in each
# timestep, must be less than or equal to 0.
# A_Non_Positive_GHG is similar to c_Month_Carbon_Only,
# but with Marginal Emissions Rate Data instead of Carbon Adder Data and transposed.
# Single constraint row: +MER(t)*delta_t on each P_ES_in(t),
# -MER(t)*delta_t on each P_ES_out(t), zeros on the energy-level block,
# and zeros on the three demand decision variables.
A_Non_Positive_GHG = matrix(np.concatenate((np.reshape(Marginal_Emissions_Rate_Data_Month_Padded * delta_t, (1, len(Marginal_Emissions_Rate_Data_Month_Padded))), \
    np.reshape(-Marginal_Emissions_Rate_Data_Month_Padded * delta_t, (1, len(Marginal_Emissions_Rate_Data_Month_Padded))), \
    np.zeros((1, numtsteps)), \
    np.reshape(np.array([0., 0., 0.]), (1, 3))), \
    axis = 1))
b_Non_Positive_GHG = matrix(0., tc = 'd')
A_Month = sparse([A_Month, A_Non_Positive_GHG], tc = 'd')
b_Month = sparse([b_Month, b_Non_Positive_GHG], tc = 'd')
## Optional Constraint - Equivalent Cycling Constraint
# Note: due to the OSESMO model structure, the annual cycling requirement
# must be converted to an equivalent monthly cycling requirement.
if Equivalent_Cycling_Constraint_Input > 0:
# Pro-rate the annual requirement by this month's (padded) share of the
# annual load-profile timesteps.
SGIP_Monthly_Cycling_Requirement = Equivalent_Cycling_Constraint_Input * \
    (len(Load_Profile_Data_Month_Padded) / len(Load_Profile_Data))
# Formula for equivalent cycles is identical to the one used to calculate Cycles_Month:
# Equivalent Cycles = sum((P_ES_in(t) * (((Eff_c)/(2 * Size_ES)) * delta_t)) + \
# (P_ES_out(t) * ((1/(Eff_d * 2 * Size_ES)) * delta_t)))
# Equivalent Cycles >= SGIP_Monthly_Cycling Requirement
# To convert to standard linear program form, multiply both sides by -1.
# -Equivalent Cycles <= -SGIP_Monthly_Cycling_Requirement
# (Hence the NEGATED coefficients and right-hand side below.)
A_Equivalent_Cycles = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
# sum of all P_ES_in(t) * (((Eff_c)/(2 * Size_ES)) * delta_t)
A_Equivalent_Cycles[0, range(0, numtsteps)] = -(((Eff_c) / (2 * Total_Storage_Capacity)) * delta_t)
# sum of all P_ES_out(t) * ((1/(Eff_d * 2 * Size_ES)) * delta_t)
A_Equivalent_Cycles[0, range(numtsteps, 2 * numtsteps)] = -((1 / (Eff_d * 2 * Total_Storage_Capacity)) * delta_t)
b_Equivalent_Cycles = matrix(-SGIP_Monthly_Cycling_Requirement, tc = 'd')
A_Month = sparse([A_Month,
                  A_Equivalent_Cycles], tc = 'd')
b_Month = sparse([b_Month,
                  b_Equivalent_Cycles], tc = 'd')
## Optional Constraint - Operational/SGIP Round-Trip Efficiency Constraint
# Note: due to the OSESMO model structure, the annual RTE requirement
# must be converted to an equivalent monthly RTE requirement.
if Annual_RTE_Constraint_Input > 0:
# If it's impossible for the storage system to achieve the RTE requirement
# even if it were constantly cycling, stop the model.
# NOTE(review): despite the comment above, this branch only prints a message
# and execution continues (the LP will later be infeasible) - confirm whether
# it should raise/exit instead.
if (Eff_c * Eff_d * Storage_Power_Rating_Input) / (
        Storage_Power_Rating_Input + Parasitic_Storage_Load) < Annual_RTE_Constraint_Input:
    print(['No solution - could not achieve SGIP RTE requirement' \
           ' with the provided nameplate efficiency and auxiliary storage load values.'])
# Formula for Annual Operational/SGIP round-trip efficiency is identical to the one
# used to calculate Operational_RTE_Percent:
# Operational_RTE_Percent = (sum(P_ES_out) * delta_t)/(sum(P_ES_in) * delta_t)
# Note that Auxiliary_Storage_Load has to be added to P_ES_in here.
# During the calculation of Operational_RTE_Percent, it has already
# been added previously, so it does not need to be included in the
# formula the way it is here.
# "The Commission concluded that storage devices should demonstrate
# an average RTE of at least 66.5% over ten years (equivalent to a
# first-year RTE of 69.6%) in order to qualify for SGIP incentive
# payments." (Stem, Inc.'s Petition for Modification of Decision 15-11-027, pg. 2)
# Operational RTE Percent >= 0.696
# (sum(P_ES_out) * delta_t)/((sum(P_ES_in) * delta_t) + (sum(Auxiliary_Storage_Load) * delta_t) >= 0.696
# (sum(P_ES_out) * delta_t) >= 0.696 * (sum(P_ES_in) * delta_t) + (sum(Auxiliary_Storage_Load) * delta_t)
# To convert to standard linear program form, multiply both sides by -1.
# -(sum(P_ES_out) * delta_t) <= -0.696 * (sum(P_ES_in) * delta_t) -(sum(Auxiliary_Storage_Load) * delta_t)
# -(sum(P_ES_out) * delta_t) + 0.696 * (sum(P_ES_in) * delta_t) <= -(sum(Auxiliary_Storage_Load) * delta_t)
# 0.696 * (sum(P_ES_in) * delta_t) -(sum(P_ES_out) * delta_t) <= -(sum(Auxiliary_Storage_Load) * delta_t)
# Single constraint row implementing the final inequality above.
A_SGIP_RTE = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
# sum of all (P_ES_in(t) * (0.696 * delta_t)
A_SGIP_RTE[0, range(0, numtsteps)] = (Annual_RTE_Constraint_Input * delta_t)
# sum of all P_ES_out(t) * -delta_t
A_SGIP_RTE[0, range(numtsteps, 2 * numtsteps)] = -delta_t
# (sum(Auxiliary_Storage_Load) * delta_t)
# (the parasitic load is constant, so its sum is numtsteps * Parasitic_Storage_Load)
b_SGIP_RTE = matrix(-((numtsteps * Parasitic_Storage_Load) * delta_t), tc = 'd')
A_Month = sparse([A_Month,
                  A_SGIP_RTE], tc = 'd')
b_Month = sparse([b_Month,
                  b_SGIP_RTE], tc = 'd')
## Optional Constraint - No-Export Constraint
# This constraint prevents the standalone energy-storage systems from
# backfeeding power from the storage system onto the distribution grid.
# Solar-plus storage systems are allowed to export to the grid.
if Model_Type_Input == "Storage Only":
# P_load(t) + P_ES_in(t) - P_ES_out(t) >= 0
# -P_ES_in(t) + P_ES_out(t) <= P_load(t)
# One constraint row per timestep: coefficient -1 on P_ES_in(t) and
# +1 on P_ES_out(t), right-hand side = that timestep's load.
A_No_Export = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
b_No_Export = matrix(Load_Profile_Data_Month_Padded, tc = 'd')
for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
A_No_Export[n, n] = -1.
A_No_Export[n, n + numtsteps] = 1.
A_Month = sparse([A_Month,
                  A_No_Export], tc = 'd')
b_Month = sparse([b_Month,
                  b_No_Export], tc = 'd')
## Optional Constraint - Solar Self-Supply
# In the Economic Dispatch mode, this constraint is not necessary - the
# positive cost on battery charging already prevents simultaneous charging
# and discharging.  In the Non-Economic Solar Self-Consumption mode, both
# charging and discharging carry negative costs, so without this constraint
# the battery would charge and discharge simultaneously to minimize total
# cost.  These constraints ensure that simultaneous charging and discharging
# does not occur: the battery may only charge from excess solar (net load
# negative) and only discharge into positive net load.
if Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
    # Charging constraint: P_ES_in(t) <= max(P_PV(t) - P_Load(t), 0)
    Excess_Solar_Profile_Data_Month_Padded = Solar_PV_Profile_Data_Month_Padded - Load_Profile_Data_Month_Padded
    Excess_Solar_Profile_Data_Month_Padded[Excess_Solar_Profile_Data_Month_Padded < 0] = 0
    A_Self_Supply_Charge = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
    b_Self_Supply_Charge = matrix(Excess_Solar_Profile_Data_Month_Padded, tc = 'd')
    # Identity coefficients on the P_ES_in block of the decision vector.
    for n in range(0, numtsteps):  # Timesteps 1..numtsteps (0-based indices)
        A_Self_Supply_Charge[n, n] = 1.
    A_Month = sparse([A_Month,
                      A_Self_Supply_Charge], tc = 'd')
    b_Month = sparse([b_Month,
                      b_Self_Supply_Charge], tc = 'd')
    # Discharging constraint: P_ES_out(t) <= max(P_Load(t) - P_PV(t), 0)
    Non_Negative_Net_Load_Profile_Data_Month_Padded = Load_Profile_Data_Month_Padded - Solar_PV_Profile_Data_Month_Padded
    Non_Negative_Net_Load_Profile_Data_Month_Padded[Non_Negative_Net_Load_Profile_Data_Month_Padded < 0] = 0
    A_Self_Supply_Discharge = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
    # Bug fix: wrap the right-hand side in a cvxopt matrix with tc = 'd',
    # matching b_Self_Supply_Charge above - the original assigned the raw
    # numpy array, which cannot be stacked into b_Month via cvxopt's sparse().
    b_Self_Supply_Discharge = matrix(Non_Negative_Net_Load_Profile_Data_Month_Padded, tc = 'd')
    # Identity coefficients on the P_ES_out block of the decision vector.
    for n in range(0, numtsteps):  # Timesteps 1..numtsteps (0-based indices)
        A_Self_Supply_Discharge[n, n + numtsteps] = 1.
    A_Month = sparse([A_Month,
                      A_Self_Supply_Discharge], tc = 'd')
    b_Month = sparse([b_Month,
                      b_Self_Supply_Discharge], tc = 'd')
## Run LP Optimization Algorithm
# Check that number of rows in A_Month.size == number of rows in b_Month.size
# Check that A_Month.typecode, b_Month.typecode, c_Month.typecode == 'd'
b_Month = matrix(b_Month, tc = 'd') # Convert from sparse to dense matrix
# Solve min c'x s.t. A_Month x <= b_Month (cvxopt solvers.lp(c, G, h) form).
lp_solution = solvers.lp(c_Month, A_Month, b_Month)
x_Month = lp_solution['x']
print("Optimization complete for Month %d." % Month_Iter)
## Separate Decision Variable Vectors
# Decision-vector layout: [P_ES_in (numtsteps) | P_ES_out (numtsteps) |
# Ene_Lvl (numtsteps) | 3 demand variables].
x_Month = np.asarray(x_Month)
P_ES_in_Month_Padded = x_Month[range(0, numtsteps)]
P_ES_out_Month_Padded = x_Month[range(numtsteps, 2 * numtsteps)]
Ene_Lvl_Month_Padded = x_Month[range(2 * numtsteps, 3 * numtsteps)]
## Add Auxiliary Load/Parasitic Losses to P_ES_in
P_ES_in_Month_Padded = P_ES_in_Month_Padded + Parasitic_Storage_Load
## Remove "Padding" from Decision Variables
# Data is padded in Months 1-11, and not in Month 12
# (padding length = End_of_Month_Padding_Days of timesteps at delta_t hours).
if Month_Iter in range(1, 12):
P_ES_in_Month_Unpadded = P_ES_in_Month_Padded[range(0, (len(P_ES_in_Month_Padded)-int(End_of_Month_Padding_Days * 24 * (1 / delta_t))))]
P_ES_out_Month_Unpadded = P_ES_out_Month_Padded[range(0, (len(P_ES_out_Month_Padded)-int(End_of_Month_Padding_Days * 24 * (1 / delta_t))))]
Ene_Lvl_Month_Unpadded = Ene_Lvl_Month_Padded[range(0, (len(Ene_Lvl_Month_Padded)-int(End_of_Month_Padding_Days * 24 * (1 / delta_t))))]
elif Month_Iter == 12:
P_ES_in_Month_Unpadded = P_ES_in_Month_Padded
P_ES_out_Month_Unpadded = P_ES_out_Month_Padded
Ene_Lvl_Month_Unpadded = Ene_Lvl_Month_Padded
# Save Final Energy Level of Battery for use in next month
# (advance the final state of charge one timestep using the efficiency-
# adjusted charge/discharge power from the last unpadded timestep).
Previous_Month_Final_Energy_Level = Ene_Lvl_Month_Unpadded[-1,0]
Next_Month_Initial_Energy_Level = Previous_Month_Final_Energy_Level + \
    ((Eff_c * P_ES_in_Month_Unpadded[-1,0]) - \
     ((1 / Eff_d) * P_ES_out_Month_Unpadded[-1,0])) * delta_t
## Calculate Monthly Peak Demand Using 15-Minute Intervals
# Demand charges are based on 15-minute interval periods.
# If the model runs at 15-minute resolution, the demand decision variables
# can be used directly as the maximum coincident and noncoincident demand
# values.  At finer resolutions (such as 5-minute timesteps), demand must be
# calculated by averaging the profiles into 15-minute intervals and taking
# the maximum of those averages.
if delta_t < (15 / 60):
    # --- Sub-15-minute resolution: form 15-minute interval averages. ---
    # Noncoincident Maximum Demand With and Without Solar and Storage
    # Create Net Load Profile After Solar Only
    Solar_Only_Net_Load_Profile_Data_Month_5_Min = (Load_Profile_Data_Month - Solar_PV_Profile_Data_Month)
    # Create Net Load Profile After Solar and Storage
    Solar_Storage_Net_Load_Profile_Data_Month_5_Min = (Load_Profile_Data_Month - Solar_PV_Profile_Data_Month + \
                                                       P_ES_in_Month_Unpadded - P_ES_out_Month_Unpadded)
    # Number of timesteps averaged into one 15-minute interval (3 for 5-minute data).
    Reshaped_Rows_Num = int((15 / 60) / delta_t)
    # Group each 15-minute increment's consecutive samples into one ROW of a
    # (num_intervals, Reshaped_Rows_Num) array, then average along axis 1 to
    # get one value per 15-minute interval.
    # Bug fixes relative to the original code:
    #  * np.reshape requires integer dimensions - the original passed
    #    len(...) / Reshaped_Rows_Num, which is a float under Python 3;
    #  * np.reshape is row-major (MATLAB's reshape is column-major), so a
    #    (Reshaped_Rows_Num, n/R) shape averaged along axis 1 produced only
    #    Reshaped_Rows_Num strided averages instead of n/R interval averages.
    Load_Profile_Data_Month_15_Min = np.mean(
        np.reshape(Load_Profile_Data_Month, (-1, Reshaped_Rows_Num)), 1)
    Solar_Only_Net_Load_Profile_Data_Month_15_Min = np.mean(
        np.reshape(Solar_Only_Net_Load_Profile_Data_Month_5_Min, (-1, Reshaped_Rows_Num)), 1)
    Solar_Storage_Net_Load_Profile_Data_Month_15_Min = np.mean(
        np.reshape(Solar_Storage_Net_Load_Profile_Data_Month_5_Min, (-1, Reshaped_Rows_Num)), 1)
    # Calculate Noncoincident Maximum Demand
    P_max_NC_Month_Baseline = np.max(Load_Profile_Data_Month_15_Min)
    P_max_NC_Month_with_Solar_Only = np.max(Solar_Only_Net_Load_Profile_Data_Month_15_Min)
    P_max_NC_Month_with_Solar_and_Storage = np.max(Solar_Storage_Net_Load_Profile_Data_Month_15_Min)
    # Coincident Peak Demand With and Without Storage
    if Peak_DC > 0:
        # Select the season-appropriate peak-period mask.
        if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
            CPK_Load_Profile_Data_Month = Load_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1]
            CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Summer_Peak_Binary_Data_Month == 1]
            CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Summer_Peak_Binary_Data_Month == 1]
        else:
            CPK_Load_Profile_Data_Month = Load_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1]
            CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Winter_Peak_Binary_Data_Month == 1]
            CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Winter_Peak_Binary_Data_Month == 1]
        # 15-minute averages within the peak period (same reshape fix as above).
        CPK_Load_Profile_Data_Month_15_Min = np.mean(
            np.reshape(CPK_Load_Profile_Data_Month, (-1, Reshaped_Rows_Num)), 1)
        CPK_Solar_Only_Net_Load_Profile_Data_Month_15_Min = np.mean(
            np.reshape(CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min, (-1, Reshaped_Rows_Num)), 1)
        CPK_Solar_Storage_Net_Load_Profile_Data_Month_15_Min = np.mean(
            np.reshape(CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min, (-1, Reshaped_Rows_Num)), 1)
        # Calculate Coincident Peak Demand
        P_max_CPK_Month_Baseline = np.max(CPK_Load_Profile_Data_Month_15_Min)
        P_max_CPK_Month_with_Solar_Only = np.max(CPK_Solar_Only_Net_Load_Profile_Data_Month_15_Min)
        P_max_CPK_Month_with_Solar_and_Storage = np.max(CPK_Solar_Storage_Net_Load_Profile_Data_Month_15_Min)
    else:
        # If there is no Coincident Peak Demand Period (or if the
        # corresponding demand charge is $0/kW), set P_max_CPK to 0 kW.
        P_max_CPK_Month_Baseline = 0
        P_max_CPK_Month_with_Solar_Only = 0
        P_max_CPK_Month_with_Solar_and_Storage = 0
    # Coincident Part-Peak Demand With and Without Storage
    if Part_Peak_DC > 0:
        # Select the season-appropriate part-peak-period mask.
        if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
            CPP_Load_Profile_Data_Month = Load_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1]
            CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Summer_Part_Peak_Binary_Data_Month == 1]
            CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Summer_Part_Peak_Binary_Data_Month == 1]
        else:
            CPP_Load_Profile_Data_Month = Load_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1]
            CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Winter_Part_Peak_Binary_Data_Month == 1]
            CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Winter_Part_Peak_Binary_Data_Month == 1]
        # 15-minute averages within the part-peak period (same reshape fix).
        CPP_Load_Profile_Data_Month_15_Min = np.mean(
            np.reshape(CPP_Load_Profile_Data_Month, (-1, Reshaped_Rows_Num)), 1)
        CPP_Solar_Only_Net_Load_Profile_Data_Month_15_Min = np.mean(
            np.reshape(CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min, (-1, Reshaped_Rows_Num)), 1)
        CPP_Solar_Storage_Net_Load_Profile_Data_Month_15_Min = np.mean(
            np.reshape(CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min, (-1, Reshaped_Rows_Num)), 1)
        # Calculate Coincident Part-Peak Demand
        P_max_CPP_Month_Baseline = np.max(CPP_Load_Profile_Data_Month_15_Min)
        P_max_CPP_Month_with_Solar_Only = np.max(CPP_Solar_Only_Net_Load_Profile_Data_Month_15_Min)
        P_max_CPP_Month_with_Solar_and_Storage = np.max(CPP_Solar_Storage_Net_Load_Profile_Data_Month_15_Min)
    else:
        # If there is no Coincident Part-Peak Demand Period (or if the
        # corresponding demand charge is $0/kW), set P_max_CPP to 0 kW.
        P_max_CPP_Month_Baseline = 0
        P_max_CPP_Month_with_Solar_Only = 0
        P_max_CPP_Month_with_Solar_and_Storage = 0
elif delta_t == (15 / 60) or delta_t == (60 / 60):
    # At (or coarser than) 15-minute resolution the profiles already lie on
    # demand-charge intervals, so the demand decision variables are used
    # directly.
    # Bug fix: the original condition tested only delta_t == (60 / 60), which
    # sent 15-minute runs into the error branch below - contradicting the
    # header comment - and then crashed later on undefined P_max_* values.
    # Hourly runs are still accepted here to preserve existing behavior.
    # Noncoincident Maximum Demand With and Without Storage
    P_max_NC_Month_Baseline = np.max(Load_Profile_Data_Month)
    P_max_NC_Month_with_Solar_Only = np.max(Load_Profile_Data_Month - Solar_PV_Profile_Data_Month)
    # Demand decision variables occupy positions 3*numtsteps .. 3*numtsteps+2.
    P_max_NC_Month_with_Solar_and_Storage = x_Month[3 * numtsteps, 0]
    # Coincident Peak Demand With and Without Storage
    if Peak_DC > 0:
        if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
            P_max_CPK_Month_Baseline = np.max(Load_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1])
            P_max_CPK_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1] - \
                                                     Solar_PV_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1])
        else:
            P_max_CPK_Month_Baseline = np.max(Load_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1])
            P_max_CPK_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1] - \
                                                     Solar_PV_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1])
        P_max_CPK_Month_with_Solar_and_Storage = x_Month[3 * numtsteps + 1, 0]
    else:
        # If there is no Coincident Peak Demand Period (or if the
        # corresponding demand charge is $0/kW), set P_max_CPK to 0 kW.
        P_max_CPK_Month_Baseline = 0
        P_max_CPK_Month_with_Solar_Only = 0
        P_max_CPK_Month_with_Solar_and_Storage = 0
    # Coincident Part-Peak Demand With and Without Storage
    if Part_Peak_DC > 0:
        if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
            P_max_CPP_Month_Baseline = np.max(Load_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1])
            P_max_CPP_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1] - \
                                                     Solar_PV_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1])
        else:
            P_max_CPP_Month_Baseline = np.max(Load_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1])
            P_max_CPP_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1] - \
                                                     Solar_PV_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1])
        P_max_CPP_Month_with_Solar_and_Storage = x_Month[3 * numtsteps + 2, 0]
    else:
        # If there is no Coincident Part-Peak Demand Period (or if the
        # corresponding demand charge is $0/kW), set P_max_CPP to 0 kW.
        P_max_CPP_Month_Baseline = 0
        P_max_CPP_Month_with_Solar_Only = 0
        P_max_CPP_Month_with_Solar_and_Storage = 0
else:
    print('Timestep is larger than 15 minutes. Cannot properly calculate billing demand.')
## Calculate Monthly Bill Cost with and Without Storage
# Monthly cost from the fixed per-meter charges; not affected by storage.
Fixed_Charge_Month = Fixed_Per_Meter_Month_Charge + (
    Fixed_Per_Meter_Day_Charge * len(Load_Profile_Data_Month) / (24 * (1 / delta_t)))
# Select the season-appropriate demand-charge rates once, rather than
# branching separately for every charge component below.
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
    Seasonal_Noncoincident_DC = Summer_Noncoincident_DC
    Seasonal_Peak_DC = Summer_Peak_DC
    Seasonal_Part_Peak_DC = Summer_Part_Peak_DC
else:
    Seasonal_Noncoincident_DC = Winter_Noncoincident_DC
    Seasonal_Peak_DC = Winter_Peak_DC
    Seasonal_Part_Peak_DC = Winter_Part_Peak_DC
# Monthly noncoincident demand charges (baseline / solar only / solar + storage).
NC_Demand_Charge_Month_Baseline = Seasonal_Noncoincident_DC * P_max_NC_Month_Baseline
NC_Demand_Charge_Month_with_Solar_Only = Seasonal_Noncoincident_DC * P_max_NC_Month_with_Solar_Only
NC_Demand_Charge_Month_with_Solar_and_Storage = Seasonal_Noncoincident_DC * P_max_NC_Month_with_Solar_and_Storage
# Monthly coincident peak demand charges.
CPK_Demand_Charge_Month_Baseline = Seasonal_Peak_DC * P_max_CPK_Month_Baseline
CPK_Demand_Charge_Month_with_Solar_Only = Seasonal_Peak_DC * P_max_CPK_Month_with_Solar_Only
CPK_Demand_Charge_Month_with_Solar_and_Storage = Seasonal_Peak_DC * P_max_CPK_Month_with_Solar_and_Storage
# Monthly coincident part-peak demand charges.
CPP_Demand_Charge_Month_Baseline = Seasonal_Part_Peak_DC * P_max_CPP_Month_Baseline
CPP_Demand_Charge_Month_with_Solar_Only = Seasonal_Part_Peak_DC * P_max_CPP_Month_with_Solar_Only
CPP_Demand_Charge_Month_with_Solar_and_Storage = Seasonal_Part_Peak_DC * P_max_CPP_Month_with_Solar_and_Storage
# Monthly cost from volumetric energy rates - baseline load.
Energy_Charge_Month_Baseline = np.dot(np.transpose(Load_Profile_Data_Month), Volumetric_Rate_Data_Month) * delta_t
# Monthly cost from volumetric energy rates - with solar only.
Solar_Only_Net_Load_Profile_Month = Load_Profile_Data_Month - Solar_PV_Profile_Data_Month
Energy_Charge_Month_with_Solar_Only = np.dot(np.transpose(Solar_Only_Net_Load_Profile_Month), Volumetric_Rate_Data_Month) * delta_t
# Monthly cost from volumetric energy rates - with solar and storage.
Solar_Storage_Net_Load_Profile_Month = Load_Profile_Data_Month - Solar_PV_Profile_Data_Month + np.transpose(P_ES_in_Month_Unpadded) - np.transpose(P_ES_out_Month_Unpadded)
Energy_Charge_Month_with_Solar_and_Storage = np.dot(Solar_Storage_Net_Load_Profile_Month, np.reshape(Volumetric_Rate_Data_Month, (len(Volumetric_Rate_Data_Month), 1))) * delta_t
# Collapse the 1x1 result array to a plain scalar.
Energy_Charge_Month_with_Solar_and_Storage = Energy_Charge_Month_with_Solar_and_Storage[0, 0]
# Monthly equivalent cycle count and the associated cycling penalty.
Cycles_Month = np.sum((P_ES_in_Month_Unpadded * (((Eff_c) / (2 * Total_Storage_Capacity)) * delta_t)) + \
                      (P_ES_out_Month_Unpadded * ((1 / (Eff_d * 2 * Total_Storage_Capacity)) * delta_t)))
Cycling_Penalty_Month = np.sum((P_ES_in_Month_Unpadded * (((Eff_c * cycle_pen) / (2 * Total_Storage_Capacity)) * delta_t)) + \
                               (P_ES_out_Month_Unpadded * ((cycle_pen / (Eff_d * 2 * Total_Storage_Capacity)) * delta_t)))
## Update Battery Capacity Based on Monthly Cycling
# This is to account for capacity fade in lithium-ion batteries.
# Based on standard definitions of battery cycle life, lithium-ion batteries are
# defined to have experienced capacity fade to 80% of its original
# capacity by the end of its cycle life.
# Flow batteries do not experience capacity fade.
if Storage_Type_Input == "Lithium-Ion Battery":
# Linear fade model: lose 20% of original usable capacity over Cycle_Life
# equivalent cycles, pro-rated by this month's cycle count.
Usable_Storage_Capacity = Usable_Storage_Capacity - (Usable_Storage_Capacity_Input * (Cycles_Month / Cycle_Life) * 0.2)
elif Storage_Type_Input == "Flow Battery":
# No fade for flow batteries; capacity is carried forward unchanged.
Usable_Storage_Capacity = Usable_Storage_Capacity
# Update Previous Month Final Energy Level to account for capacity fade, if battery is full at
# end of month. Otherwise, optimization is infeasible.
if Next_Month_Initial_Energy_Level > Usable_Storage_Capacity:
# Clamp the carried-over energy level to the (faded) usable capacity.
Next_Month_Initial_Energy_Level = Usable_Storage_Capacity
## Concatenate Decision Variable & Monthly Cost Values from Month Iteration
# Each vector below follows the same append-or-initialize pattern: if the
# running vector already has entries, append this month's value(s); otherwise
# start it from this month's value(s).  Scalar monthly values are wrapped
# with np.asarray(...).reshape((-1, 1)) so every vector stays a column vector.
# Decision Variable Concatenation
P_ES_in = np.concatenate((P_ES_in, P_ES_in_Month_Unpadded)) if P_ES_in.size != 0 else P_ES_in_Month_Unpadded
P_ES_out = np.concatenate((P_ES_out, P_ES_out_Month_Unpadded)) if P_ES_out.size != 0 else P_ES_out_Month_Unpadded
Ene_Lvl = np.concatenate((Ene_Lvl, Ene_Lvl_Month_Unpadded)) if Ene_Lvl.size != 0 else Ene_Lvl_Month_Unpadded
P_max_NC = np.concatenate((P_max_NC, np.asarray(P_max_NC_Month_with_Solar_and_Storage).reshape((-1,1)))) if P_max_NC.size != 0 else np.asarray(P_max_NC_Month_with_Solar_and_Storage).reshape((-1,1))
P_max_peak = np.concatenate((P_max_peak, np.asarray(P_max_CPK_Month_with_Solar_and_Storage).reshape((-1, 1)))) if P_max_peak.size != 0 else np.asarray(P_max_CPK_Month_with_Solar_and_Storage).reshape((-1, 1))
P_max_part_peak = np.concatenate((P_max_part_peak, np.asarray(P_max_CPP_Month_with_Solar_and_Storage).reshape((-1, 1)))) if P_max_part_peak.size != 0 else np.asarray(P_max_CPP_Month_with_Solar_and_Storage).reshape((-1, 1))
# Monthly Cost Variable Concatenation
# (one entry per month for each bill component, baseline / solar only /
# solar + storage).
Fixed_Charge_Vector = np.concatenate((Fixed_Charge_Vector, np.asarray(Fixed_Charge_Month).reshape((-1,1)))) if Fixed_Charge_Vector.size != 0 else np.asarray(Fixed_Charge_Month).reshape((-1,1))
NC_DC_Baseline_Vector = np.concatenate((NC_DC_Baseline_Vector,
                                        np.asarray(NC_Demand_Charge_Month_Baseline).reshape((-1, 1)))) if NC_DC_Baseline_Vector.size != 0 else np.asarray(NC_Demand_Charge_Month_Baseline).reshape((-1,1))
NC_DC_with_Solar_Only_Vector = np.concatenate((NC_DC_with_Solar_Only_Vector,
                                               np.asarray(NC_Demand_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if NC_DC_with_Solar_Only_Vector.size != 0 else np.asarray(NC_Demand_Charge_Month_with_Solar_Only).reshape((-1,1))
NC_DC_with_Solar_and_Storage_Vector = np.concatenate((NC_DC_with_Solar_and_Storage_Vector,
                                                      np.asarray(
                                                          NC_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if NC_DC_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(NC_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
CPK_DC_Baseline_Vector = np.concatenate((CPK_DC_Baseline_Vector,
                                         np.asarray(CPK_Demand_Charge_Month_Baseline).reshape((-1, 1)))) if CPK_DC_Baseline_Vector.size != 0 else np.asarray(CPK_Demand_Charge_Month_Baseline).reshape((-1,1))
CPK_DC_with_Solar_Only_Vector = np.concatenate((CPK_DC_with_Solar_Only_Vector,
                                                np.asarray(CPK_Demand_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if CPK_DC_with_Solar_Only_Vector.size != 0 else np.asarray(CPK_Demand_Charge_Month_with_Solar_Only).reshape((-1,1))
CPK_DC_with_Solar_and_Storage_Vector = np.concatenate((CPK_DC_with_Solar_and_Storage_Vector,
                                                       np.asarray(
                                                           CPK_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if CPK_DC_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(CPK_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
CPP_DC_Baseline_Vector = np.concatenate((CPP_DC_Baseline_Vector,
                                         np.asarray(CPP_Demand_Charge_Month_Baseline).reshape((-1, 1)))) if CPP_DC_Baseline_Vector.size != 0 else np.asarray(CPP_Demand_Charge_Month_Baseline).reshape((-1,1))
CPP_DC_with_Solar_Only_Vector = np.concatenate((CPP_DC_with_Solar_Only_Vector,
                                                np.asarray(CPP_Demand_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if CPP_DC_with_Solar_Only_Vector.size != 0 else np.asarray(CPP_Demand_Charge_Month_with_Solar_Only).reshape((-1,1))
CPP_DC_with_Solar_and_Storage_Vector = np.concatenate((CPP_DC_with_Solar_and_Storage_Vector,
                                                       np.asarray(CPP_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if CPP_DC_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(CPP_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
Energy_Charge_Baseline_Vector = np.concatenate((Energy_Charge_Baseline_Vector,
                                                np.asarray(Energy_Charge_Month_Baseline).reshape((-1, 1)))) if Energy_Charge_Baseline_Vector.size != 0 else np.asarray(Energy_Charge_Month_Baseline).reshape((-1,1))
Energy_Charge_with_Solar_Only_Vector = np.concatenate((Energy_Charge_with_Solar_Only_Vector,
                                                       np.asarray(Energy_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if Energy_Charge_with_Solar_Only_Vector.size != 0 else np.asarray(Energy_Charge_Month_with_Solar_Only).reshape((-1,1))
Energy_Charge_with_Solar_and_Storage_Vector = np.concatenate((Energy_Charge_with_Solar_and_Storage_Vector,
                                                              np.asarray(Energy_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if Energy_Charge_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(Energy_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
Cycles_Vector = np.concatenate((Cycles_Vector, np.asarray(Cycles_Month).reshape((-1,1)))) if Cycles_Vector.size != 0 else np.asarray(Cycles_Month).reshape((-1,1))
Cycling_Penalty_Vector = np.concatenate((Cycling_Penalty_Vector, np.asarray(Cycling_Penalty_Month).reshape((-1,1)))) if Cycling_Penalty_Vector.size != 0 else np.asarray(Cycling_Penalty_Month).reshape((-1,1))
# Report total script runtime.
# NOTE: tstart is set earlier in the file (before this excerpt).
tend = time.time()
telapsed = tend - tstart
print('Model Run %0.f complete. Elapsed time to run the optimization model is %0.0f seconds.' % (Model_Run_Number_Input, telapsed))
## Calculation of Additional Reported Model Inputs/Outputs
# Output current system date and time in standard ISO 8601 YYYY-MM-DD HH:MM format.
Model_Run_Date_Time = datetime.datetime.now().replace(microsecond=0).isoformat()
# Convert Retail Rate Name Input (which contains both utility name and rate
# name) into Retail Rate Utility and Retail Rate Name Output
if "PG&E" in Retail_Rate_Name_Input:
Retail_Rate_Utility = "PG&E"
elif "SCE" in Retail_Rate_Name_Input:
Retail_Rate_Utility = "SCE"
elif "SDG&E" in Retail_Rate_Name_Input:
Retail_Rate_Utility = "SDG&E"
Retail_Rate_Utility_Plus_Space = Retail_Rate_Utility + " "
Retail_Rate_Name_Output = Retail_Rate_Name_Input.replace(Retail_Rate_Utility_Plus_Space, "")
# If Solar Profile Name is "No Solar", Solar Profile Name Output is Blank
if Solar_Profile_Name_Input == "No Solar":
    Solar_Profile_Name_Output = ""
else:
    Solar_Profile_Name_Output = Solar_Profile_Name_Input
# Storage Control Algorithm Description (Optional)
# Maps the short algorithm name onto a longer human-readable description
# for the reporting-output CSV.
if Storage_Control_Algorithm_Name == "OSESMO Economic Dispatch":
    Storage_Control_Algorithm_Description = "Open Source Energy Storage Model - Economic Dispatch"
elif Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
    Storage_Control_Algorithm_Description = "Open Source Energy Storage Model - Non-Economic Solar Self-Supply"
# Storage Algorithm Parameters Filename (Optional)
Storage_Control_Algorithms_Parameters_Filename = "" # No storage parameters file.
# Other Incentives or Penalities (Optional)
Other_Incentives_or_Penalities = "" # No other incentives or penalties.
Output_Summary_Filename = "OSESMO Reporting Inputs and Outputs.csv"
Output_Description_Filename = "" # No output description file.
Output_Visualizations_Filename = "Multiple files - in same folder as Output Summary file." # No single output visualizations file.
# EV-related fields are reported as blanks — this model does not simulate EVs.
EV_Use = "" # Model does not calculate or report EV usage information.
EV_Charge = "" # Model does not calculate or report EV charge information.
EV_Gas_Savings = "" # Model does not calculate or report EV gas savings information.
EV_GHG_Savings = "" # Model does not calculate or report EV GHG savings information.
## Output Directory/Folder Names
# Build the (deeply nested) output folder path that encodes every major
# model-run input, then create it if it does not exist yet.
if ITC_Constraint_Input == 0:
    ITC_Constraint_Folder_Name = "No ITC Constraint"
elif ITC_Constraint_Input == 1:
    ITC_Constraint_Folder_Name = "ITC Constraint"
# Ensures that folder is called "No Emissions Forecast Signal",
# and not "No Emissions Forecast Signal Emissions Forecast Signal"
if Emissions_Forecast_Signal_Input == "No Emissions Forecast Signal":
    Emissions_Forecast_Signal_Input = "No"
Output_Directory_Filepath = os.path.join(Input_Output_Data_Directory_Location, "Models", "OSESMO", "Model Outputs",
                                         Model_Type_Input, str(Model_Timestep_Resolution) + "-Minute Timestep Resolution",
                                         Customer_Class_Input, Load_Profile_Name_Input, Retail_Rate_Name_Input,
                                         Solar_Profile_Name_Input, str(Solar_Size_Input) + " kW Solar", Storage_Type_Input,
                                         str(Storage_Power_Rating_Input) + " kW " + str(Usable_Storage_Capacity_Input) + " kWh Storage",
                                         str(int(Single_Cycle_RTE_Input * 100)) + " Percent Single-Cycle RTE",
                                         str(Parasitic_Storage_Load_Input * 100) + " Percent Parasitic Load",
                                         Storage_Control_Algorithm_Name, GHG_Reduction_Solution_Input,
                                         str(Equivalent_Cycling_Constraint_Input) + " Equivalent Cycles Constraint",
                                         str(int(Annual_RTE_Constraint_Input * 100)) + " Percent Annual RTE Constraint",
                                         ITC_Constraint_Folder_Name,
                                         str(Carbon_Adder_Incentive_Value_Input) + " Dollar Carbon Adder Incentive",
                                         Emissions_Forecast_Signal_Input + " Emissions Forecast Signal")
# Correct Emissions Forecast Signal Name back so that it is exported with
# the correct name in the Outputs model.
if Emissions_Forecast_Signal_Input == "No":
    Emissions_Forecast_Signal_Input = "No Emissions Forecast Signal"
# Create folder if one does not exist already.
# Fix: use os.makedirs instead of os.mkdir — the output path is many levels
# deep, and os.mkdir raises FileNotFoundError when any intermediate folder
# is missing; os.makedirs creates the whole chain.
if Export_Data and not os.path.isdir(Output_Directory_Filepath):
    os.makedirs(Output_Directory_Filepath)
## Plot Energy Storage Dispatch Schedule
# Build the shared time axis and net-storage-dispatch series used by all
# of the plots below.
numtsteps_year = len(Load_Profile_Data)
# Fix: removed the dead statement `t = np.linspace(1, 35040, 35040)` — it was
# immediately overwritten by the timestamp list below, and its hard-coded
# 35040 assumed a 15-minute, non-leap-year profile.
# delta_t is the timestep length in hours, so each step is 60*delta_t minutes.
t = [Start_Time_Input + datetime.timedelta(minutes=int(60 * delta_t) * x) for x in range(0, numtsteps_year)]
# Net storage dispatch (kW): positive = discharging, negative = charging.
P_ES = np.reshape(P_ES_out - P_ES_in, (numtsteps_year,))
# Note: The MATLAB version of OSESMO which saves files in .fig format, which allows plots of model runs to be
# re-opened and then explored interactively (ex. zooming in on specific days).
# OSESMO Python does not have this functionality currently, as matplotlib does not have any built-in features that make this possible.
# It may be possible to add this functionality in the future, using the pickle package.
# https://stackoverflow.com/questions/4348733/saving-interactive-matplotlib-figures
## Plot Energy Storage Energy Level
## Plot Volumetric Electricity Price Schedule and Marginal Carbon Emission Rates
# Dual-axis plot: volumetric energy price (left, blue) and marginal
# emissions rate (right, red) over the full model year.
if Show_Plots == 1 or Export_Plots == 1:
    fig, ax1 = plt.subplots()
    ax1.plot(t, Volumetric_Rate_Data, 'b-')
    ax1.set_xlabel('Date & Time')
    ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
    ax1.set_ylabel('Energy Price ($/kWh)', color='b')
    ax1.tick_params('y', colors='b')
    # Second y-axis sharing the same x-axis.
    ax2 = ax1.twinx()
    ax2.plot(t, Marginal_Emissions_Rate_Evaluation_Data, 'r-')
    ax2.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
    ax2.set_ylabel('Marginal Emissions Rate (metric tons/kWh)', color='r')
    ax2.set_title('Electricity Rates and Marginal Emissions Rates')
    ax2.tick_params('y', colors='r')
    fig.autofmt_xdate()
    fig.tight_layout()
    plt.show()
    # NOTE(review): savefig is called after plt.show(); with a blocking
    # (non-interactive) backend the figure may already be closed, producing
    # a blank image — confirm the backend in use, or save before show.
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Energy Price and Carbon Plot.png'))
## Plot Coincident and Non-Coincident Demand Charge Schedule
# Create Summer/Winter Binary Flag Vector
# Summer months are those in [First_Summer_Month, Last_Summer_Month];
# everything else is winter.
Summer_Binary_Data_1 = Month_Data >= First_Summer_Month
Summer_Binary_Data_2 = Month_Data <= Last_Summer_Month
Summer_Binary_Data = np.logical_and(Summer_Binary_Data_1, Summer_Binary_Data_2)
Winter_Binary_Data_1 = Month_Data < First_Summer_Month
Winter_Binary_Data_2 = Month_Data > Last_Summer_Month
Winter_Binary_Data = np.logical_or(Winter_Binary_Data_1, Winter_Binary_Data_2)
# Create Total-Demand-Charge Vector
# Noncoincident Demand Charge is always included (although it may be 0).
# Coincident Peak and Part-Peak values are only added if they are non-zero
# and a binary-flag data input is available.
Total_DC = (Winter_Noncoincident_DC * Winter_Binary_Data) + \
           (Summer_Noncoincident_DC * Summer_Binary_Data)
if Winter_Peak_DC > 0:
    Total_DC = Total_DC + (Winter_Peak_DC * Winter_Peak_Binary_Data)
if Winter_Part_Peak_DC > 0:
    Total_DC = Total_DC + (Winter_Part_Peak_DC * Winter_Part_Peak_Binary_Data)
if Summer_Peak_DC > 0:
    Total_DC = Total_DC + (Summer_Peak_DC * Summer_Peak_Binary_Data)
if Summer_Part_Peak_DC > 0:
    Total_DC = Total_DC + (Summer_Part_Peak_DC * Summer_Part_Peak_Binary_Data)
# Plot the combined $/kW demand-charge schedule over the year.
if Show_Plots == 1 or Export_Plots == 1:
    fig, ax = plt.subplots()
    ax.plot(t, Total_DC, 'g-')
    ax.set_xlabel('Date & Time')
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
    ax.set_ylabel('Total Demand Charge ($/kW)')
    ax.set_title('Coincident + Non-Coincident Demand Charge Schedule')
    fig.autofmt_xdate()
    fig.tight_layout()
    plt.show()
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Demand Charge Plot.png'))
## Plot Load, Net Load with Solar Only, Net Load with Solar and Storage
# Overlay the original load against net load: one curve for storage-only
# runs, two (solar-only and solar+storage) for solar-plus-storage runs.
if Show_Plots == 1 or Export_Plots == 1:
    if Model_Type_Input == "Storage Only":
        fig, ax = plt.subplots()
        ax.plot(t, Load_Profile_Data, 'k-', label = 'Original Load')
        # P_ES is net discharge, so subtracting it gives net load with storage.
        ax.plot(t, Load_Profile_Data - P_ES, 'r-', label = 'Net Load with Storage')
        ax.set_xlabel('Date & Time')
        ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
        ax.set_ylabel('Load (kW)')
        ax.set_title('Original and Net Load Profiles')
        ax.legend()
        fig.autofmt_xdate()
        fig.tight_layout()
        plt.show()
    elif Model_Type_Input == "Solar Plus Storage":
        fig, ax = plt.subplots()
        ax.plot(t, Load_Profile_Data, 'k-', label = 'Original Load')
        ax.plot(t, Load_Profile_Data - Solar_PV_Profile_Data, 'b-', label='Net Load with Solar Only')
        ax.plot(t, Load_Profile_Data - (Solar_PV_Profile_Data + P_ES), 'r-', label = 'Net Load with Solar + Storage')
        ax.set_xlabel('Date & Time')
        ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
        ax.set_ylabel('Load (kW)')
        ax.set_title('Original and Net Load Profiles')
        ax.legend()
        fig.autofmt_xdate()
        fig.tight_layout()
        plt.show()
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Net Load Plot.png'))
# Annual peak demand and total energy consumption, with and without
# solar/storage, plus percentage reductions relative to the baseline.
# Fields that don't apply to "Storage Only" runs are reported as "".
if Model_Type_Input == "Storage Only":
    Annual_Peak_Demand_with_Solar_Only = ""
    Annual_Total_Energy_Consumption_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Peak_Demand_with_Solar_Only = np.max(Load_Profile_Data - Solar_PV_Profile_Data)
    Annual_Total_Energy_Consumption_with_Solar_Only = np.sum(Load_Profile_Data - Solar_PV_Profile_Data) * delta_t
# With-storage values are computed for both model types (in "Storage Only"
# runs Solar_PV_Profile_Data is presumably all zeros — TODO confirm upstream).
Annual_Peak_Demand_with_Solar_and_Storage = np.max(Load_Profile_Data - (Solar_PV_Profile_Data + P_ES))
Annual_Total_Energy_Consumption_with_Solar_and_Storage = np.sum(Load_Profile_Data - (Solar_PV_Profile_Data + P_ES)) * delta_t
if Model_Type_Input == "Storage Only":
    Solar_Only_Peak_Demand_Reduction_Percentage = ""
elif Model_Type_Input == "Solar Plus Storage":
    Solar_Only_Peak_Demand_Reduction_Percentage = ((Annual_Peak_Demand_Baseline - Annual_Peak_Demand_with_Solar_Only) / Annual_Peak_Demand_Baseline) * 100
Solar_Storage_Peak_Demand_Reduction_Percentage = ((Annual_Peak_Demand_Baseline - Annual_Peak_Demand_with_Solar_and_Storage) / Annual_Peak_Demand_Baseline) * 100
if Model_Type_Input == "Storage Only":
    Solar_Only_Energy_Consumption_Decrease_Percentage = ""
elif Model_Type_Input == "Solar Plus Storage":
    Solar_Only_Energy_Consumption_Decrease_Percentage = ((Annual_Total_Energy_Consumption_Baseline - Annual_Total_Energy_Consumption_with_Solar_Only) / Annual_Total_Energy_Consumption_Baseline) * 100
Solar_Storage_Energy_Consumption_Decrease_Percentage = ((Annual_Total_Energy_Consumption_Baseline - Annual_Total_Energy_Consumption_with_Solar_and_Storage) / Annual_Total_Energy_Consumption_Baseline) * 100
# Console report: word the message as DECREASE or INCREASE depending on sign.
print('Baseline annual peak noncoincident demand is {0} kW.'.format(round(Annual_Peak_Demand_Baseline, 2)))
if Model_Type_Input == "Storage Only":
    if Solar_Storage_Peak_Demand_Reduction_Percentage >= 0:
        print('Peak demand with storage is {0} kW, representing a DECREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    elif Solar_Storage_Peak_Demand_Reduction_Percentage < 0:
        print('Peak demand with storage is {0} kW, representing an INCREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(-Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    print('Baseline annual total electricity consumption is {0} kWh.'.format(round(Annual_Total_Energy_Consumption_Baseline, 2)))
    # Storage always consumes net energy (round-trip losses), so this is
    # reported as an INCREASE with the sign flipped.
    print('Electricity consumption with storage is {0} kWh, representing an INCREASE OF {1}%.'.format(round(Annual_Total_Energy_Consumption_with_Solar_and_Storage, 2),
                                                                                                      round(-Solar_Storage_Energy_Consumption_Decrease_Percentage, 2)))
elif Model_Type_Input == "Solar Plus Storage":
    print('Peak demand with solar only is {0} kW, representing a DECREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_Only, 2), round(Solar_Only_Peak_Demand_Reduction_Percentage, 2)))
    if Solar_Storage_Peak_Demand_Reduction_Percentage >= 0:
        print('Peak demand with solar and storage is {0} kW, representing a DECREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    elif Solar_Storage_Peak_Demand_Reduction_Percentage < 0:
        print('Peak demand with solar and storage is {0} kW, representing an INCREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(-Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    print('Baseline annual total electricity consumption is {0} kWh.'.format(round(Annual_Total_Energy_Consumption_Baseline, 2)))
    print('Electricity consumption with solar only is {0} kWh, representing a DECREASE OF {1}%.'.format(round(Annual_Total_Energy_Consumption_with_Solar_Only, 2),
                                                                                                        round(Solar_Only_Energy_Consumption_Decrease_Percentage, 2)))
    print('Electricity consumption with solar and storage is {0} kWh, representing a DECREASE OF {1}%.'.format(round(Annual_Total_Energy_Consumption_with_Solar_and_Storage, 2),
                                                                                                               round(Solar_Storage_Energy_Consumption_Decrease_Percentage, 2)))
## Plot Monthly Costs as Bar Plot
# Assemble 12x5 monthly cost matrices (columns: fixed, noncoincident DC,
# coincident-peak DC, coincident-part-peak DC, energy charge) for the
# baseline, solar-only, and solar+storage cases, plus 3x1 annual totals
# (rows: fixed, all demand charges, energy charges).
# Calculate Baseline Monthly Costs
Monthly_Costs_Matrix_Baseline = np.concatenate((Fixed_Charge_Vector, NC_DC_Baseline_Vector, CPK_DC_Baseline_Vector, CPP_DC_Baseline_Vector, Energy_Charge_Baseline_Vector), axis = 1)
Annual_Costs_Vector_Baseline = np.concatenate((np.asarray(np.sum(Fixed_Charge_Vector)).reshape(1, -1), \
                                               np.asarray(np.sum(NC_DC_Baseline_Vector) + np.sum(CPK_DC_Baseline_Vector) + np.sum(CPP_DC_Baseline_Vector)).reshape(1, -1), \
                                               np.asarray(np.sum(Energy_Charge_Baseline_Vector)).reshape(1, -1)), axis = 0)
# Row 1 = total demand charges, row 2 = total energy charges.
Annual_Demand_Charge_Cost_Baseline = Annual_Costs_Vector_Baseline[1, 0]
Annual_Energy_Charge_Cost_Baseline = Annual_Costs_Vector_Baseline[2, 0]
# Calculate Monthly Costs With Solar Only
Monthly_Costs_Matrix_with_Solar_Only = np.concatenate((Fixed_Charge_Vector, NC_DC_with_Solar_Only_Vector, CPK_DC_with_Solar_Only_Vector, CPP_DC_with_Solar_Only_Vector, Energy_Charge_with_Solar_Only_Vector), axis = 1)
Annual_Costs_Vector_with_Solar_Only = np.concatenate((np.asarray(np.sum(Fixed_Charge_Vector)).reshape(1, -1), \
                                                      np.asarray(np.sum(NC_DC_with_Solar_Only_Vector) + np.sum(CPK_DC_with_Solar_Only_Vector) + np.sum(CPP_DC_with_Solar_Only_Vector)).reshape(1, -1), \
                                                      np.asarray(np.sum(Energy_Charge_with_Solar_Only_Vector)).reshape(1, -1)), axis = 0)
if Model_Type_Input == "Storage Only":
    Annual_Demand_Charge_Cost_with_Solar_Only = ""
    Annual_Energy_Charge_Cost_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Demand_Charge_Cost_with_Solar_Only = Annual_Costs_Vector_with_Solar_Only[1, 0]
    Annual_Energy_Charge_Cost_with_Solar_Only = Annual_Costs_Vector_with_Solar_Only[2, 0]
# Calculate Monthly Costs with Solar and Storage
Monthly_Costs_Matrix_with_Solar_and_Storage = np.concatenate((Fixed_Charge_Vector, NC_DC_with_Solar_and_Storage_Vector, CPK_DC_with_Solar_and_Storage_Vector, CPP_DC_with_Solar_and_Storage_Vector, \
                                                              Energy_Charge_with_Solar_and_Storage_Vector), axis = 1)
Annual_Costs_Vector_with_Solar_and_Storage = np.concatenate((np.asarray(np.sum(Fixed_Charge_Vector)).reshape(1, -1), \
                                                             np.asarray(np.sum(NC_DC_with_Solar_and_Storage_Vector) + np.sum(CPK_DC_with_Solar_and_Storage_Vector) + np.sum(CPP_DC_with_Solar_and_Storage_Vector)).reshape(1, -1), \
                                                             np.asarray(np.sum(Energy_Charge_with_Solar_and_Storage_Vector)).reshape(1, -1)), axis = 0)
Annual_Demand_Charge_Cost_with_Solar_and_Storage = Annual_Costs_Vector_with_Solar_and_Storage[1, 0]
Annual_Energy_Charge_Cost_with_Solar_and_Storage = Annual_Costs_Vector_with_Solar_and_Storage[2, 0]
# Calculate Maximum and Minimum Monthly Bills - to set y-axis for all plots
# A shared y-range makes the three monthly-cost bar charts directly comparable.
Maximum_Monthly_Bill_Baseline = np.max(np.sum(Monthly_Costs_Matrix_Baseline, axis = 1))
Minimum_Monthly_Bill_Baseline = np.min(np.sum(Monthly_Costs_Matrix_Baseline, axis = 1))
Maximum_Monthly_Bill_with_Solar_Only = np.max(np.sum(Monthly_Costs_Matrix_with_Solar_Only, axis = 1))
Minimum_Monthly_Bill_with_Solar_Only = np.min(np.sum(Monthly_Costs_Matrix_with_Solar_Only, axis = 1))
Maximum_Monthly_Bill_with_Solar_and_Storage = np.max(np.sum(Monthly_Costs_Matrix_with_Solar_and_Storage, axis = 1))
Minimum_Monthly_Bill_with_Solar_and_Storage = np.min(np.sum(Monthly_Costs_Matrix_with_Solar_and_Storage, axis = 1))
Maximum_Monthly_Bill = np.max((Maximum_Monthly_Bill_Baseline, \
                               Maximum_Monthly_Bill_with_Solar_Only, \
                               Maximum_Monthly_Bill_with_Solar_and_Storage))
Minimum_Monthly_Bill = np.min((Minimum_Monthly_Bill_Baseline, \
                               Minimum_Monthly_Bill_with_Solar_Only, \
                               Minimum_Monthly_Bill_with_Solar_and_Storage))
Max_Monthly_Bill_ylim = Maximum_Monthly_Bill * 1.1 # Make upper ylim 10% larger than largest monthly bill.
if Minimum_Monthly_Bill >= 0:
    Min_Monthly_Bill_ylim = 0 # Make lower ylim equal to 0 if the lowest monthly bill is greater than zero.
elif Minimum_Monthly_Bill < 0:
    Min_Monthly_Bill_ylim = Minimum_Monthly_Bill * 1.1 # Make lower ylim 10% smaller than the smallest monthly bill if less than zero.
# Define bar-chart-plotting function
# Created by StackOverflow user Bill: https://stackoverflow.com/questions/44309507/stacked-bar-plot-using-matplotlib
def stacked_bar(data, series_labels, category_labels=None,
                show_values=False, value_format="{}", y_label=None,
                grid=True, reverse=False):
    """Plots a stacked bar chart with the data and labels provided.

    Keyword arguments:
    data -- 2-dimensional numpy array or nested list
            containing data for each series in rows
    series_labels -- list of series labels (these appear in
                     the legend)
    category_labels -- list of category labels (these appear
                       on the x-axis)
    show_values -- If True then numeric value labels will
                   be shown on each bar
    value_format -- Format string for numeric value labels
                    (default is "{}")
    y_label -- Label for y-axis (str)
    grid -- If True display grid
    reverse -- If True reverse the order that the
               series are displayed (left-to-right
               or right-to-left)
    """
    ny = len(data[0])
    ind = list(range(ny))
    axes = []
    cum_size = np.zeros(ny)  # running top of the stack for each category
    data = np.array(data)
    if reverse:
        data = np.flip(data, axis=1)
        # Fix: the original called reversed(category_labels) unconditionally,
        # which raises TypeError when category_labels is None (its default).
        # Also materialize the reversed labels as a list rather than leaving
        # a one-shot iterator.
        if category_labels is not None:
            category_labels = list(reversed(category_labels))
    # Draw one bar series per row, stacked on top of the previous ones.
    for i, row_data in enumerate(data):
        axes.append(plt.bar(ind, row_data, bottom=cum_size,
                            label=series_labels[i]))
        cum_size += row_data
    if category_labels:
        plt.xticks(ind, category_labels)
    if y_label:
        plt.ylabel(y_label)
    plt.legend()
    if grid:
        plt.grid()
    if show_values:
        # Annotate each bar segment with its (formatted) height at its center.
        for axis in axes:
            for bar in axis:
                w, h = bar.get_width(), bar.get_height()
                plt.text(bar.get_x() + w / 2, bar.get_y() + h / 2,
                         value_format.format(h), ha="center",
                         va="center")
# Plot Baseline Monthly Costs
# Three stacked-bar charts (baseline / solar-only / solar+storage) share the
# y-limits computed above, followed by a monthly-savings-from-storage chart.
if Show_Plots == 1 or Export_Plots == 1:
    series_labels = ['Fixed Charges', 'Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
    category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.figure()
    # stacked_bar expects one row per series, so transpose the 12x5 matrix.
    stacked_bar(np.transpose(Monthly_Costs_Matrix_Baseline),
                series_labels,
                category_labels=category_labels,
                show_values=False,
                value_format="{}",
                y_label="Cost ($/Month)")
    plt.xlabel('Month')
    plt.ylim(bottom=Min_Monthly_Bill_ylim, top=Max_Monthly_Bill_ylim)
    plt.title('Monthly Costs, Without Storage')
    plt.show()
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs Baseline Plot.png'))
# Plot Monthly Costs With Solar Only
if Model_Type_Input == "Solar Plus Storage":
    if Show_Plots == 1 or Export_Plots == 1:
        series_labels = ['Fixed Charges', 'Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
        category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
        plt.figure()
        stacked_bar(np.transpose(Monthly_Costs_Matrix_with_Solar_Only),
                    series_labels,
                    category_labels=category_labels,
                    show_values=False,
                    value_format="{}",
                    y_label="Cost ($/Month)")
        plt.xlabel('Month')
        plt.ylim(bottom = Min_Monthly_Bill_ylim, top = Max_Monthly_Bill_ylim)
        plt.title('Monthly Costs, With Solar Only')
        plt.show()
        if Export_Plots == 1:
            plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs with Solar Only Plot.png'))
# Plot Monthly Costs with Solar and Storage
if Show_Plots == 1 or Export_Plots == 1:
    series_labels = ['Fixed Charges', 'Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
    category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.figure()
    stacked_bar(np.transpose(Monthly_Costs_Matrix_with_Solar_and_Storage),
                series_labels,
                category_labels=category_labels,
                show_values=False,
                value_format="{}",
                y_label="Cost ($/Month)")
    plt.xlabel('Month')
    plt.ylim(bottom=Min_Monthly_Bill_ylim, top=Max_Monthly_Bill_ylim)
    plt.title('Monthly Costs, With Storage')
    plt.show()
    if Export_Plots == 1:
        # Filename distinguishes storage-only from solar-plus-storage runs.
        if Model_Type_Input == "Storage Only":
            plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs with Storage Plot.png'))
        elif Model_Type_Input == "Solar Plus Storage":
            plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs with Solar and Storage Plot.png'))
# Plot Monthly Savings From Storage
# Savings attributable to storage alone: baseline minus with-storage for
# storage-only runs, solar-only minus solar+storage otherwise.
if Model_Type_Input == "Storage Only":
    Monthly_Savings_Matrix_From_Storage = Monthly_Costs_Matrix_Baseline - Monthly_Costs_Matrix_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Monthly_Savings_Matrix_From_Storage = Monthly_Costs_Matrix_with_Solar_Only - Monthly_Costs_Matrix_with_Solar_and_Storage
# Remove fixed charges column.
Monthly_Savings_Matrix_Plot = Monthly_Savings_Matrix_From_Storage[:, [1, 2, 3, 4]]
if Show_Plots == 1 or Export_Plots == 1:
    series_labels = ['Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
    category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.figure()
    stacked_bar(np.transpose(Monthly_Savings_Matrix_Plot),
                series_labels,
                category_labels=category_labels,
                show_values=False,
                value_format="{}",
                y_label="Savings ($/Month)")
    plt.xlabel('Month')
    plt.title('Monthly Savings From Storage')
    plt.show()
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Savings from Storage Plot.png'))
## Report Annual Savings
# Annual bills, bill savings, simple paybacks, cycling/degradation, and
# SGIP-style round-trip efficiency and capacity factor.
# Report Baseline Cost without Solar or Storage
Annual_Customer_Bill_Baseline = np.sum(np.sum(Monthly_Costs_Matrix_Baseline))
if Model_Type_Input == "Storage Only":
    Annual_Customer_Bill_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Customer_Bill_with_Solar_Only = np.sum(Annual_Costs_Vector_with_Solar_Only)
Annual_Customer_Bill_with_Solar_and_Storage = np.sum(Annual_Costs_Vector_with_Solar_and_Storage) # Doesn't include degradation cost.
if Model_Type_Input == "Storage Only":
    Annual_Customer_Bill_Savings_from_Storage = Annual_Customer_Bill_Baseline - Annual_Customer_Bill_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Customer_Bill_Savings_from_Solar = Annual_Customer_Bill_Baseline - Annual_Customer_Bill_with_Solar_Only
    Annual_Customer_Bill_Savings_from_Solar_Percent = (Annual_Customer_Bill_Savings_from_Solar / Annual_Customer_Bill_Baseline)
    Annual_Customer_Bill_Savings_from_Storage = Annual_Customer_Bill_with_Solar_Only - Annual_Customer_Bill_with_Solar_and_Storage
Annual_Customer_Bill_Savings_from_Storage_Percent = (Annual_Customer_Bill_Savings_from_Storage / Annual_Customer_Bill_Baseline)
# Solar simple payback (installed cost / annual bill savings), solar runs only.
if Model_Type_Input == "Solar Plus Storage":
    Solar_Installed_Cost = Solar_Size_Input * Solar_Installed_Cost_per_kW
    Solar_Simple_Payback = Solar_Installed_Cost / Annual_Customer_Bill_Savings_from_Solar
    print('Annual cost savings from solar is ${0}, representing {1}% of the original ${2} bill.'.format(
        int(Annual_Customer_Bill_Savings_from_Solar), round(Annual_Customer_Bill_Savings_from_Solar_Percent * 100, 2),
        int(Annual_Customer_Bill_Baseline)))
    print('The solar PV system has a simple payback of {0} years, not including incentives.'.format(
        round(Solar_Simple_Payback, 1)))
# Storage simple payback, computed for both model types.
Storage_Installed_Cost = Total_Storage_Capacity * Storage_Installed_Cost_per_kWh
Storage_Simple_Payback = Storage_Installed_Cost / Annual_Customer_Bill_Savings_from_Storage
print('Annual cost savings from storage is ${0}, representing {1}% of the original ${2} bill.'.format(
    int(Annual_Customer_Bill_Savings_from_Storage), round(Annual_Customer_Bill_Savings_from_Storage_Percent * 100, 2),
    int(Annual_Customer_Bill_Baseline)))
print('The storage system has a simple payback of {0} years, not including incentives.'.format(
    round(Storage_Simple_Payback, 1)))
## Report Cycling/Degradation Penalty
Annual_Equivalent_Storage_Cycles = np.sum(Cycles_Vector)
Annual_Cycling_Penalty = np.sum(Cycling_Penalty_Vector)
# Capacity fade = starting usable capacity minus end-of-year usable capacity.
Annual_Capacity_Fade = Usable_Storage_Capacity_Input - Usable_Storage_Capacity
print('The battery cycles {0} times annually, with a degradation cost of ${1}, and experiences capacity fade of {2} kWh.'.format(
    int(Annual_Equivalent_Storage_Cycles), int(Annual_Cycling_Penalty), round(Annual_Capacity_Fade, 1)))
## Report Operational/"SGIP" Round-Trip Efficiency
# Annual RTE = total discharged energy / total charged energy.
Annual_RTE = (np.sum(P_ES_out) * delta_t) / (np.sum(P_ES_in) * delta_t)
print('The battery has an Annual Operational/SGIP Round-Trip Efficiency of {0}%.'.format(
    round(Annual_RTE * 100, 2)))
## Report Operational/"SGIP" Capacity Factor
# The SGIP Handbook uses the following definition of capacity factor for
# storage resources, based on the assumption that 60% of hours are
# available for discharge. The term "hours of data available" is equal to
# the number of hours in the year here. For actual operational data, it's
# the number of hours where data is available, which may be less than the
# number of hours in the year. Here, the number of hours in the year is
# calculated by multiplying the number of timesteps of original load profile data
# by the timestep length delta_t. This returns 8760 hours during
# non-leap years and 8784 during leap years.
# Capacity Factor = (kWh Discharge)/(Hours of Data Available x Rebated Capacity (kW) x 60%)
Operational_Capacity_Factor = ((np.sum(P_ES_out) * delta_t) / ((len(Load_Profile_Data) * delta_t) * Storage_Power_Rating_Input * 0.6))
print('The battery has an Operational/SGIP Capacity Factor of {0}%.'.format(
    round(Operational_Capacity_Factor * 100, 2)))
## Report Grid Costs
# Estimated utility grid costs = (generation cost + representative
# distribution cost, $/MWh) dotted with net load (kW), converted via
# (1/1000) MWh/kWh and the timestep length delta_t (hours).
# Calculate Total Annual Grid Costs
Annual_Grid_Cost_Baseline = np.dot(Generation_Cost_Data + Representative_Distribution_Cost_Data, Load_Profile_Data) * (1 / 1000) * delta_t
if Model_Type_Input == "Solar Plus Storage":
    Annual_Grid_Cost_with_Solar_Only = np.dot(Generation_Cost_Data + Representative_Distribution_Cost_Data, Load_Profile_Data - Solar_PV_Profile_Data) * (1 / 1000) * delta_t
else:
    Annual_Grid_Cost_with_Solar_Only = ""
Annual_Grid_Cost_with_Solar_and_Storage = np.dot(Generation_Cost_Data + Representative_Distribution_Cost_Data, Load_Profile_Data - Solar_PV_Profile_Data - \
                                                 P_ES_out.reshape((numtsteps_year,)) + P_ES_in.reshape((numtsteps_year,))) * (1 / 1000) * delta_t
# Calculate Monthly Grid Costs
# Per-timestep (numtsteps x 2) matrices: column 0 = generation cost,
# column 1 = distribution cost; then summed by month into 12x2 matrices.
Grid_Cost_Timestep_Baseline = np.concatenate((np.multiply(Generation_Cost_Data, Load_Profile_Data).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t, \
                                              np.multiply(Representative_Distribution_Cost_Data, Load_Profile_Data).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t), axis = 1)
Grid_Cost_Month_Baseline = np.array([])
for Month_Iter in range(1, 12 + 1):
    Grid_Cost_Single_Month_Baseline = np.sum(Grid_Cost_Timestep_Baseline[Month_Data == Month_Iter,:], axis = 0).reshape((1,2))
    # First iteration seeds the array; later iterations append a row.
    Grid_Cost_Month_Baseline = np.concatenate((Grid_Cost_Month_Baseline, Grid_Cost_Single_Month_Baseline), axis = 0) if Grid_Cost_Month_Baseline.size != 0 else Grid_Cost_Single_Month_Baseline
Grid_Cost_Timestep_with_Solar_Only = np.concatenate((np.multiply(Generation_Cost_Data, (Load_Profile_Data - Solar_PV_Profile_Data)).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t, \
                                                     np.multiply(Representative_Distribution_Cost_Data, (Load_Profile_Data - Solar_PV_Profile_Data)).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t), axis = 1)
Grid_Cost_Month_with_Solar_Only = np.array([])
for Month_Iter in range(1, 12 + 1):
    Grid_Cost_Single_Month_with_Solar_Only = np.sum(Grid_Cost_Timestep_with_Solar_Only[Month_Data == Month_Iter,:], axis = 0).reshape((1,2))
    Grid_Cost_Month_with_Solar_Only = np.concatenate((Grid_Cost_Month_with_Solar_Only, Grid_Cost_Single_Month_with_Solar_Only), axis = 0) if Grid_Cost_Month_with_Solar_Only.size != 0 else Grid_Cost_Single_Month_with_Solar_Only
Grid_Cost_Timestep_with_Solar_and_Storage = np.concatenate((np.multiply(Generation_Cost_Data,
                                                                        (Load_Profile_Data - Solar_PV_Profile_Data - P_ES_out.reshape((numtsteps_year,)) + P_ES_in.reshape((numtsteps_year,)))).reshape((numtsteps_year,1)) *
                                                            (1 / 1000) * delta_t, \
                                                            np.multiply(Representative_Distribution_Cost_Data,
                                                                        (Load_Profile_Data - Solar_PV_Profile_Data - P_ES_out.reshape((numtsteps_year,)) + P_ES_in.reshape((numtsteps_year,)))).reshape((numtsteps_year,1)) *
                                                            (1 / 1000) * delta_t), axis = 1)
Grid_Cost_Month_with_Solar_and_Storage = np.array([])
for Month_Iter in range(1, 12 + 1):
    Grid_Cost_Single_Month_with_Solar_and_Storage = np.sum(Grid_Cost_Timestep_with_Solar_and_Storage[Month_Data == Month_Iter,:], axis = 0).reshape((1,2))
    Grid_Cost_Month_with_Solar_and_Storage = np.concatenate((Grid_Cost_Month_with_Solar_and_Storage, Grid_Cost_Single_Month_with_Solar_and_Storage), axis = 0) if \
        Grid_Cost_Month_with_Solar_and_Storage.size != 0 else Grid_Cost_Single_Month_with_Solar_and_Storage
# Calculate Monthly Grid Cost Savings from Storage
if Model_Type_Input == "Storage Only":
    Grid_Cost_Savings_Month_from_Storage = Grid_Cost_Month_Baseline - Grid_Cost_Month_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Grid_Cost_Savings_Month_from_Storage = Grid_Cost_Month_with_Solar_Only - Grid_Cost_Month_with_Solar_and_Storage
# Report Grid Cost Savings from Solar
if Model_Type_Input == "Solar Plus Storage":
    print('Installing solar DECREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
        round(Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_Only, 2)))
# Report Grid Cost Impact from Storage
# Message wording flips between INCREASES/DECREASES based on the sign of the delta.
if Model_Type_Input == "Storage Only":
    if Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_and_Storage < 0:
        print('Installing energy storage INCREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            -round(Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
    else:
        print('Installing energy storage DECREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            round(Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
elif Model_Type_Input == "Solar Plus Storage":
    if Annual_Grid_Cost_with_Solar_Only - Annual_Grid_Cost_with_Solar_and_Storage < 0:
        print('Installing energy storage INCREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            -round(Annual_Grid_Cost_with_Solar_Only - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
    else:
        print('Installing energy storage DECREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            round(Annual_Grid_Cost_with_Solar_Only - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
## Report Emissions Impact
# This approach multiplies net load by marginal emissions factors to
# calculate total annual emissions. This is consistent with the idea that
# the customer would pay an adder based on marginal emissions factors.
# Typically, total annual emissions is calculated using average emissions
# values, not marginal emissions values.
# https://www.pge.com/includes/docs/pdfs/shared/environment/calculator/pge_ghg_emission_factor_info_sheet.pdf
# (tons/kWh) = (tons/MWh) * (MWh/kWh)
Annual_GHG_Emissions_Baseline = np.dot(Marginal_Emissions_Rate_Evaluation_Data, Load_Profile_Data) * (1 / 1000) * delta_t
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_GHG_Emissions_with_Solar_Only = np.dot(Marginal_Emissions_Rate_Evaluation_Data, (Load_Profile_Data - Solar_PV_Profile_Data)) * (1 / 1000) * delta_t
Annual_GHG_Emissions_with_Solar_and_Storage = np.dot(Marginal_Emissions_Rate_Evaluation_Data,
                                                     (Load_Profile_Data - (Solar_PV_Profile_Data + P_ES_out.reshape((numtsteps_year,)) - P_ES_in.reshape((numtsteps_year,))))) * (1 / 1000) * delta_t
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_Reduction_from_Solar = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_GHG_Emissions_Reduction_from_Solar = Annual_GHG_Emissions_Baseline - Annual_GHG_Emissions_with_Solar_Only
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_Reduction_from_Storage = Annual_GHG_Emissions_Baseline - Annual_GHG_Emissions_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Annual_GHG_Emissions_Reduction_from_Storage = Annual_GHG_Emissions_with_Solar_Only - Annual_GHG_Emissions_with_Solar_and_Storage
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_Reduction_from_Solar_Percent = ""
elif Model_Type_Input == "Solar Plus Storage":
    # NOTE(review): the percentage formulas are commented out and both
    # percentages are hard-coded to 0, so the console report below always
    # prints 0% — presumably a temporary stub; confirm whether the
    # commented-out expressions should be restored.
    Annual_GHG_Emissions_Reduction_from_Solar_Percent = 0 # (Annual_GHG_Emissions_Reduction_from_Solar / Annual_GHG_Emissions_Baseline)
Annual_GHG_Emissions_Reduction_from_Storage_Percent = 0 # (Annual_GHG_Emissions_Reduction_from_Storage / Annual_GHG_Emissions_Baseline)
if Model_Type_Input == "Solar Plus Storage":
    print('Installing solar DECREASES marginal carbon emissions by {0} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Solar, 2)))
    print('This is equivalent to {0}% of baseline emissions, and brings total emissions to {1} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Solar_Percent * 100, 2), round(Annual_GHG_Emissions_with_Solar_Only, 2)))
# Message wording flips between INCREASES/DECREASES based on the sign.
if Annual_GHG_Emissions_Reduction_from_Storage < 0:
    print('Installing energy storage INCREASES marginal carbon emissions by {0} metric tons per year.'.format(
        -round(Annual_GHG_Emissions_Reduction_from_Storage, 2)))
    print('This is equivalent to {0}% of baseline emissions, and brings total emissions to {1} metric tons per year.'.format(
        -round(Annual_GHG_Emissions_Reduction_from_Storage_Percent * 100, 2), round(Annual_GHG_Emissions_with_Solar_and_Storage, 2)))
else:
    print('Installing energy storage DECREASES marginal carbon emissions by {0} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Storage, 2)))
    print('This is equivalent to {0}% of baseline emissions, and brings total emissions to {1} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Storage_Percent * 100, 2), round(Annual_GHG_Emissions_with_Solar_and_Storage, 2)))
## Close All Figures
if Show_Plots == 0:
    plt.close('all')
## Write Outputs to CSV
Model_Inputs_and_Outputs = np.array([Modeling_Team_Input, Model_Run_Number_Input, Model_Run_Date_Time, Model_Type_Input, Model_Timestep_Resolution, \
Customer_Class_Input, Load_Profile_Master_Index, Load_Profile_Name_Input, \
Retail_Rate_Master_Index, Retail_Rate_Utility, Retail_Rate_Name_Output, Retail_Rate_Effective_Date, \
Solar_Profile_Master_Index, Solar_Profile_Name_Output, Solar_Profile_Description, Solar_Size_Input, \
Storage_Type_Input, Storage_Power_Rating_Input, Usable_Storage_Capacity_Input, Single_Cycle_RTE_Input, Parasitic_Storage_Load_Input, \
Storage_Control_Algorithm_Name, Storage_Control_Algorithm_Description, Storage_Control_Algorithms_Parameters_Filename, \
GHG_Reduction_Solution_Input, Equivalent_Cycling_Constraint_Input, Annual_RTE_Constraint_Input, ITC_Constraint_Input, \
Carbon_Adder_Incentive_Value_Input, Other_Incentives_or_Penalities, Emissions_Forecast_Signal_Input, \
Annual_GHG_Emissions_Baseline, Annual_GHG_Emissions_with_Solar_Only, Annual_GHG_Emissions_with_Solar_and_Storage, \
Annual_Customer_Bill_Baseline, Annual_Customer_Bill_with_Solar_Only, Annual_Customer_Bill_with_Solar_and_Storage, \
Annual_Grid_Cost_Baseline, Annual_Grid_Cost_with_Solar_Only, Annual_Grid_Cost_with_Solar_and_Storage, \
Annual_Equivalent_Storage_Cycles, Annual_RTE, Operational_Capacity_Factor, \
Annual_Demand_Charge_Cost_Baseline, Annual_Demand_Charge_Cost_with_Solar_Only, Annual_Demand_Charge_Cost_with_Solar_and_Storage, \
Annual_Energy_Charge_Cost_Baseline, Annual_Energy_Charge_Cost_with_Solar_Only, Annual_Energy_Charge_Cost_with_Solar_and_Storage, \
Annual_Peak_Demand_Baseline, Annual_Peak_Demand_with_Solar_Only, Annual_Peak_Demand_with_Solar_and_Storage, \
Annual_Total_Energy_Consumption_Baseline, Annual_Total_Energy_Consumption_with_Solar_Only, Annual_Total_Energy_Consumption_with_Solar_and_Storage, \
Output_Summary_Filename, Output_Description_Filename, Output_Visualizations_Filename, \
EV_Use, EV_Charge, EV_Gas_Savings, EV_GHG_Savings]).reshape((1, 62))
Model_Inputs_and_Outputs = pd.DataFrame(Model_Inputs_and_Outputs, columns = ["Modeling_Team_Input", "Model_Run_Number_Input", "Model_Run_Date_Time", "Model_Type_Input", "Model_Timestep_Resolution", \
"Customer_Class_Input", "Load_Profile_Master_Index", "Load_Profile_Name_Input", \
"Retail_Rate_Master_Index", "Retail_Rate_Utility", "Retail_Rate_Name_Output", "Retail_Rate_Effective_Date", \
"Solar_Profile_Master_Index", "Solar_Profile_Name_Output", "Solar_Profile_Description", "Solar_Size_Input", \
"Storage_Type_Input", "Storage_Power_Rating_Input", "Usable_Storage_Capacity_Input", "Single_Cycle_RTE_Input", "Parasitic_Storage_Load_Input", \
"Storage_Control_Algorithm_Name", "Storage_Control_Algorithm_Description", "Storage_Control_Algorithms_Parameters_Filename", \
"GHG_Reduction_Solution_Input", "Equivalent_Cycling_Constraint_Input", "Annual_RTE_Constraint_Input", "ITC_Constraint_Input", \
"Carbon_Adder_Incentive_Value_Input", "Other_Incentives_or_Penalities", "Emissions_Forecast_Signal_Input", \
"Annual_GHG_Emissions_Baseline", "Annual_GHG_Emissions_with_Solar_Only", "Annual_GHG_Emissions_with_Solar_and_Storage", \
"Annual_Customer_Bill_Baseline", "Annual_Customer_Bill_with_Solar_Only", "Annual_Customer_Bill_with_Solar_and_Storage", \
"Annual_Grid_Cost_Baseline", "Annual_Grid_Cost_with_Solar_Only", "Annual_Grid_Cost_with_Solar_and_Storage", \
"Annual_Equivalent_Storage_Cycles", "Annual_RTE", "Operational_Capacity_Factor", \
"Annual_Demand_Charge_Cost_Baseline", "Annual_Demand_Charge_Cost_with_Solar_Only", "Annual_Demand_Charge_Cost_with_Solar_and_Storage", \
"Annual_Energy_Charge_Cost_Baseline", "Annual_Energy_Charge_Cost_with_Solar_Only", "Annual_Energy_Charge_Cost_with_Solar_and_Storage", \
"Annual_Peak_Demand_Baseline", "Annual_Peak_Demand_with_Solar_Only", "Annual_Peak_Demand_with_Solar_and_Storage", \
"Annual_Total_Energy_Consumption_Baseline", "Annual_Total_Energy_Consumption_with_Solar_Only", "Annual_Total_Energy_Consumption_with_Solar_and_Storage", \
"Output_Summary_Filename", "Output_Description_Filename", "Output_Visualizations_Filename", \
"EV_Use", "EV_Charge", "EV_Gas_Savings", "EV_GHG_Savings"])
Storage_Dispatch_Outputs = np.array([t, -P_ES]).transpose() # Flip sign for Genability convention.
Storage_Dispatch_Outputs = pd.DataFrame(Storage_Dispatch_Outputs, columns = ["Date_Time_Pacific_No_DST", "Storage_Output_kW"])
if Export_Data == 1:
Model_Inputs_and_Outputs.to_csv(os.path.join(Output_Directory_Filepath, Output_Summary_Filename), index = False)
Storage_Dispatch_Outputs.to_csv(os.path.join(Output_Directory_Filepath, "Storage Dispatch Profile Output.csv"), index = False)
## Return to OSESMO Git Repository Directory
os.chdir(OSESMO_Git_Repo_Directory)
P_ES_inverted = -P_ES
return P_ES_inverted | 52.499048 | 248 | 0.665439 | th as math
import time as time
import datetime as datetime
import numpy as np
import pandas as pd
from cvxopt import matrix, sparse, solvers
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def OSESMO(Modeling_Team_Input=None, Model_Run_Number_Input=None, Model_Type_Input=None,
Model_Timestep_Resolution=None, Customer_Class_Input=None, Load_Profile_Name_Input=None,
Retail_Rate_Name_Input=None, Solar_Profile_Name_Input=None, Solar_Size_Input=None,
Storage_Type_Input=None, Storage_Power_Rating_Input=None, Usable_Storage_Capacity_Input=None,
Single_Cycle_RTE_Input=None, Parasitic_Storage_Load_Input=None,
Storage_Control_Algorithm_Name=None, GHG_Reduction_Solution_Input=None, Equivalent_Cycling_Constraint_Input=None,
Annual_RTE_Constraint_Input=None, ITC_Constraint_Input=None,
Carbon_Adder_Incentive_Value_Input=None, Emissions_Forecast_Signal_Input=None,
OSESMO_Git_Repo_Directory=None, Input_Output_Data_Directory_Location=None, Start_Time_Input=None,
Show_Plots=None, Export_Plots=None, Export_Data=None,
Solar_Installed_Cost_per_kW=None, Storage_Installed_Cost_per_kWh=None, Estimated_Future_Lithium_Ion_Battery_Installed_Cost_per_kWh=None,
Cycle_Life=None, Storage_Depth_of_Discharge=None, Initial_Final_SOC=None, End_of_Month_Padding_Days=None):
# https://www.lazard.com/media/450338/lazard-levelized-cost-of-storage-version-30.pdf
Eff_c = math.sqrt(Single_Cycle_RTE_Input)
Eff_d = math.sqrt(Single_Cycle_RTE_Input)
# Parasitic storage load (kW) calculated based on input value, which is
# given as a percentage of Storage Power Rating.
Parasitic_Storage_Load = Storage_Power_Rating_Input * Parasitic_Storage_Load_Input
# Set Carbon Adder to $0/metric ton if GHG Reduction Solution is not GHG Signal Co-Optimization.
# This serves as error-handling in case the user sets the Carbon Adder to a
# non-zero value, and sets the GHG Reduction Solution to something other
# than GHG Signal Co-Optimization.
if GHG_Reduction_Solution_Input != "GHG Signal Co-Optimization":
Carbon_Adder_Incentive_Value_Input = 0 # Value of carbon adder, in $ per metric ton.
Emissions_Forecast_Signal_Input = "No Emissions Forecast Signal" # Ensures consistent outputs.
# Set Solar Profile Name Input to "No Solar", set Solar Size Input to 0 kW,
# and set ITC Constraint to 0 if Model Type Input is Storage Only.
# This serves as error handling.
if Model_Type_Input == "Storage Only":
Solar_Profile_Name_Input = "No Solar"
Solar_Size_Input = 0
ITC_Constraint_Input = 0
# Throw an error if Model Type Input is set to Solar Plus Storage
# and Solar Profile Name Input is set to "No Solar",
# or if Solar Size Input is set to 0 kW.
if Model_Type_Input == "Solar Plus Storage":
if Solar_Profile_Name_Input == "No Solar":
print("Solar Plus Storage Model selected, but No Solar Profile Name Input selected.")
if Solar_Size_Input == 0:
print("Solar Plus Storage Model selected, but Solar Size Input set to 0 kW.")
# Throw an error if Storage Control Algorithm set to OSESMO Non-Economic
# Solar Self-Supply, and Model Type Input is set to Storage Only,
# or if Solar Profile Name Input is set to "No Solar",
# or if Solar Size Input is set to 0 kW.
if Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
if Model_Type_Input == "Storage Only":
print("OSESMO Non-Economic Solar Self-Supply control algorithm selected, but Model Type set to Storage Only.")
if Solar_Profile_Name_Input == "No Solar":
print("OSESMO Non-Economic Solar Self-Supply control algorithm selected, but No Solar Profile Name Input selected.")
if Solar_Size_Input == 0:
print("OSESMO Non-Economic Solar Self-Supply control algorithm selected, but Solar Size Input set to 0 kW.")
# Emissions Evaluation Signal
# Real-time five-minute marginal emissions signal used to evaluate emission impacts.
# Available for both NP15 (Northern California congestion zone)
# and SP15 (Southern California congestion zone).
# Mapped based on load profile site location (Northern or Southern CA).
if Load_Profile_Name_Input == "WattTime GreenButton Residential Berkeley" or \
Load_Profile_Name_Input == "WattTime GreenButton Residential Coulterville" or \
Load_Profile_Name_Input == "PG&E GreenButton E-6 Residential" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential CARE" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential Non-CARE" or \
Load_Profile_Name_Input == "Custom Power Solar GreenButton PG&E Albany Residential with EV" or \
Load_Profile_Name_Input == "Custom Power Solar GreenButton PG&E Crockett Residential with EV" or \
Load_Profile_Name_Input == "Avalon GreenButton East Bay Light Industrial" or \
Load_Profile_Name_Input == "Avalon GreenButton South Bay Education" or \
Load_Profile_Name_Input == "EnerNOC GreenButton San Francisco Office" or \
Load_Profile_Name_Input == "EnerNOC GreenButton San Francisco Industrial" or \
Load_Profile_Name_Input == "PG&E GreenButton A-6 SMB" or \
Load_Profile_Name_Input == "PG&E GreenButton A-10S MLB" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential Non-CARE" or \
Load_Profile_Name_Input == "PG&E GreenButton Central Valley Residential CARE":
Emissions_Evaluation_Signal_Input = "NP15 RT5M"
elif Load_Profile_Name_Input == "WattTime GreenButton Residential Long Beach" or\
Load_Profile_Name_Input == "Stem GreenButton SCE TOU-8B Office" or\
Load_Profile_Name_Input == "Stem GreenButton SDG&E G-16 Manufacturing" or\
Load_Profile_Name_Input == "Stem GreenButton SCE GS-3B Food Processing" or\
Load_Profile_Name_Input == "EnerNOC GreenButton Los Angeles Grocery" or\
Load_Profile_Name_Input == "EnerNOC GreenButton Los Angeles Industrial" or\
Load_Profile_Name_Input == "EnerNOC GreenButton San Diego Office":
Emissions_Evaluation_Signal_Input = "SP15 RT5M"
else:
print("This load profile name input has not been mapped to an emissions evaluation signal (NP15 or SP15).")
# Total Storage Capacity
# Total storage capacity is the total chemical capacity of the battery.
# The usable storage capacity is equal to the total storage capacity
# multiplied by storage depth of discharge. This means that the total
# storage capacity is equal to the usable storage capacity divided by
# storage depth of discharge. Total storage capacity is used to
# calculate battery cost, whereas usable battery capacity is used
# as an input to operational simulation portion of model.
Total_Storage_Capacity = Usable_Storage_Capacity_Input / Storage_Depth_of_Discharge
# Usable Storage Capacity
# Usable storage capacity is equal to the original usable storage capacity
# input, degraded every month based on the number of cycles performed in
# that month. Initialized at the usable storage capacity input value.
Usable_Storage_Capacity = Usable_Storage_Capacity_Input
# Cycling Penalty
# Cycling penalty for lithium-ion battery is equal to estimated replacement cell cost
# in 10 years divided by expected cycle life. Cycling penalty for flow batteries is $0/cycle.
if Storage_Type_Input == "Lithium-Ion Battery":
cycle_pen = (Total_Storage_Capacity * Estimated_Future_Lithium_Ion_Battery_Installed_Cost_per_kWh) / Cycle_Life
elif Storage_Type_Input == "Flow Battery":
cycle_pen = 0
## Import Data from CSV Files
# Begin script runtime timer
tstart = time.time()
# Import Load Profile Data
# Call Import_Load_Profile_Data function.
from switch_api.services.osemo.Code.Import_Load_Profile_Data_SC2019 import Import_Load_Profile_Data
[Load_Profile_Data, Load_Profile_Master_Index] = Import_Load_Profile_Data(Input_Output_Data_Directory_Location, OSESMO_Git_Repo_Directory,
delta_t, Load_Profile_Name_Input)
Annual_Peak_Demand_Baseline = np.max(Load_Profile_Data)
Annual_Total_Energy_Consumption_Baseline = np.sum(Load_Profile_Data) * delta_t
# Import Solar PV Generation Profile Data
# Scale base 10-kW or 100-kW profile to match user-input PV system size
if Model_Type_Input == "Solar Plus Storage":
from switch_api.services.osemo.Code.Import_Solar_PV_Profile_Data_SC2019 import Import_Solar_PV_Profile_Data
[Solar_Profile_Master_Index, Solar_Profile_Description, Solar_PV_Profile_Data] = Import_Solar_PV_Profile_Data(
Input_Output_Data_Directory_Location,
OSESMO_Git_Repo_Directory, delta_t,
Solar_Profile_Name_Input, Solar_Size_Input)
elif Model_Type_Input == "Storage Only" or Solar_Profile_Name_Input == "No Solar":
Solar_PV_Profile_Data = np.zeros(shape=Load_Profile_Data.shape)
# Import Retail Rate Data
# Call Import_Retail_Rate_Data function.
from switch_api.services.osemo.Code.Import_Retail_Rate_Data_SC2019 import Import_Retail_Rate_Data
[Retail_Rate_Master_Index, Retail_Rate_Effective_Date,
Volumetric_Rate_Data, Summer_Peak_DC, Summer_Part_Peak_DC, Summer_Noncoincident_DC,
Winter_Peak_DC, Winter_Part_Peak_DC, Winter_Noncoincident_DC,
Fixed_Per_Meter_Day_Charge, Fixed_Per_Meter_Month_Charge,
First_Summer_Month, Last_Summer_Month, Month_Data,
Summer_Peak_Binary_Data, Summer_Part_Peak_Binary_Data,
Winter_Peak_Binary_Data, Winter_Part_Peak_Binary_Data] = Import_Retail_Rate_Data(
Input_Output_Data_Directory_Location, OSESMO_Git_Repo_Directory,
delta_t, Retail_Rate_Name_Input)
Month_Data = Month_Data.astype(int)
Summer_Peak_Binary_Data = Summer_Peak_Binary_Data.astype(int)
Summer_Part_Peak_Binary_Data = Summer_Part_Peak_Binary_Data.astype(int)
Winter_Peak_Binary_Data = Winter_Peak_Binary_Data.astype(int)
Winter_Part_Peak_Binary_Data = Winter_Part_Peak_Binary_Data.astype(int)
# Import Marginal Emissions Rate Data Used as Forecast
# Call Import_Marginal_Emissions_Rate_Forecast_Data function.
# from Import_Marginal_Emissions_Rate_Forecast_Data import Import_Marginal_Emissions_Rate_Forecast_Data
Marginal_Emissions_Rate_Forecast_Data = np.zeros(shape=Load_Profile_Data.shape)
# Import Marginal Emissions Rate Data Used for Evaluation
# Call Import_Marginal_Emissions_Rate_Forecast_Data function.
# from Import_Marginal_Emissions_Rate_Evaluation_Data import Import_Marginal_Emissions_Rate_Evaluation_Data
Marginal_Emissions_Rate_Evaluation_Data = np.zeros(shape=Load_Profile_Data.shape)
# Import Carbon Adder Data
# Carbon Adder ($/kWh) = Marginal Emissions Rate (metric tons CO2/MWh) *
# Carbon Adder ($/metric ton) * (1 MWh/1000 kWh)
Carbon_Adder_Data = (Marginal_Emissions_Rate_Forecast_Data *
Carbon_Adder_Incentive_Value_Input) / 1000
# Import IOU-Proposed Charge and Discharge Hour Flag Vectors
if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
from switch_api.services.osemo.Code.Import_IOU_Time_Constraint_Binary_Data import Import_IOU_Time_Constraint_Binary_Data
[IOU_Charge_Hour_Binary_Data, IOU_Discharge_Hour_Binary_Data] = Import_IOU_Time_Constraint_Binary_Data(
Input_Output_Data_Directory_Location,
OSESMO_Git_Repo_Directory, delta_t)
# Import PG&E-Proposed Charge, No-Charge, and Discharge Hour Flag Vectors
if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
from switch_api.services.osemo.Code.Import_PGE_Time_Constraint_Binary_Data import Import_PGE_Time_Constraint_Binary_Data
[PGE_Charge_Hour_Binary_Data, PGE_No_Charge_Hour_Binary_Data, PGE_Discharge_Hour_Binary_Data] = Import_PGE_Time_Constraint_Binary_Data(
Input_Output_Data_Directory_Location, OSESMO_Git_Repo_Directory, delta_t)
# Import Utility Marginal Cost Data
# Marginal Costs are mapped to load profile location
# from Import_Utility_Marginal_Cost_Data import Import_Utility_Marginal_Cost_Data
Generation_Cost_Data = np.zeros(shape=Load_Profile_Data.shape)
Representative_Distribution_Cost_Data = np.zeros(shape=Load_Profile_Data.shape)
# Set Directory to Box Sync Folder
os.chdir(Input_Output_Data_Directory_Location)
## Iterate Through Months & Filter Data to Selected Month
# Initialize Blank Variables to store optimal decision variable values for
# all months
# Initialize Decision Variable Vectors
P_ES_in = np.array([])
P_ES_out = np.array([])
Ene_Lvl = np.array([])
P_max_NC = np.array([])
P_max_peak = np.array([])
P_max_part_peak = np.array([])
# Initialize Monthly Cost Variable Vectors
Fixed_Charge_Vector = np.array([])
NC_DC_Baseline_Vector = np.array([])
NC_DC_with_Solar_Only_Vector = np.array([])
NC_DC_with_Solar_and_Storage_Vector = np.array([])
CPK_DC_Baseline_Vector = np.array([])
CPK_DC_with_Solar_Only_Vector = np.array([])
CPK_DC_with_Solar_and_Storage_Vector = np.array([])
CPP_DC_Baseline_Vector = np.array([])
CPP_DC_with_Solar_Only_Vector = np.array([])
CPP_DC_with_Solar_and_Storage_Vector = np.array([])
Energy_Charge_Baseline_Vector = np.array([])
Energy_Charge_with_Solar_Only_Vector = np.array([])
Energy_Charge_with_Solar_and_Storage_Vector = np.array([])
Cycles_Vector = np.array([])
Cycling_Penalty_Vector = np.array([])
for Month_Iter in range(1,13): # Iterate through all months
# Filter Load Profile Data to Selected Month
Load_Profile_Data_Month = Load_Profile_Data[Month_Data == Month_Iter]
# Filter PV Production Profile Data to Selected Month
Solar_PV_Profile_Data_Month = Solar_PV_Profile_Data[Month_Data == Month_Iter]
# Filter Volumetric Rate Data to Selected Month
Volumetric_Rate_Data_Month = Volumetric_Rate_Data[Month_Data == Month_Iter]
# Filter Marginal Emissions Data to Selected Month
Marginal_Emissions_Rate_Forecast_Data_Month = Marginal_Emissions_Rate_Forecast_Data[Month_Data == Month_Iter]
# Filter Carbon Adder Data to Selected Month
Carbon_Adder_Data_Month = Carbon_Adder_Data[Month_Data == Month_Iter]
# Set Demand Charge Values Based on Month
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
Peak_DC = Summer_Peak_DC
Part_Peak_DC = Summer_Part_Peak_DC
Noncoincident_DC = Summer_Noncoincident_DC
else:
Peak_DC = Winter_Peak_DC
Part_Peak_DC = Winter_Part_Peak_DC
Noncoincident_DC = Winter_Noncoincident_DC
# Filter Peak and Part-Peak Binary Data to Selected Month
if Summer_Peak_DC > 0:
Summer_Peak_Binary_Data_Month = Summer_Peak_Binary_Data[Month_Data == Month_Iter]
if Summer_Part_Peak_DC > 0:
Summer_Part_Peak_Binary_Data_Month = Summer_Part_Peak_Binary_Data[Month_Data == Month_Iter]
if Winter_Peak_DC > 0:
Winter_Peak_Binary_Data_Month = Winter_Peak_Binary_Data[Month_Data == Month_Iter]
if Winter_Part_Peak_DC > 0:
Winter_Part_Peak_Binary_Data_Month = Winter_Part_Peak_Binary_Data[Month_Data == Month_Iter]
# Filter PG&E-Proposed Charge and Discharge Hour Binary Data to Selected Month
if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or \
GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
PGE_Charge_Hour_Binary_Data_Month = PGE_Charge_Hour_Binary_Data[Month_Data == Month_Iter]
PGE_No_Charge_Hour_Binary_Data_Month = PGE_No_Charge_Hour_Binary_Data[Month_Data == Month_Iter]
PGE_Discharge_Hour_Binary_Data_Month = PGE_Discharge_Hour_Binary_Data[Month_Data == Month_Iter]
# Filter IOU-Proposed Charge and Discharge Hour Binary Data to Selected Month
if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
IOU_Charge_Hour_Binary_Data_Month = IOU_Charge_Hour_Binary_Data[Month_Data == Month_Iter]
IOU_Discharge_Hour_Binary_Data_Month = IOU_Discharge_Hour_Binary_Data[Month_Data == Month_Iter]
## Add "Padding" to Every Month of Data
# Don't pad Month 12, because the final state of charge is constrained
if Month_Iter in range(1, 12):
Load_Profile_Data_Month_Padded = np.concatenate((Load_Profile_Data_Month,
Load_Profile_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
Solar_PV_Profile_Data_Month_Padded = np.concatenate((Solar_PV_Profile_Data_Month,
Solar_PV_Profile_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
Volumetric_Rate_Data_Month_Padded = np.concatenate((Volumetric_Rate_Data_Month,
Volumetric_Rate_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
Marginal_Emissions_Rate_Data_Month_Padded = np.concatenate((Marginal_Emissions_Rate_Forecast_Data_Month,
Marginal_Emissions_Rate_Forecast_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
Carbon_Adder_Data_Month_Padded = np.concatenate((Carbon_Adder_Data_Month,
Carbon_Adder_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
if Summer_Peak_DC > 0:
Summer_Peak_Binary_Data_Month_Padded = np.concatenate((Summer_Peak_Binary_Data_Month,
Summer_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
if Summer_Part_Peak_DC > 0:
Summer_Part_Peak_Binary_Data_Month_Padded = np.concatenate((Summer_Part_Peak_Binary_Data_Month,
Summer_Part_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
if Winter_Peak_DC > 0:
Winter_Peak_Binary_Data_Month_Padded = np.concatenate((Winter_Peak_Binary_Data_Month,
Winter_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
if Winter_Part_Peak_DC > 0:
Winter_Part_Peak_Binary_Data_Month_Padded = np.concatenate((Winter_Part_Peak_Binary_Data_Month,
Winter_Part_Peak_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or \
GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
PGE_Charge_Hour_Binary_Data_Month_Padded = np.concatenate((PGE_Charge_Hour_Binary_Data_Month,
PGE_Charge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
PGE_No_Charge_Hour_Binary_Data_Month_Padded = np.concatenate((PGE_No_Charge_Hour_Binary_Data_Month,
PGE_No_Charge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
PGE_Discharge_Hour_Binary_Data_Month_Padded = np.concatenate((PGE_Discharge_Hour_Binary_Data_Month,
PGE_Discharge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
IOU_Charge_Hour_Binary_Data_Month_Padded = np.concatenate((IOU_Charge_Hour_Binary_Data_Month,
IOU_Charge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
IOU_Discharge_Hour_Binary_Data_Month_Padded = np.concatenate((IOU_Discharge_Hour_Binary_Data_Month,
IOU_Discharge_Hour_Binary_Data_Month[-(End_of_Month_Padding_Days * 24 * int(1 / delta_t)):]))
elif Month_Iter == 12:
Load_Profile_Data_Month_Padded = Load_Profile_Data_Month
# Don't Pad PV Production Profile Data
Solar_PV_Profile_Data_Month_Padded = Solar_PV_Profile_Data_Month
Volumetric_Rate_Data_Month_Padded = Volumetric_Rate_Data_Month
# Don't Pad Marginal Emissions Data
Marginal_Emissions_Rate_Data_Month_Padded = Marginal_Emissions_Rate_Forecast_Data_Month
Carbon_Adder_Data_Month_Padded = Carbon_Adder_Data_Month
# Don't Pad Peak and Part-Peak Binary Data
if Summer_Peak_DC > 0:
Summer_Peak_Binary_Data_Month_Padded = Summer_Peak_Binary_Data_Month
if Summer_Part_Peak_DC > 0:
Summer_Part_Peak_Binary_Data_Month_Padded = Summer_Part_Peak_Binary_Data_Month
if Winter_Peak_DC > 0:
Winter_Peak_Binary_Data_Month_Padded = Winter_Peak_Binary_Data_Month
if Winter_Part_Peak_DC > 0:
Winter_Part_Peak_Binary_Data_Month_Padded = Winter_Part_Peak_Binary_Data_Month
if GHG_Reduction_Solution_Input == "No-Charging Time Constraint" or \
GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
PGE_Charge_Hour_Binary_Data_Month_Padded = PGE_Charge_Hour_Binary_Data_Month
PGE_No_Charge_Hour_Binary_Data_Month_Padded = PGE_No_Charge_Hour_Binary_Data_Month
PGE_Discharge_Hour_Binary_Data_Month_Padded = PGE_Discharge_Hour_Binary_Data_Month
# Don't Pad IOU-Proposed Charge and Discharge Hour Binary Data
if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
IOU_Charge_Hour_Binary_Data_Month_Padded = IOU_Charge_Hour_Binary_Data_Month
IOU_Discharge_Hour_Binary_Data_Month_Padded = IOU_Discharge_Hour_Binary_Data_Month
= len(Load_Profile_Data_Month_Padded)
all_tsteps = np.array(list(range(0, numtsteps)))
c_Month_Bill_Only = np.concatenate(((Volumetric_Rate_Data_Month_Padded * delta_t),
(-Volumetric_Rate_Data_Month_Padded * delta_t),
np.zeros((numtsteps,)),
[Noncoincident_DC],
[Peak_DC],
[Part_Peak_DC]))
c_Month_Carbon_Only = np.concatenate(((Carbon_Adder_Data_Month_Padded * delta_t),
(-Carbon_Adder_Data_Month_Padded * delta_t),
np.zeros(numtsteps,),
[0.],
[0.],
[0.]))
c_Month_Degradation_Only = np.concatenate((
(((Eff_c * cycle_pen) / (2. * Total_Storage_Capacity)) * delta_t) * np.ones(numtsteps,),
((cycle_pen / (Eff_d * 2. * Total_Storage_Capacity)) * delta_t) * np.ones(numtsteps,),
np.zeros(numtsteps,),
[0.],
[0.],
[0.]))
if Storage_Control_Algorithm_Name == "OSESMO Economic Dispatch":
c_Month_Solar_Self_Supply = np.concatenate((np.zeros(numtsteps,),
np.zeros(numtsteps,),
np.zeros(numtsteps,),
[0.],
[0.],
[0.]))
elif Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
c_Month_Solar_Self_Supply = np.concatenate((-np.ones(numtsteps,),
np.zeros(numtsteps,),
np.zeros(numtsteps,),
[0.],
[0.],
[0.]))
c_Month = c_Month_Bill_Only + c_Month_Carbon_Only + c_Month_Degradation_Only + c_Month_Solar_Self_Supply
length_x = len(c_Month)
c_Month = matrix(c_Month, tc = 'd')
A_E = sparse(matrix(0., (numtsteps - 1, length_x), tc = 'd'), tc = 'd')
b_E = sparse(matrix(0., (numtsteps - 1, 1), tc = 'd'), tc = 'd')
for n in range(0, numtsteps - 1):
A_E[n, n + (2 * numtsteps)] = 1.
A_E[n, n + (2 * numtsteps) + 1] = -1.
A_E[n, n] = Eff_c * delta_t
A_E[n, n + numtsteps] = (-1 / Eff_d) * delta_t
A_Month = sparse([A_E,
-A_E], tc = 'd')
b_Month = sparse([b_E,
-b_E], tc = 'd')
A_P_ES_in = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
for n in range(0, numtsteps):
A_P_ES_in[n, n] = -1.
A_Month = sparse([A_Month,
A_P_ES_in,
-A_P_ES_in], tc = 'd')
b_Month = sparse([b_Month,
sparse(matrix(0., (numtsteps, 1), tc = 'd'), tc = 'd'),
sparse(matrix(Storage_Power_Rating_Input, (numtsteps, 1), tc = 'd'), tc = 'd')], tc = 'd')
A_P_ES_out = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
for n in range(0, numtsteps):
A_P_ES_out[n, n + numtsteps] = -1.
A_Month = sparse([A_Month,
A_P_ES_out,
-A_P_ES_out], tc = 'd')
b_Month = sparse([b_Month,
sparse(matrix(0., (numtsteps, 1), tc = 'd'), tc = 'd'),
sparse(matrix(Storage_Power_Rating_Input, (numtsteps, 1), tc = 'd'), tc = 'd')], tc = 'd')
A_Ene_Lvl_min = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
b_Ene_Lvl_min = sparse(matrix(0., (numtsteps, 1), tc = 'd'), tc = 'd')
for n in range(0, numtsteps):
A_Ene_Lvl_min[n, n + (2 * numtsteps)] = -1.
A_Month = sparse([A_Month,
A_Ene_Lvl_min], tc = 'd')
b_Month = sparse([b_Month,
b_Ene_Lvl_min], tc = 'd')
A_Ene_Lvl_max = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
b_Ene_Lvl_max = matrix(Usable_Storage_Capacity * np.ones((numtsteps,1)), tc = 'd')
for n in range(0, numtsteps):
A_Ene_Lvl_max[n, n + (2 * numtsteps)] = 1.
A_Month = sparse([A_Month,
A_Ene_Lvl_max], tc = 'd')
b_Month = sparse([b_Month,
b_Ene_Lvl_max], tc = 'd')
A_Ene_Lvl_0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
A_Ene_Lvl_0[0, (2 * numtsteps)] = 1.
if Month_Iter == 1:
b_Ene_Lvl_0 = matrix(Initial_Final_SOC * Usable_Storage_Capacity_Input, tc = 'd')
elif Month_Iter in range(2, (12 + 1)):
b_Ene_Lvl_0 = matrix(Next_Month_Initial_Energy_Level, tc = 'd')
A_Month = sparse([A_Month,
A_Ene_Lvl_0,
-A_Ene_Lvl_0], tc = 'd')
b_Month = sparse([b_Month,
b_Ene_Lvl_0,
-b_Ene_Lvl_0], tc = 'd')
A_Ene_Lvl_N = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
A_Ene_Lvl_N[0, (3 * numtsteps) - 1] = 1.
b_Ene_Lvl_N = matrix(Initial_Final_SOC * Usable_Storage_Capacity_Input, tc = 'd')
A_Month = sparse([A_Month,
A_Ene_Lvl_N,
-A_Ene_Lvl_N], tc = 'd')
b_Month = sparse([b_Month,
b_Ene_Lvl_N,
-b_Ene_Lvl_N], tc = 'd')
if Noncoincident_DC > 0:
A_NC_DC = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
b_NC_DC = matrix(-Load_Profile_Data_Month_Padded + Solar_PV_Profile_Data_Month_Padded, tc = 'd')
for n in range(0, numtsteps):
A_NC_DC[n, n] = 1.
A_NC_DC[n, n + numtsteps] = -1.
A_NC_DC[n, (3 * numtsteps)] = -1.
A_Month = sparse([A_Month,
A_NC_DC], tc = 'd')
b_Month = sparse([b_Month,
b_NC_DC], tc = 'd')
A_NC_DC_gt0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
A_NC_DC_gt0[0, (3 * numtsteps)] = -1.
b_NC_DC_gt0 = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_NC_DC_gt0], tc = 'd')
b_Month = sparse([b_Month,
b_NC_DC_gt0], tc = 'd')
if Peak_DC > 0:
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
Peak_Indices = all_tsteps[Summer_Peak_Binary_Data_Month_Padded == 1]
A_CPK_DC = sparse(matrix(0., (sum(Summer_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
b_CPK_DC = matrix(-Load_Profile_Data_Month_Padded[Summer_Peak_Binary_Data_Month_Padded == 1] + \
Solar_PV_Profile_Data_Month_Padded[Summer_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
else:
Peak_Indices = all_tsteps[Winter_Peak_Binary_Data_Month_Padded == 1]
A_CPK_DC = sparse(matrix(0., (sum(Winter_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
b_CPK_DC = matrix(-Load_Profile_Data_Month_Padded[Winter_Peak_Binary_Data_Month_Padded == 1] + \
Solar_PV_Profile_Data_Month_Padded[Winter_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
for n in range(0, len(Peak_Indices)):
Peak_Index_n = int(Peak_Indices[n])
A_CPK_DC[n, Peak_Index_n] = 1.
A_CPK_DC[n, numtsteps + Peak_Index_n] = -1.
A_CPK_DC[n, (3 * numtsteps) + 1] = -1.
A_Month = sparse([A_Month,
A_CPK_DC], tc = 'd')
b_Month = sparse([b_Month,
b_CPK_DC], tc = 'd')
A_CPK_DC_gt0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
A_CPK_DC_gt0[0, (3 * numtsteps) + 1] = -1.
b_CPK_DC_gt0 = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_CPK_DC_gt0], tc = 'd')
b_Month = sparse([b_Month,
b_CPK_DC_gt0], tc = 'd')
if Part_Peak_DC > 0:
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
Part_Peak_Indices = all_tsteps[Summer_Part_Peak_Binary_Data_Month_Padded == 1]
A_CPP_DC = sparse(matrix(0., (sum(Summer_Part_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
b_CPP_DC = matrix(-Load_Profile_Data_Month_Padded[Summer_Part_Peak_Binary_Data_Month_Padded == 1] + \
Solar_PV_Profile_Data_Month_Padded[Summer_Part_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
else:
Part_Peak_Indices = all_tsteps[Winter_Part_Peak_Binary_Data_Month_Padded == 1]
A_CPP_DC = sparse(matrix(0., (sum(Winter_Part_Peak_Binary_Data_Month_Padded), length_x), tc = 'd'), tc = 'd')
b_CPP_DC = matrix(-Load_Profile_Data_Month_Padded[Winter_Part_Peak_Binary_Data_Month_Padded == 1] + \
Solar_PV_Profile_Data_Month_Padded[Winter_Part_Peak_Binary_Data_Month_Padded == 1], tc = 'd')
for n in range(0, len(Part_Peak_Indices)):
Part_Peak_Index_n = int(Part_Peak_Indices[n])
A_CPP_DC[n, Part_Peak_Index_n] = 1.
A_CPP_DC[n, numtsteps + Part_Peak_Index_n] = -1.
A_CPP_DC[n, (3 * numtsteps) + 2] = -1.
A_Month = sparse([A_Month,
A_CPP_DC], tc = 'd')
b_Month = sparse([b_Month,
b_CPP_DC], tc = 'd')
A_CPP_DC_gt0 = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
A_CPP_DC_gt0[0, (3 * numtsteps) + 2] = -1.
b_CPP_DC_gt0 = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_CPP_DC_gt0], tc = 'd')
b_Month = sparse([b_Month,
b_CPP_DC_gt0], tc = 'd')
# <= 0.
if Model_Type_Input == "Solar Plus Storage" and Solar_Profile_Name_Input != "No Solar" and \
Solar_Size_Input > 0 and ITC_Constraint_Input == 1:
Solar_PV_Profile_Data_Month_Padded_Nonnegative = Solar_PV_Profile_Data_Month_Padded
Solar_PV_Profile_Data_Month_Padded_Nonnegative[Solar_PV_Profile_Data_Month_Padded_Nonnegative < 0] = 0.
A_ITC = sparse(matrix(0., (numtsteps, length_x)))
b_ITC = matrix(Solar_PV_Profile_Data_Month_Padded_Nonnegative, tc = 'd')
for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
A_ITC[n, n] = 1.
A_Month = sparse([A_Month,
A_ITC])
b_Month = sparse([b_Month,
b_ITC], tc = 'd')
## Optional Constraint - No-Charging Time Constraint
if GHG_Reduction_Solution_Input == "No-Charging Time Constraint":
# PG&E has suggested a set of time-based constraints on storage charging.
# One of these constraints is that storage would not be allowed to discharge between 4:00 pm and 9:00 pm.
# No-Charging Constraint
# Charging power in each timestep is set equal to 0 between 4:00 pm and 9:00 pm.
# Because charging power is constrained to be greater than
# zero, setting the sum of all charging power timesteps to 0 (a
# single constraint across all timesteps) ensures that all values will be zero
# without needing to set a constraint for each timestep.
# Sum of all P_ES_in(t) between 4:00 and 9:00 = 0
# Because of nonnegative constraint on P_ES_in(t), this is
# equivalent to a set of numtsteps constraints stating that
# all P_ES_in(t) between 4:00 and 9:00 = 0 for each timestep.
A_PGE_No_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
PGE_No_Charge_Hour_Indices = all_tsteps[PGE_No_Charge_Hour_Binary_Data_Month_Padded == 1]
# Sum of all P_ES_in(t) between 4:00 and 9:00
A_PGE_No_Charge[0, PGE_No_Charge_Hour_Indices] = 1.
b_PGE_No_Charge = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_PGE_No_Charge], tc = 'd')
b_Month = sparse([b_Month,
b_PGE_No_Charge], tc = 'd')
## Optional Constraint - Charging and Discharging Time Constraints
if GHG_Reduction_Solution_Input == "Charging and Discharging Time Constraints":
# PG&E has suggested a set of time-based constraints on storage charging.
# At least 50% of total charging would need to occur between 9:00 am and 2:00 pm,
# and at least 50% of total discharging would need to occur between 4:00 pm and 9:00 pm.
# In addition, storage would not be allowed to discharge between 4:00 pm and 9:00 pm.
# Derivation of charging constraint in standard linear form Ax <= 0:
# Sum of all P_ES_in(t) between 9:00 and 2:00/sum of all P_ES_in(t) >= 0.5
# Sum of all P_ES_in(t) between 9:00 and 2:00 >= 0.5 * sum of all P_ES_in(t)
# 0 >= 0.5 * sum of all P_ES_in(t) - sum of all P_ES_in(t) between 9:00 and 2:00
# 0.5 * sum of all P_ES_in(t) - sum of all P_ES_in(t) between 9:00 and 2:00 <= 0
# 0.5 * sum of all P_ES_in(t) not between 9:00 and 2:00 - 0.5 * sum of all P_ES_in(t)
# between 9:00 and 2:00 <= 0.
# Charging Constraint
A_PGE_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
# 0.5 * sum of all P_ES_in(t)
A_PGE_Charge[0, range(0, numtsteps)] = 0.5
PGE_Charge_Hour_Indices = all_tsteps[PGE_Charge_Hour_Binary_Data_Month_Padded == 1]
# -0.5 * sum of all P_ES_in(t) between 12:00 and 4:00
A_PGE_Charge[0, PGE_Charge_Hour_Indices] = -0.5
b_PGE_Charge = matrix(0., tc = 'd')
A_Month = sparse([A_Month, A_PGE_Charge], tc = 'd')
b_Month = sparse([b_Month, b_PGE_Charge], tc = 'd')
# No-Charging Constraint
# Charging power in each timestep is set equal to 0 between 4:00 pm and 9:00 pm.
# Because charging power is constrained to be greater than
# zero, setting the sum of all charging power timesteps to 0 (a
# single constraint across all timesteps) ensures that all values will be zero
# without needing to set a constraint for each timestep.
# Sum of all P_ES_in(t) between 4:00 and 9:00 = 0
# Because of nonnegative constraint on P_ES_in(t), this is
# equivalent to a set of numtsteps constraints stating that
# all P_ES_in(t) between 4:00 and 9:00 = 0 for each timestep.
A_PGE_No_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
PGE_No_Charge_Hour_Indices = all_tsteps[PGE_No_Charge_Hour_Binary_Data_Month_Padded == 1]
# Sum of all P_ES_in(t) between 4:00 and 9:00
A_PGE_No_Charge[0, PGE_No_Charge_Hour_Indices] = 1.
b_PGE_No_Charge = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_PGE_No_Charge], tc = 'd')
b_Month = sparse([b_Month,
b_PGE_No_Charge], tc = 'd')
# Derivation of discharging constraint in standard linear form Ax <= 0:
# Sum of all P_ES_out(t) between 4:00 and 9:00/sum of all P_ES_out(t) >= 0.5
# Sum of all P_ES_out(t) between 4:00 and 9:00 >= 0.5 * sum of all P_ES_out(t)
# 0 >= 0.5 * sum of all P_ES_out(t) - sum of all P_ES_out(t) between 4:00 and 9:00
# 0.5 * sum of all P_ES_out(t) - sum of all P_ES_out(t) between 4:00 and 9:00 <= 0
# 0.5 * sum of all P_ES_out(t) not between 4:00 and 9:00 - 0.5 * sum of all P_ES_out(t)
# between 4:00 and 9:00 <= 0.
# Discharging Constraint
A_PGE_Discharge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
# 0.5 * sum of all P_ES_out(t)
A_PGE_Discharge[0, range(numtsteps, 2 * numtsteps)] = 0.5
PGE_Discharge_Hour_Indices = all_tsteps[PGE_Discharge_Hour_Binary_Data_Month_Padded == 1]
# -0.5 * sum of all P_ES_out(t) between 12:00 and 4:00
A_PGE_Discharge[0, numtsteps + PGE_Discharge_Hour_Indices] = -0.5
b_PGE_Discharge = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_PGE_Discharge], tc = 'd')
b_Month = sparse([b_Month,
b_PGE_Discharge], tc = 'd')
## Optional Constraint - Investor-Owned-Utility-Proposed Charge-Discharge Hours
if GHG_Reduction_Solution_Input == "IOU-Proposed Charge-Discharge Time Constraints":
# The Investor-Owned Utilities have suggested constraints on charging in particular hours
# as a proposed method for reducing greenhouse gas emissions associated with storage dispatch.
# Specifically, at least 50% of total charging would need to occur between 12:00 noon and 4:00 pm,
# and at least 50% of total discharging would need to occur between 4:00 pm and 9:00 pm.
# Derivation of charging constraint in standard linear form Ax <= 0:
# Sum of all P_ES_in(t) between 12:00 and 4:00/sum of all P_ES_in(t) >= 0.5
# Sum of all P_ES_in(t) between 12:00 and 4:00 >= 0.5 * sum of all P_ES_in(t)
# 0 >= 0.5 * sum of all P_ES_in(t) - sum of all P_ES_in(t) between 12:00 and 4:00
# 0.5 * sum of all P_ES_in(t) - sum of all P_ES_in(t) between 12:00 and 4:00 <= 0
# 0.5 * sum of all P_ES_in(t) not between 12:00 and 4:00 - 0.5 * sum of all P_ES_in(t)
# between 12:00 and 4:00 <= 0.
# Charging Constraint
A_IOU_Charge = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
# 0.5 * sum of all P_ES_in(t)
A_IOU_Charge[1, range(0, numtsteps)] = 0.5
IOU_Charge_Hour_Indices = all_tsteps[IOU_Charge_Hour_Binary_Data_Month_Padded == 1]
# -0.5 * sum of all P_ES_in(t) between 12:00 and 4:00
A_IOU_Charge[0, IOU_Charge_Hour_Indices] = -0.5
b_IOU_Charge = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_IOU_Charge], tc = 'd')
b_Month = sparse([b_Month,
b_IOU_Charge], tc = 'd')
# Derivation of discharging constraint in standard linear form Ax <= 0:
# Sum of all P_ES_out(t) between 4:00 and 9:00/sum of all P_ES_out(t) >= 0.5
# Sum of all P_ES_out(t) between 4:00 and 9:00 >= 0.5 * sum of all P_ES_out(t)
# 0 >= 0.5 * sum of all P_ES_out(t) - sum of all P_ES_out(t) between 4:00 and 9:00
# 0.5 * sum of all P_ES_out(t) - sum of all P_ES_out(t) between 4:00 and 9:00 <= 0
# 0.5 * sum of all P_ES_out(t) not between 4:00 and 9:00 - 0.5 * sum of all P_ES_out(t)
# between 4:00 and 9:00 <= 0.
# Discharging Constraint
A_IOU_Discharge = sparse(matrix(0., (1, length_x)))
# 0.5 * sum of all P_ES_out(t)
A_IOU_Discharge[0, range(numtsteps, 2 * numtsteps)] = 0.5
IOU_Discharge_Hour_Indices = all_tsteps[IOU_Discharge_Hour_Binary_Data_Month_Padded == 1]
# -0.5 * sum of all P_ES_out(t) between 12:00 and 4:00
A_IOU_Discharge[0, numtsteps + IOU_Discharge_Hour_Indices] = -0.5
b_IOU_Discharge = matrix(0., tc = 'd')
A_Month = sparse([A_Month,
A_IOU_Discharge], tc = 'd')
b_Month = sparse([b_Month,
b_IOU_Discharge], tc = 'd')
## Optional Constraint - Non-Positive GHG Emissions Impact
# Note - the system is following the forecast signal to obey
# this constraint, not the evaluation signal. It may be necessary
# to adjust this constraint to aim for a negative GHG impact
# based on the forecast signal, in order to achieve a non-positive
# GHG impact as measured by the evaluation signal.
if GHG_Reduction_Solution_Input == "Non-Positive GHG Constraint":
# The sum of the net battery charge/discharge load in each
# timestep, multiplied by the marginal emissions rate in each
# timestep, must be less than or equal to 0.
# A_Non_Positive_GHG is similar to c_Month_Carbon_Only,
# but with Marginal Emissions Rate Data instead of Carbon Adder Data and transposed.
A_Non_Positive_GHG = matrix(np.concatenate((np.reshape(Marginal_Emissions_Rate_Data_Month_Padded * delta_t, (1, len(Marginal_Emissions_Rate_Data_Month_Padded))), \
np.reshape(-Marginal_Emissions_Rate_Data_Month_Padded * delta_t, (1, len(Marginal_Emissions_Rate_Data_Month_Padded))), \
np.zeros((1, numtsteps)), \
np.reshape(np.array([0., 0., 0.]), (1, 3))), \
axis = 1))
b_Non_Positive_GHG = matrix(0., tc = 'd')
A_Month = sparse([A_Month, A_Non_Positive_GHG], tc = 'd')
b_Month = sparse([b_Month, b_Non_Positive_GHG], tc = 'd')
## Optional Constraint - Equivalent Cycling Constraint
# Note: due to the OSESMO model structure, the annual cycling requirement
# must be converted to an equivalent monthly cycling requirement.
if Equivalent_Cycling_Constraint_Input > 0:
SGIP_Monthly_Cycling_Requirement = Equivalent_Cycling_Constraint_Input * \
(len(Load_Profile_Data_Month_Padded) / len(Load_Profile_Data))
# Formula for equivalent cycles is identical to the one used to calculate Cycles_Month:
# Equivalent Cycles = sum((P_ES_in(t) * (((Eff_c)/(2 * Size_ES)) * delta_t)) + \
# (P_ES_out(t) * ((1/(Eff_d * 2 * Size_ES)) * delta_t)))
# Equivalent Cycles >= SGIP_Monthly_Cycling Requirement
# To convert to standard linear program form, multiply both sides by -1.
# -Equivalent Cycles <= -SGIP_Monthly_Cycling_Requirement
A_Equivalent_Cycles = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
# sum of all P_ES_in(t) * (((Eff_c)/(2 * Size_ES)) * delta_t)
A_Equivalent_Cycles[0, range(0, numtsteps)] = -(((Eff_c) / (2 * Total_Storage_Capacity)) * delta_t)
# sum of all P_ES_out(t) * ((1/(Eff_d * 2 * Size_ES)) * delta_t)
A_Equivalent_Cycles[0, range(numtsteps, 2 * numtsteps)] = -((1 / (Eff_d * 2 * Total_Storage_Capacity)) * delta_t)
b_Equivalent_Cycles = matrix(-SGIP_Monthly_Cycling_Requirement, tc = 'd')
A_Month = sparse([A_Month,
A_Equivalent_Cycles], tc = 'd')
b_Month = sparse([b_Month,
b_Equivalent_Cycles], tc = 'd')
## Optional Constraint - Operational/SGIP Round-Trip Efficiency Constraint
# Note: due to the OSESMO model structure, the annual RTE requirement
# must be converted to an equivalent monthly RTE requirement.
if Annual_RTE_Constraint_Input > 0:
# If it's impossible for the storage system to achieve the RTE requirement
if (Eff_c * Eff_d * Storage_Power_Rating_Input) / (
Storage_Power_Rating_Input + Parasitic_Storage_Load) < Annual_RTE_Constraint_Input:
print(['No solution - could not achieve SGIP RTE requirement' \
' with the provided nameplate efficiency and auxiliary storage load values.'])
# an average RTE of at least 66.5% over ten years (equivalent to a
# first-year RTE of 69.6%) in order to qualify for SGIP incentive
# payments." (Stem, Inc.'s Petition for Modification of Decision 15-11-027, pg. 2)
# Operational RTE Percent >= 0.696
# (sum(P_ES_out) * delta_t)/((sum(P_ES_in) * delta_t) + (sum(Auxiliary_Storage_Load) * delta_t) >= 0.696
# (sum(P_ES_out) * delta_t) >= 0.696 * (sum(P_ES_in) * delta_t) + (sum(Auxiliary_Storage_Load) * delta_t)
# To convert to standard linear program form, multiply both sides by -1.
# -(sum(P_ES_out) * delta_t) <= -0.696 * (sum(P_ES_in) * delta_t) -(sum(Auxiliary_Storage_Load) * delta_t)
# -(sum(P_ES_out) * delta_t) + 0.696 * (sum(P_ES_in) * delta_t) <= -(sum(Auxiliary_Storage_Load) * delta_t)
# 0.696 * (sum(P_ES_in) * delta_t) -(sum(P_ES_out) * delta_t) <= -(sum(Auxiliary_Storage_Load) * delta_t)
A_SGIP_RTE = sparse(matrix(0., (1, length_x), tc = 'd'), tc = 'd')
# sum of all (P_ES_in(t) * (0.696 * delta_t)
A_SGIP_RTE[0, range(0, numtsteps)] = (Annual_RTE_Constraint_Input * delta_t)
# sum of all P_ES_out(t) * -delta_t
A_SGIP_RTE[0, range(numtsteps, 2 * numtsteps)] = -delta_t
# (sum(Auxiliary_Storage_Load) * delta_t)
b_SGIP_RTE = matrix(-((numtsteps * Parasitic_Storage_Load) * delta_t), tc = 'd')
A_Month = sparse([A_Month,
A_SGIP_RTE], tc = 'd')
b_Month = sparse([b_Month,
b_SGIP_RTE], tc = 'd')
## Optional Constraint - No-Export Constraint
# This constraint prevents the standalone energy-storage systems from
# backfeeding power from the storage system onto the distribution grid.
# Solar-plus storage systems are allowed to export to the grid.
if Model_Type_Input == "Storage Only":
# P_load(t) + P_ES_in(t) - P_ES_out(t) >= 0
# -P_ES_in(t) + P_ES_out(t) <= P_load(t)
A_No_Export = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
b_No_Export = matrix(Load_Profile_Data_Month_Padded, tc = 'd')
for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
A_No_Export[n, n] = -1.
A_No_Export[n, n + numtsteps] = 1.
A_Month = sparse([A_Month,
A_No_Export], tc = 'd')
b_Month = sparse([b_Month,
b_No_Export], tc = 'd')
## Optional Constraint - Solar Self-Supply
# In the Economic Dispatch mode, this constraint is not necessary -
# the presence of a positive cost on battery charging ensures that
# simultaneous charging and discharging does not occur.
# However, in the Non-Economic Solar Self-Consumption, which negative
# costs on both charging and discharging, the battery charges and
# discharges simultaneously so as to minimize total cost.
# This constraint ensures that simultaneous charging and
# discharging does not occur, and ensures that the storage system
# only charges when there is excess solar power (net load is negative)
# and discharges when net load is positive.
if Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
# P_ES_in <= Non-negative(P_PV - P_Load)
Excess_Solar_Profile_Data_Month_Padded = Solar_PV_Profile_Data_Month_Padded - Load_Profile_Data_Month_Padded
Excess_Solar_Profile_Data_Month_Padded[Excess_Solar_Profile_Data_Month_Padded < 0] = 0
A_Self_Supply_Charge = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
b_Self_Supply_Charge = matrix(Excess_Solar_Profile_Data_Month_Padded, tc = 'd')
for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
A_Self_Supply_Charge[n, n] = 1.
A_Month = sparse([A_Month,
A_Self_Supply_Charge], tc = 'd')
b_Month = sparse([b_Month,
b_Self_Supply_Charge], tc = 'd')
# P_ES_out <= Non-negative(P_Load - P_PV)
Non_Negative_Net_Load_Profile_Data_Month_Padded = Load_Profile_Data_Month_Padded - Solar_PV_Profile_Data_Month_Padded
Non_Negative_Net_Load_Profile_Data_Month_Padded[Non_Negative_Net_Load_Profile_Data_Month_Padded < 0] = 0
A_Self_Supply_Discharge = sparse(matrix(0., (numtsteps, length_x), tc = 'd'), tc = 'd')
b_Self_Supply_Discharge = Non_Negative_Net_Load_Profile_Data_Month_Padded
for n in range(0, numtsteps): # Iterates from Index 0 to Index (numtsteps-1) - equivalent to Timesteps 1 to (numtsteps)
A_Self_Supply_Discharge[n, n + numtsteps] = 1.
A_Month = sparse([A_Month,
A_Self_Supply_Discharge], tc = 'd')
b_Month = sparse([b_Month,
b_Self_Supply_Discharge], tc = 'd')
## Run LP Optimization Algorithm
# Check that number of rows in A_Month.size == number of rows in b_Month.size
# Check that A_Month.typecode, b_Month.typecode, c_Month.typecode == 'd'
b_Month = matrix(b_Month, tc = 'd') # Convert from sparse to dense matrix
lp_solution = solvers.lp(c_Month, A_Month, b_Month)
x_Month = lp_solution['x']
print("Optimization complete for Month %d." % Month_Iter)
## Separate Decision Variable Vectors
x_Month = np.asarray(x_Month)
P_ES_in_Month_Padded = x_Month[range(0, numtsteps)]
P_ES_out_Month_Padded = x_Month[range(numtsteps, 2 * numtsteps)]
Ene_Lvl_Month_Padded = x_Month[range(2 * numtsteps, 3 * numtsteps)]
## Add Auxiliary Load/Parasitic Losses to P_ES_in
P_ES_in_Month_Padded = P_ES_in_Month_Padded + Parasitic_Storage_Load
## Remove "Padding" from Decision Variables
# Data is padded in Months 1-11, and not in Month 12
if Month_Iter in range(1, 12):
P_ES_in_Month_Unpadded = P_ES_in_Month_Padded[range(0, (len(P_ES_in_Month_Padded)-int(End_of_Month_Padding_Days * 24 * (1 / delta_t))))]
P_ES_out_Month_Unpadded = P_ES_out_Month_Padded[range(0, (len(P_ES_out_Month_Padded)-int(End_of_Month_Padding_Days * 24 * (1 / delta_t))))]
Ene_Lvl_Month_Unpadded = Ene_Lvl_Month_Padded[range(0, (len(Ene_Lvl_Month_Padded)-int(End_of_Month_Padding_Days * 24 * (1 / delta_t))))]
elif Month_Iter == 12:
P_ES_in_Month_Unpadded = P_ES_in_Month_Padded
P_ES_out_Month_Unpadded = P_ES_out_Month_Padded
Ene_Lvl_Month_Unpadded = Ene_Lvl_Month_Padded
# Save Final Energy Level of Battery for use in next month
Previous_Month_Final_Energy_Level = Ene_Lvl_Month_Unpadded[-1,0]
Next_Month_Initial_Energy_Level = Previous_Month_Final_Energy_Level + \
((Eff_c * P_ES_in_Month_Unpadded[-1,0]) - \
((1 / Eff_d) * P_ES_out_Month_Unpadded[-1,0])) * delta_t
## Calculate Monthly Peak Demand Using 15-Minute Intervals
# Demand Charges are Based on 15-minute interval periods.
# If the model has 15-minute timestep resolution, the decision
# variables can be used directly as maximum coincident and noncoincident demand values.
# Otherwise (such as with 5-minute timestep resolution), maximum
# demand must be calculated by taking 15-minute averages of the
# demand values, and then calculating the maximum of these averages.
if delta_t < (15 / 60):
# Noncoincident Maximum Demand With and Without Solar and Storage
# Create Net Load Profile After Solar Only
Solar_Only_Net_Load_Profile_Data_Month_5_Min = (Load_Profile_Data_Month - Solar_PV_Profile_Data_Month)
# Create Net Load Profile After Solar and Storage
Solar_Storage_Net_Load_Profile_Data_Month_5_Min = (Load_Profile_Data_Month - Solar_PV_Profile_Data_Month + \
P_ES_in_Month_Unpadded - P_ES_out_Month_Unpadded)
# Number of timesteps to average to get 15-minute net load data.
Reshaped_Rows_Num = int((15 / 60) / delta_t)
# Reshape load data so that each 15-minute increment's data
Load_Profile_Data_Month_Reshaped = np.reshape(Load_Profile_Data_Month, \
(Reshaped_Rows_Num, len(Load_Profile_Data_Month) / Reshaped_Rows_Num))
Solar_Only_Net_Load_Profile_Data_Month_5_Min_Reshaped = np.reshape(Solar_Only_Net_Load_Profile_Data_Month_5_Min, \
(Reshaped_Rows_Num, len(Solar_Only_Net_Load_Profile_Data_Month_5_Min) / Reshaped_Rows_Num))
Solar_Storage_Net_Load_Profile_Data_Month_5_Min_Reshaped = np.reshape(Solar_Storage_Net_Load_Profile_Data_Month_5_Min, \
(Reshaped_Rows_Num, len(Solar_Storage_Net_Load_Profile_Data_Month_5_Min) / Reshaped_Rows_Num))
Load_Profile_Data_Month_15_Min = np.mean(Load_Profile_Data_Month_Reshaped, 1)
Solar_Only_Net_Load_Profile_Data_Month_15_Min = np.mean(Solar_Only_Net_Load_Profile_Data_Month_5_Min_Reshaped, 1)
Solar_Storage_Net_Load_Profile_Data_Month_15_Min = np.mean(Solar_Storage_Net_Load_Profile_Data_Month_5_Min_Reshaped, 1)
P_max_NC_Month_Baseline = np.max(Load_Profile_Data_Month_15_Min)
P_max_NC_Month_with_Solar_Only = np.max(Solar_Only_Net_Load_Profile_Data_Month_15_Min)
P_max_NC_Month_with_Solar_and_Storage = np.max(Solar_Storage_Net_Load_Profile_Data_Month_15_Min)
if Peak_DC > 0:
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
CPK_Load_Profile_Data_Month = Load_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1]
CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Summer_Peak_Binary_Data_Month == 1]
CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Summer_Peak_Binary_Data_Month == 1]
else:
CPK_Load_Profile_Data_Month = Load_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1]
CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Winter_Peak_Binary_Data_Month == 1]
CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Winter_Peak_Binary_Data_Month == 1]
# is in the same column. This creates an array with 3 rows for 5-minute data.
CPK_Load_Profile_Data_Month_Reshaped = np.reshape(CPK_Load_Profile_Data_Month, \
(Reshaped_Rows_Num, len(CPK_Load_Profile_Data_Month) / Reshaped_Rows_Num))
CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min_Reshaped = np.reshape(CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min, \
(Reshaped_Rows_Num, len(CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min) / Reshaped_Rows_Num))
CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min_Reshaped = np.reshape(CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min, \
(Reshaped_Rows_Num, len(CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min) / Reshaped_Rows_Num))
# Create 15-minute load profiles by calculating the average of each column.
CPK_Load_Profile_Data_Month_15_Min = np.mean(CPK_Load_Profile_Data_Month_Reshaped, 1)
CPK_Solar_Only_Net_Load_Profile_Data_Month_15_Min = np.mean(CPK_Solar_Only_Net_Load_Profile_Data_Month_5_Min_Reshaped, 1)
CPK_Solar_Storage_Net_Load_Profile_Data_Month_15_Min = np.mean(CPK_Solar_Storage_Net_Load_Profile_Data_Month_5_Min_Reshaped, 1)
# Calculate Coincident Peak Demand
P_max_CPK_Month_Baseline = np.max(CPK_Load_Profile_Data_Month_15_Min)
P_max_CPK_Month_with_Solar_Only = np.max(CPK_Solar_Only_Net_Load_Profile_Data_Month_15_Min)
P_max_CPK_Month_with_Solar_and_Storage = np.max(CPK_Solar_Storage_Net_Load_Profile_Data_Month_15_Min)
else:
# If there is no Coincident Peak Demand Period (or if the
# corresponding demand charge is $0/kW), set P_max_CPK to 0 kW.
P_max_CPK_Month_Baseline = 0
P_max_CPK_Month_with_Solar_Only = 0
P_max_CPK_Month_with_Solar_and_Storage = 0
# Coincident Part-Peak Demand With and Without Storage
if Part_Peak_DC > 0:
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
# Create Coincident Part-Peak Load and Net Load Profiles
CPP_Load_Profile_Data_Month = Load_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1]
CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Summer_Part_Peak_Binary_Data_Month == 1]
CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Summer_Part_Peak_Binary_Data_Month == 1]
else:
# Create Coincident Part-Peak Load and Net Load Profiles
CPP_Load_Profile_Data_Month = Load_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1]
CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min = Solar_Only_Net_Load_Profile_Data_Month_5_Min[Winter_Part_Peak_Binary_Data_Month == 1]
CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min = Solar_Storage_Net_Load_Profile_Data_Month_5_Min[Winter_Part_Peak_Binary_Data_Month == 1]
# Reshape load data so that each 15-minute increment's data
Coincident_Part_Peak_Load_Profile_Data_Month_Reshaped = np.reshape(CPP_Load_Profile_Data_Month, \
(Reshaped_Rows_Num, len(CPP_Load_Profile_Data_Month) / Reshaped_Rows_Num))
CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min_Reshaped = np.reshape(CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min, \
(Reshaped_Rows_Num, len(CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min) / Reshaped_Rows_Num))
CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min_Reshaped = np.reshape(CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min, \
(Reshaped_Rows_Num, len(CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min) / Reshaped_Rows_Num))
CPP_Load_Profile_Data_Month_15_Min = np.mean(Coincident_Part_Peak_Load_Profile_Data_Month_Reshaped, 1)
CPP_Solar_Only_Net_Load_Profile_Data_Month_15_Min = np.mean(CPP_Solar_Only_Net_Load_Profile_Data_Month_5_Min_Reshaped, 1)
CPP_Solar_Storage_Net_Load_Profile_Data_Month_15_Min = np.mean(CPP_Solar_Storage_Net_Load_Profile_Data_Month_5_Min_Reshaped, 1)
P_max_CPP_Month_Baseline = np.max(CPP_Load_Profile_Data_Month_15_Min)
P_max_CPP_Month_with_Solar_Only = np.max(CPP_Solar_Only_Net_Load_Profile_Data_Month_15_Min)
P_max_CPP_Month_with_Solar_and_Storage = np.max(CPP_Solar_Storage_Net_Load_Profile_Data_Month_15_Min)
else:
P_max_CPP_Month_Baseline = 0
P_max_CPP_Month_with_Solar_Only = 0
P_max_CPP_Month_with_Solar_and_Storage = 0
elif delta_t == (60 / 60):
P_max_NC_Month_Baseline = np.max(Load_Profile_Data_Month)
P_max_NC_Month_with_Solar_Only = np.max(Load_Profile_Data_Month - Solar_PV_Profile_Data_Month)
P_max_NC_Month_with_Solar_and_Storage = x_Month[3 * numtsteps, 0]
if Peak_DC > 0:
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
P_max_CPK_Month_Baseline = np.max(Load_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1])
P_max_CPK_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1] - \
Solar_PV_Profile_Data_Month[Summer_Peak_Binary_Data_Month == 1])
else:
P_max_CPK_Month_Baseline = np.max(Load_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1])
P_max_CPK_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1] - \
Solar_PV_Profile_Data_Month[Winter_Peak_Binary_Data_Month == 1])
P_max_CPK_Month_with_Solar_and_Storage = x_Month[3 * numtsteps + 1, 0]
else:
P_max_CPK_Month_Baseline = 0
P_max_CPK_Month_with_Solar_Only = 0
P_max_CPK_Month_with_Solar_and_Storage = 0
if Part_Peak_DC > 0:
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
P_max_CPP_Month_Baseline = np.max(Load_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1])
P_max_CPP_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1] - \
Solar_PV_Profile_Data_Month[Summer_Part_Peak_Binary_Data_Month == 1])
else:
P_max_CPP_Month_Baseline = np.max(Load_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1])
P_max_CPP_Month_with_Solar_Only = np.max(Load_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1] - \
Solar_PV_Profile_Data_Month[Winter_Part_Peak_Binary_Data_Month == 1])
P_max_CPP_Month_with_Solar_and_Storage = x_Month[3 * numtsteps + 2, 0]
else:
P_max_CPP_Month_Baseline = 0
P_max_CPP_Month_with_Solar_Only = 0
P_max_CPP_Month_with_Solar_and_Storage = 0
else:
print('Timestep is larger than 15 minutes. Cannot properly calculate billing demand.')
_Per_Meter_Month_Charge + (
Fixed_Per_Meter_Day_Charge * len(Load_Profile_Data_Month) / (24 * (1 / delta_t)))
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
NC_Demand_Charge_Month_Baseline = Summer_Noncoincident_DC * P_max_NC_Month_Baseline
else:
NC_Demand_Charge_Month_Baseline = Winter_Noncoincident_DC * P_max_NC_Month_Baseline
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
NC_Demand_Charge_Month_with_Solar_Only = Summer_Noncoincident_DC * P_max_NC_Month_with_Solar_Only
else:
NC_Demand_Charge_Month_with_Solar_Only = Winter_Noncoincident_DC * P_max_NC_Month_with_Solar_Only
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
NC_Demand_Charge_Month_with_Solar_and_Storage = Summer_Noncoincident_DC * P_max_NC_Month_with_Solar_and_Storage
else:
NC_Demand_Charge_Month_with_Solar_and_Storage = Winter_Noncoincident_DC * P_max_NC_Month_with_Solar_and_Storage
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
CPK_Demand_Charge_Month_Baseline = Summer_Peak_DC * P_max_CPK_Month_Baseline
else:
CPK_Demand_Charge_Month_Baseline = Winter_Peak_DC * P_max_CPK_Month_Baseline
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
CPK_Demand_Charge_Month_with_Solar_Only = Summer_Peak_DC * P_max_CPK_Month_with_Solar_Only
else:
CPK_Demand_Charge_Month_with_Solar_Only = Winter_Peak_DC * P_max_CPK_Month_with_Solar_Only
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
CPK_Demand_Charge_Month_with_Solar_and_Storage = Summer_Peak_DC * P_max_CPK_Month_with_Solar_and_Storage
else:
CPK_Demand_Charge_Month_with_Solar_and_Storage = Winter_Peak_DC * P_max_CPK_Month_with_Solar_and_Storage
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
CPP_Demand_Charge_Month_Baseline = Summer_Part_Peak_DC * P_max_CPP_Month_Baseline
else:
# --- Coincident part-peak (CPP) demand charges for the current month ---
# NOTE(review): the summer branch for the baseline scenario appears to precede
# this chunk; this line is presumably its winter counterpart — confirm upstream.
CPP_Demand_Charge_Month_Baseline = Winter_Part_Peak_DC * P_max_CPP_Month_Baseline
# Apply the summer part-peak rate during summer months, the winter rate otherwise.
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
    CPP_Demand_Charge_Month_with_Solar_Only = Summer_Part_Peak_DC * P_max_CPP_Month_with_Solar_Only
else:
    CPP_Demand_Charge_Month_with_Solar_Only = Winter_Part_Peak_DC * P_max_CPP_Month_with_Solar_Only
if Month_Iter in range(First_Summer_Month, (Last_Summer_Month + 1)):
    CPP_Demand_Charge_Month_with_Solar_and_Storage = Summer_Part_Peak_DC * P_max_CPP_Month_with_Solar_and_Storage
else:
    CPP_Demand_Charge_Month_with_Solar_and_Storage = Winter_Part_Peak_DC * P_max_CPP_Month_with_Solar_and_Storage
# --- Volumetric energy charges ($): sum(load * rate) * timestep length (h) ---
Energy_Charge_Month_Baseline = np.dot(np.transpose(Load_Profile_Data_Month), Volumetric_Rate_Data_Month) * delta_t
Solar_Only_Net_Load_Profile_Month = Load_Profile_Data_Month - Solar_PV_Profile_Data_Month
Energy_Charge_Month_with_Solar_Only = np.dot(np.transpose(Solar_Only_Net_Load_Profile_Month), Volumetric_Rate_Data_Month) * delta_t
# Net load with storage: charging power adds to load, discharging subtracts.
Solar_Storage_Net_Load_Profile_Month = Load_Profile_Data_Month - Solar_PV_Profile_Data_Month + np.transpose(P_ES_in_Month_Unpadded) - np.transpose(P_ES_out_Month_Unpadded)
Energy_Charge_Month_with_Solar_and_Storage = np.dot(Solar_Storage_Net_Load_Profile_Month, np.reshape(Volumetric_Rate_Data_Month, (len(Volumetric_Rate_Data_Month), 1))) * delta_t
Energy_Charge_Month_with_Solar_and_Storage = Energy_Charge_Month_with_Solar_and_Storage[0, 0]  # unwrap scalar from the 1x1 dot-product result
# Equivalent cycles this month: charge throughput and discharge throughput each
# count as half a cycle, normalized by total storage capacity (efficiency-adjusted).
Cycles_Month = np.sum((P_ES_in_Month_Unpadded * (((Eff_c) / (2 * Total_Storage_Capacity)) * delta_t)) + \
               (P_ES_out_Month_Unpadded * ((1 / (Eff_d * 2 * Total_Storage_Capacity)) * delta_t)))
# Degradation cost ($): same throughput expression, priced at cycle_pen per cycle.
Cycling_Penalty_Month = np.sum((P_ES_in_Month_Unpadded * (((Eff_c * cycle_pen) / (2 * Total_Storage_Capacity)) * delta_t)) + \
                        (P_ES_out_Month_Unpadded * ((cycle_pen / (Eff_d * 2 * Total_Storage_Capacity)) * delta_t)))
# Capacity fade: lithium-ion loses 20% of rated usable capacity linearly over
# Cycle_Life equivalent cycles; flow batteries are modeled with no fade.
if Storage_Type_Input == "Lithium-Ion Battery":
    Usable_Storage_Capacity = Usable_Storage_Capacity - (Usable_Storage_Capacity_Input * (Cycles_Month / Cycle_Life) * 0.2)
elif Storage_Type_Input == "Flow Battery":
    Usable_Storage_Capacity = Usable_Storage_Capacity
# Clamp next month's starting state of charge to the (possibly faded) capacity.
if Next_Month_Initial_Energy_Level > Usable_Storage_Capacity:
    Next_Month_Initial_Energy_Level = Usable_Storage_Capacity
ed)) if P_ES_in.size != 0 else P_ES_in_Month_Unpadded
# --- Append this month's results to the year-to-date accumulators ---
# Pattern throughout: concatenate onto the running array when it is non-empty,
# otherwise seed it with this month's value (reshaped to a column vector where
# the monthly result is a scalar).
P_ES_out = np.concatenate((P_ES_out, P_ES_out_Month_Unpadded)) if P_ES_out.size != 0 else P_ES_out_Month_Unpadded
Ene_Lvl = np.concatenate((Ene_Lvl, Ene_Lvl_Month_Unpadded)) if Ene_Lvl.size != 0 else Ene_Lvl_Month_Unpadded
# Monthly peak-demand values under the solar+storage dispatch.
P_max_NC = np.concatenate((P_max_NC, np.asarray(P_max_NC_Month_with_Solar_and_Storage).reshape((-1,1)))) if P_max_NC.size != 0 else np.asarray(P_max_NC_Month_with_Solar_and_Storage).reshape((-1,1))
P_max_peak = np.concatenate((P_max_peak, np.asarray(P_max_CPK_Month_with_Solar_and_Storage).reshape((-1, 1)))) if P_max_peak.size != 0 else np.asarray(P_max_CPK_Month_with_Solar_and_Storage).reshape((-1, 1))
P_max_part_peak = np.concatenate((P_max_part_peak, np.asarray(P_max_CPP_Month_with_Solar_and_Storage).reshape((-1, 1)))) if P_max_part_peak.size != 0 else np.asarray(P_max_CPP_Month_with_Solar_and_Storage).reshape((-1, 1))
# Monthly bill components (fixed, non-coincident, peak, part-peak, energy) for
# each of the three scenarios: baseline, solar-only, solar+storage.
Fixed_Charge_Vector = np.concatenate((Fixed_Charge_Vector, np.asarray(Fixed_Charge_Month).reshape((-1,1)))) if Fixed_Charge_Vector.size != 0 else np.asarray(Fixed_Charge_Month).reshape((-1,1))
NC_DC_Baseline_Vector = np.concatenate((NC_DC_Baseline_Vector,
                                        np.asarray(NC_Demand_Charge_Month_Baseline).reshape((-1, 1)))) if NC_DC_Baseline_Vector.size != 0 else np.asarray(NC_Demand_Charge_Month_Baseline).reshape((-1,1))
NC_DC_with_Solar_Only_Vector = np.concatenate((NC_DC_with_Solar_Only_Vector,
                                               np.asarray(NC_Demand_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if NC_DC_with_Solar_Only_Vector.size != 0 else np.asarray(NC_Demand_Charge_Month_with_Solar_Only).reshape((-1,1))
NC_DC_with_Solar_and_Storage_Vector = np.concatenate((NC_DC_with_Solar_and_Storage_Vector,
                                                      np.asarray(
                                                          NC_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if NC_DC_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(NC_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
CPK_DC_Baseline_Vector = np.concatenate((CPK_DC_Baseline_Vector,
                                         np.asarray(CPK_Demand_Charge_Month_Baseline).reshape((-1, 1)))) if CPK_DC_Baseline_Vector.size != 0 else np.asarray(CPK_Demand_Charge_Month_Baseline).reshape((-1,1))
CPK_DC_with_Solar_Only_Vector = np.concatenate((CPK_DC_with_Solar_Only_Vector,
                                                np.asarray(CPK_Demand_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if CPK_DC_with_Solar_Only_Vector.size != 0 else np.asarray(CPK_Demand_Charge_Month_with_Solar_Only).reshape((-1,1))
CPK_DC_with_Solar_and_Storage_Vector = np.concatenate((CPK_DC_with_Solar_and_Storage_Vector,
                                                       np.asarray(
                                                           CPK_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if CPK_DC_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(CPK_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
CPP_DC_Baseline_Vector = np.concatenate((CPP_DC_Baseline_Vector,
                                         np.asarray(CPP_Demand_Charge_Month_Baseline).reshape((-1, 1)))) if CPP_DC_Baseline_Vector.size != 0 else np.asarray(CPP_Demand_Charge_Month_Baseline).reshape((-1,1))
CPP_DC_with_Solar_Only_Vector = np.concatenate((CPP_DC_with_Solar_Only_Vector,
                                                np.asarray(CPP_Demand_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if CPP_DC_with_Solar_Only_Vector.size != 0 else np.asarray(CPP_Demand_Charge_Month_with_Solar_Only).reshape((-1,1))
CPP_DC_with_Solar_and_Storage_Vector = np.concatenate((CPP_DC_with_Solar_and_Storage_Vector,
                                                       np.asarray(CPP_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if CPP_DC_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(CPP_Demand_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
Energy_Charge_Baseline_Vector = np.concatenate((Energy_Charge_Baseline_Vector,
                                                np.asarray(Energy_Charge_Month_Baseline).reshape((-1, 1)))) if Energy_Charge_Baseline_Vector.size != 0 else np.asarray(Energy_Charge_Month_Baseline).reshape((-1,1))
Energy_Charge_with_Solar_Only_Vector = np.concatenate((Energy_Charge_with_Solar_Only_Vector,
                                                       np.asarray(Energy_Charge_Month_with_Solar_Only).reshape((-1, 1)))) if Energy_Charge_with_Solar_Only_Vector.size != 0 else np.asarray(Energy_Charge_Month_with_Solar_Only).reshape((-1,1))
Energy_Charge_with_Solar_and_Storage_Vector = np.concatenate((Energy_Charge_with_Solar_and_Storage_Vector,
                                                              np.asarray(Energy_Charge_Month_with_Solar_and_Storage).reshape((-1, 1)))) if Energy_Charge_with_Solar_and_Storage_Vector.size != 0 else \
    np.asarray(Energy_Charge_Month_with_Solar_and_Storage).reshape((-1,1))
# Monthly equivalent cycles and degradation penalties.
Cycles_Vector = np.concatenate((Cycles_Vector, np.asarray(Cycles_Month).reshape((-1,1)))) if Cycles_Vector.size != 0 else np.asarray(Cycles_Month).reshape((-1,1))
Cycling_Penalty_Vector = np.concatenate((Cycling_Penalty_Vector, np.asarray(Cycling_Penalty_Month).reshape((-1,1)))) if Cycling_Penalty_Vector.size != 0 else np.asarray(Cycling_Penalty_Month).reshape((-1,1))
# Report wall-clock runtime of this model run (tstart is set before the
# optimization loop, upstream of this chunk).
tend = time.time()
telapsed = tend - tstart
print('Model Run %0.f complete. Elapsed time to run the optimization model is %0.0f seconds.' % (Model_Run_Number_Input, telapsed))
replace(microsecond=0).isoformat()
# Derive the utility name from the retail-rate name, then strip the
# "<utility> " prefix to get the bare rate name for reporting.
# NOTE(review): no else branch — a rate name matching none of these utilities
# leaves Retail_Rate_Utility undefined and raises NameError two lines below.
if "PG&E" in Retail_Rate_Name_Input:
    Retail_Rate_Utility = "PG&E"
elif "SCE" in Retail_Rate_Name_Input:
    Retail_Rate_Utility = "SCE"
elif "SDG&E" in Retail_Rate_Name_Input:
    Retail_Rate_Utility = "SDG&E"
Retail_Rate_Utility_Plus_Space = Retail_Rate_Utility + " "
Retail_Rate_Name_Output = Retail_Rate_Name_Input.replace(Retail_Rate_Utility_Plus_Space, "")
# Blank solar-profile name in reports when the model has no solar.
if Solar_Profile_Name_Input == "No Solar":
    Solar_Profile_Name_Output = ""
else:
    Solar_Profile_Name_Output = Solar_Profile_Name_Input
# Long-form description of the dispatch algorithm for the report.
if Storage_Control_Algorithm_Name == "OSESMO Economic Dispatch":
    Storage_Control_Algorithm_Description = "Open Source Energy Storage Model - Economic Dispatch"
elif Storage_Control_Algorithm_Name == "OSESMO Non-Economic Solar Self-Supply":
    Storage_Control_Algorithm_Description = "Open Source Energy Storage Model - Non-Economic Solar Self-Supply"
# Reporting fields that this model intentionally leaves blank.
Storage_Control_Algorithms_Parameters_Filename = ""
Other_Incentives_or_Penalities = ""
Output_Summary_Filename = "OSESMO Reporting Inputs and Outputs.csv"
Output_Description_Filename = ""
Output_Visualizations_Filename = "Multiple files - in same folder as Output Summary file."
# Electric-vehicle reporting fields are not modeled here.
EV_Use = ""
EV_Charge = ""
EV_Gas_Savings = ""
EV_GHG_Savings = ""
= 0:
    # Folder-name component recording whether the ITC charging constraint applied
    # (the `if ITC_Constraint_Input == 0:` header precedes this chunk).
    ITC_Constraint_Folder_Name = "No ITC Constraint"
elif ITC_Constraint_Input == 1:
    ITC_Constraint_Folder_Name = "ITC Constraint"
# Temporarily shorten the "no signal" sentinel so the folder name below reads
# "No Emissions Forecast Signal"; the long form is restored after the path is built.
if Emissions_Forecast_Signal_Input == "No Emissions Forecast Signal":
    Emissions_Forecast_Signal_Input = "No"
# Deeply nested output folder whose path segments encode every model input.
Output_Directory_Filepath = os.path.join(Input_Output_Data_Directory_Location, "Models", "OSESMO", "Model Outputs", \
                                         Model_Type_Input, str(Model_Timestep_Resolution) + "-Minute Timestep Resolution", \
                                         Customer_Class_Input, Load_Profile_Name_Input, Retail_Rate_Name_Input, \
                                         Solar_Profile_Name_Input, str(Solar_Size_Input) + " kW Solar", Storage_Type_Input, \
                                         str(Storage_Power_Rating_Input) + " kW " + str(Usable_Storage_Capacity_Input) + " kWh Storage", \
                                         str(int(Single_Cycle_RTE_Input * 100)) + " Percent Single-Cycle RTE", \
                                         str(Parasitic_Storage_Load_Input * 100) + " Percent Parasitic Load", \
                                         Storage_Control_Algorithm_Name, GHG_Reduction_Solution_Input, \
                                         str(Equivalent_Cycling_Constraint_Input) + " Equivalent Cycles Constraint", \
                                         str(int(Annual_RTE_Constraint_Input * 100)) + " Percent Annual RTE Constraint", \
                                         ITC_Constraint_Folder_Name, \
                                         str(Carbon_Adder_Incentive_Value_Input) + " Dollar Carbon Adder Incentive", \
                                         Emissions_Forecast_Signal_Input + " Emissions Forecast Signal")
# Restore the sentinel to its long form for downstream reporting.
if Emissions_Forecast_Signal_Input == "No":
    Emissions_Forecast_Signal_Input = "No Emissions Forecast Signal"
# Create the output directory if exporting data and it does not exist yet.
# os.makedirs replaces os.mkdir: the path built above is deeply nested, and
# os.mkdir raises FileNotFoundError when any intermediate folder is missing.
if Export_Data and os.path.isdir(Output_Directory_Filepath) == False:
    os.makedirs(Output_Directory_Filepath)
_Data)
# Time axis for plots: one datetime per model timestep across the year.
# (A dead `t = np.linspace(1, 35040, 35040)` that was immediately overwritten
# by this assignment has been removed.)
t = [Start_Time_Input + datetime.timedelta(minutes = int(60 * delta_t) * x) for x in range(0, numtsteps_year)]
# Net storage dispatch, flattened to 1-D. Positive values reduce net load
# (discharge); negative values increase it (charge) — see the net-load plots below.
P_ES = np.reshape(P_ES_out - P_ES_in, (numtsteps_year,))
umetric_Rate_Data, 'b-')
# Dual-axis plot: volumetric energy price (left, blue) vs. marginal emissions
# rate (right, red) over the year. The ax1.plot call precedes this chunk.
ax1.set_xlabel('Date & Time')
ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
ax1.set_ylabel('Energy Price ($/kWh)', color='b')
ax1.tick_params('y', colors='b')
ax2 = ax1.twinx()
ax2.plot(t, Marginal_Emissions_Rate_Evaluation_Data, 'r-')
ax2.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
ax2.set_ylabel('Marginal Emissions Rate (metric tons/kWh)', color='r')
ax2.set_title('Electricity Rates and Marginal Emissions Rates')
ax2.tick_params('y', colors='r')
fig.autofmt_xdate()
fig.tight_layout()
plt.show()
# NOTE(review): savefig after plt.show() may write an empty image with some
# interactive backends (the figure is closed when the window is dismissed) —
# consider saving before showing.
if Export_Plots == 1:
    plt.savefig(os.path.join(Output_Directory_Filepath, 'Energy Price and Carbon Plot.png'))
r_Month
# Seasonal masks: summer months are [First_Summer_Month, Last_Summer_Month],
# winter is everything else. (Summer_Binary_Data_1 is defined just above this chunk.)
Summer_Binary_Data_2 = Month_Data <= Last_Summer_Month
Summer_Binary_Data = np.logical_and(Summer_Binary_Data_1, Summer_Binary_Data_2)
Winter_Binary_Data_1 = Month_Data < First_Summer_Month
Winter_Binary_Data_2 = Month_Data > Last_Summer_Month
Winter_Binary_Data = np.logical_or(Winter_Binary_Data_1, Winter_Binary_Data_2)
# Total $/kW demand-charge schedule per timestep: always the seasonal
# non-coincident charge, plus any peak/part-peak charges that apply.
# (The peak/part-peak binary masks are defined upstream of this chunk.)
Total_DC = (Winter_Noncoincident_DC * Winter_Binary_Data) + \
           (Summer_Noncoincident_DC * Summer_Binary_Data)
if Winter_Peak_DC > 0:
    Total_DC = Total_DC + (Winter_Peak_DC * Winter_Peak_Binary_Data)
if Winter_Part_Peak_DC > 0:
    Total_DC = Total_DC + (Winter_Part_Peak_DC * Winter_Part_Peak_Binary_Data)
if Summer_Peak_DC > 0:
    Total_DC = Total_DC + (Summer_Peak_DC * Summer_Peak_Binary_Data)
if Summer_Part_Peak_DC > 0:
    Total_DC = Total_DC + (Summer_Part_Peak_DC * Summer_Part_Peak_Binary_Data)
# Plot the combined demand-charge schedule over the year.
if Show_Plots == 1 or Export_Plots == 1:
    fig, ax = plt.subplots()
    ax.plot(t, Total_DC, 'g-')
    ax.set_xlabel('Date & Time')
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
    ax.set_ylabel('Total Demand Charge ($/kW)')
    ax.set_title('Coincident + Non-Coincident Demand Charge Schedule')
    fig.autofmt_xdate()
    fig.tight_layout()
    plt.show()
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Demand Charge Plot.png'))
nput == "Storage Only":
    # Storage-only case: original vs. net-with-storage load profiles.
    # (The enclosing `if` header testing Model_Type_Input precedes this chunk.)
    fig, ax = plt.subplots()
    ax.plot(t, Load_Profile_Data, 'k-', label = 'Original Load')
    ax.plot(t, Load_Profile_Data - P_ES, 'r-', label = 'Net Load with Storage')
    ax.set_xlabel('Date & Time')
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
    ax.set_ylabel('Load (kW)')
    ax.set_title('Original and Net Load Profiles')
    ax.legend()
    fig.autofmt_xdate()
    fig.tight_layout()
    plt.show()
elif Model_Type_Input == "Solar Plus Storage":
    # Solar-plus-storage case: also show the solar-only intermediate net load.
    fig, ax = plt.subplots()
    ax.plot(t, Load_Profile_Data, 'k-', label = 'Original Load')
    ax.plot(t, Load_Profile_Data - Solar_PV_Profile_Data, 'b-', label='Net Load with Solar Only')
    ax.plot(t, Load_Profile_Data - (Solar_PV_Profile_Data + P_ES), 'r-', label = 'Net Load with Solar + Storage')
    ax.set_xlabel('Date & Time')
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y-%m-%d %H:%M'))
    ax.set_ylabel('Load (kW)')
    ax.set_title('Original and Net Load Profiles')
    ax.legend()
    fig.autofmt_xdate()
    fig.tight_layout()
    plt.show()
if Export_Plots == 1:
    plt.savefig(os.path.join(Output_Directory_Filepath, 'Net Load Plot.png'))
# --- Annual peak demand and total consumption under each scenario ---
# Solar-only metrics are blanked ("") in storage-only runs so the CSV report
# columns stay aligned across model types.
if Model_Type_Input == "Storage Only":
    Annual_Peak_Demand_with_Solar_Only = ""
    Annual_Total_Energy_Consumption_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Peak_Demand_with_Solar_Only = np.max(Load_Profile_Data - Solar_PV_Profile_Data)
    Annual_Total_Energy_Consumption_with_Solar_Only = np.sum(Load_Profile_Data - Solar_PV_Profile_Data) * delta_t
Annual_Peak_Demand_with_Solar_and_Storage = np.max(Load_Profile_Data - (Solar_PV_Profile_Data + P_ES))
Annual_Total_Energy_Consumption_with_Solar_and_Storage = np.sum(Load_Profile_Data - (Solar_PV_Profile_Data + P_ES)) * delta_t
# Percentage reductions relative to the baseline (no solar, no storage).
if Model_Type_Input == "Storage Only":
    Solar_Only_Peak_Demand_Reduction_Percentage = ""
elif Model_Type_Input == "Solar Plus Storage":
    Solar_Only_Peak_Demand_Reduction_Percentage = ((Annual_Peak_Demand_Baseline - Annual_Peak_Demand_with_Solar_Only) / Annual_Peak_Demand_Baseline) * 100
Solar_Storage_Peak_Demand_Reduction_Percentage = ((Annual_Peak_Demand_Baseline - Annual_Peak_Demand_with_Solar_and_Storage) / Annual_Peak_Demand_Baseline) * 100
if Model_Type_Input == "Storage Only":
    Solar_Only_Energy_Consumption_Decrease_Percentage = ""
elif Model_Type_Input == "Solar Plus Storage":
    Solar_Only_Energy_Consumption_Decrease_Percentage = ((Annual_Total_Energy_Consumption_Baseline - Annual_Total_Energy_Consumption_with_Solar_Only) / Annual_Total_Energy_Consumption_Baseline) * 100
Solar_Storage_Energy_Consumption_Decrease_Percentage = ((Annual_Total_Energy_Consumption_Baseline - Annual_Total_Energy_Consumption_with_Solar_and_Storage) / Annual_Total_Energy_Consumption_Baseline) * 100
# Console summary; wording flips between INCREASE/DECREASE based on sign.
print('Baseline annual peak noncoincident demand is {0} kW.'.format(round(Annual_Peak_Demand_Baseline, 2)))
if Model_Type_Input == "Storage Only":
    if Solar_Storage_Peak_Demand_Reduction_Percentage >= 0:
        print('Peak demand with storage is {0} kW, representing a DECREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    elif Solar_Storage_Peak_Demand_Reduction_Percentage < 0:
        print('Peak demand with storage is {0} kW, representing an INCREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(-Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    print('Baseline annual total electricity consumption is {0} kWh.'.format(round(Annual_Total_Energy_Consumption_Baseline, 2)))
    print('Electricity consumption with storage is {0} kWh, representing an INCREASE OF {1}%.'.format(round(Annual_Total_Energy_Consumption_with_Solar_and_Storage, 2),
                                                                                                      round(-Solar_Storage_Energy_Consumption_Decrease_Percentage, 2)))
elif Model_Type_Input == "Solar Plus Storage":
    print('Peak demand with solar only is {0} kW, representing a DECREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_Only, 2), round(Solar_Only_Peak_Demand_Reduction_Percentage, 2)))
    if Solar_Storage_Peak_Demand_Reduction_Percentage >= 0:
        print('Peak demand with solar and storage is {0} kW, representing a DECREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    elif Solar_Storage_Peak_Demand_Reduction_Percentage < 0:
        print('Peak demand with solar and storage is {0} kW, representing an INCREASE OF {1}%.'.format(round(Annual_Peak_Demand_with_Solar_and_Storage, 2), round(-Solar_Storage_Peak_Demand_Reduction_Percentage, 2)))
    print('Baseline annual total electricity consumption is {0} kWh.'.format(round(Annual_Total_Energy_Consumption_Baseline, 2)))
    print('Electricity consumption with solar only is {0} kWh, representing a DECREASE OF {1}%.'.format(round(Annual_Total_Energy_Consumption_with_Solar_Only, 2),
                                                                                                        round(Solar_Only_Energy_Consumption_Decrease_Percentage, 2)))
    print('Electricity consumption with solar and storage is {0} kWh, representing a DECREASE OF {1}%.'.format(round(Annual_Total_Energy_Consumption_with_Solar_and_Storage, 2),
                                                                                                               round(Solar_Storage_Energy_Consumption_Decrease_Percentage, 2)))
_Baseline = np.concatenate((Fixed_Charge_Vector, NC_DC_Baseline_Vector, CPK_DC_Baseline_Vector, CPP_DC_Baseline_Vector, Energy_Charge_Baseline_Vector), axis = 1)
# Annual bill components per scenario, stacked as a 3x1 column:
# row 0 = fixed charges, row 1 = all demand charges, row 2 = energy charges.
Annual_Costs_Vector_Baseline = np.concatenate((np.asarray(np.sum(Fixed_Charge_Vector)).reshape(1, -1), \
                                               np.asarray(np.sum(NC_DC_Baseline_Vector) + np.sum(CPK_DC_Baseline_Vector) + np.sum(CPP_DC_Baseline_Vector)).reshape(1, -1), \
                                               np.asarray(np.sum(Energy_Charge_Baseline_Vector)).reshape(1, -1)), axis = 0)
Annual_Demand_Charge_Cost_Baseline = Annual_Costs_Vector_Baseline[1, 0]
Annual_Energy_Charge_Cost_Baseline = Annual_Costs_Vector_Baseline[2, 0]
# Monthly cost matrix: 12 rows (months) x 5 columns
# (fixed, non-coincident DC, peak DC, part-peak DC, energy charge).
Monthly_Costs_Matrix_with_Solar_Only = np.concatenate((Fixed_Charge_Vector, NC_DC_with_Solar_Only_Vector, CPK_DC_with_Solar_Only_Vector, CPP_DC_with_Solar_Only_Vector, Energy_Charge_with_Solar_Only_Vector), axis = 1)
Annual_Costs_Vector_with_Solar_Only = np.concatenate((np.asarray(np.sum(Fixed_Charge_Vector)).reshape(1, -1), \
                                                      np.asarray(np.sum(NC_DC_with_Solar_Only_Vector) + np.sum(CPK_DC_with_Solar_Only_Vector) + np.sum(CPP_DC_with_Solar_Only_Vector)).reshape(1, -1), \
                                                      np.asarray(np.sum(Energy_Charge_with_Solar_Only_Vector)).reshape(1, -1)), axis = 0)
# Solar-only scalars are blanked ("") in storage-only runs for report alignment.
if Model_Type_Input == "Storage Only":
    Annual_Demand_Charge_Cost_with_Solar_Only = ""
    Annual_Energy_Charge_Cost_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Demand_Charge_Cost_with_Solar_Only = Annual_Costs_Vector_with_Solar_Only[1, 0]
    Annual_Energy_Charge_Cost_with_Solar_Only = Annual_Costs_Vector_with_Solar_Only[2, 0]
Monthly_Costs_Matrix_with_Solar_and_Storage = np.concatenate((Fixed_Charge_Vector, NC_DC_with_Solar_and_Storage_Vector, CPK_DC_with_Solar_and_Storage_Vector, CPP_DC_with_Solar_and_Storage_Vector, \
                                                              Energy_Charge_with_Solar_and_Storage_Vector), axis = 1)
Annual_Costs_Vector_with_Solar_and_Storage = np.concatenate((np.asarray(np.sum(Fixed_Charge_Vector)).reshape(1, -1), \
                                                             np.asarray(np.sum(NC_DC_with_Solar_and_Storage_Vector) + np.sum(CPK_DC_with_Solar_and_Storage_Vector) + np.sum(CPP_DC_with_Solar_and_Storage_Vector)).reshape(1, -1), \
                                                             np.asarray(np.sum(Energy_Charge_with_Solar_and_Storage_Vector)).reshape(1, -1)), axis = 0)
Annual_Demand_Charge_Cost_with_Solar_and_Storage = Annual_Costs_Vector_with_Solar_and_Storage[1, 0]
Annual_Energy_Charge_Cost_with_Solar_and_Storage = Annual_Costs_Vector_with_Solar_and_Storage[2, 0]
# Common y-axis limits so the three monthly-cost bar charts are comparable:
# 10% headroom above the largest monthly bill, and below the smallest if negative.
Maximum_Monthly_Bill_Baseline = np.max(np.sum(Monthly_Costs_Matrix_Baseline, axis = 1))
Minimum_Monthly_Bill_Baseline = np.min(np.sum(Monthly_Costs_Matrix_Baseline, axis = 1))
Maximum_Monthly_Bill_with_Solar_Only = np.max(np.sum(Monthly_Costs_Matrix_with_Solar_Only, axis = 1))
Minimum_Monthly_Bill_with_Solar_Only = np.min(np.sum(Monthly_Costs_Matrix_with_Solar_Only, axis = 1))
Maximum_Monthly_Bill_with_Solar_and_Storage = np.max(np.sum(Monthly_Costs_Matrix_with_Solar_and_Storage, axis = 1))
Minimum_Monthly_Bill_with_Solar_and_Storage = np.min(np.sum(Monthly_Costs_Matrix_with_Solar_and_Storage, axis = 1))
Maximum_Monthly_Bill = np.max((Maximum_Monthly_Bill_Baseline, \
                               Maximum_Monthly_Bill_with_Solar_Only, \
                               Maximum_Monthly_Bill_with_Solar_and_Storage))
Minimum_Monthly_Bill = np.min((Minimum_Monthly_Bill_Baseline, \
                               Minimum_Monthly_Bill_with_Solar_Only, \
                               Minimum_Monthly_Bill_with_Solar_and_Storage))
Max_Monthly_Bill_ylim = Maximum_Monthly_Bill * 1.1
if Minimum_Monthly_Bill >= 0:
    Min_Monthly_Bill_ylim = 0
elif Minimum_Monthly_Bill < 0:
    Min_Monthly_Bill_ylim = Minimum_Monthly_Bill * 1.1
def stacked_bar(data, series_labels, category_labels=None,
                show_values=False, value_format="{}", y_label=None,
                grid=True, reverse=False):
    """Draw a stacked bar chart onto the current matplotlib figure.

    Parameters
    ----------
    data : 2-D array-like
        Rows are stack layers (series); columns are bars (categories).
    series_labels : list of str
        Legend label for each row of ``data``.
    category_labels : list of str, optional
        X-axis tick label for each column; omitted if None.
    show_values : bool
        If True, annotate each bar segment with its height.
    value_format : str
        ``str.format`` template for the segment annotations.
    y_label : str, optional
        Y-axis label; omitted if None.
    grid : bool
        If True, draw a background grid.
    reverse : bool
        If True, plot the categories (columns) in reverse order.
    """
    ny = len(data[0])
    ind = list(range(ny))
    axes = []
    cum_size = np.zeros(ny)  # running stack height per category
    data = np.array(data)
    if reverse:
        data = np.flip(data, axis=1)
        # Bug fix: reversed(None) raises TypeError — only reverse the labels
        # when they were actually supplied.
        if category_labels is not None:
            category_labels = list(reversed(category_labels))
    # Each series is drawn as one bar layer stacked on the cumulative height.
    for i, row_data in enumerate(data):
        axes.append(plt.bar(ind, row_data, bottom=cum_size,
                            label=series_labels[i]))
        cum_size += row_data
    if category_labels:
        plt.xticks(ind, category_labels)
    if y_label:
        plt.ylabel(y_label)
    plt.legend()
    if grid:
        plt.grid()
    if show_values:
        # Center the formatted height inside each bar segment.
        for axis in axes:
            for bar in axis:
                w, h = bar.get_width(), bar.get_height()
                plt.text(bar.get_x() + w / 2, bar.get_y() + h / 2,
                         value_format.format(h), ha="center",
                         va="center")
# Stacked bar chart of the baseline monthly bill components.
if Show_Plots == 1 or Export_Plots == 1:
    series_labels = ['Fixed Charges', 'Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
    category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.figure()
    # stacked_bar expects rows = series, so the 12x5 monthly matrix is transposed.
    stacked_bar(np.transpose(Monthly_Costs_Matrix_Baseline),
                series_labels,
                category_labels=category_labels,
                show_values=False,
                value_format="{}",
                y_label="Cost ($/Month)")
    plt.xlabel('Month')
    # Shared y-limits keep this chart comparable with the other cost charts.
    plt.ylim(bottom=Min_Monthly_Bill_ylim, top=Max_Monthly_Bill_ylim)
    plt.title('Monthly Costs, Without Storage')
    plt.show()
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs Baseline Plot.png'))
# Stacked bar chart of monthly bill components with solar only
# (solar-plus-storage runs only).
if Model_Type_Input == "Solar Plus Storage":
    if Show_Plots == 1 or Export_Plots == 1:
        series_labels = ['Fixed Charges', 'Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
        category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
        plt.figure()
        stacked_bar(np.transpose(Monthly_Costs_Matrix_with_Solar_Only),
                    series_labels,
                    category_labels=category_labels,
                    show_values=False,
                    value_format="{}",
                    y_label="Cost ($/Month)")
        plt.xlabel('Month')
        plt.ylim(bottom = Min_Monthly_Bill_ylim, top = Max_Monthly_Bill_ylim)
        plt.title('Monthly Costs, With Solar Only')
        plt.show()
        if Export_Plots == 1:
            plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs with Solar Only Plot.png'))
# Stacked bar chart of monthly bill components with storage (and solar if modeled).
if Show_Plots == 1 or Export_Plots == 1:
    series_labels = ['Fixed Charges', 'Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
    category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.figure()
    stacked_bar(np.transpose(Monthly_Costs_Matrix_with_Solar_and_Storage),
                series_labels,
                category_labels=category_labels,
                show_values=False,
                value_format="{}",
                y_label="Cost ($/Month)")
    plt.xlabel('Month')
    plt.ylim(bottom=Min_Monthly_Bill_ylim, top=Max_Monthly_Bill_ylim)
    plt.title('Monthly Costs, With Storage')
    plt.show()
    if Export_Plots == 1:
        # Filename records whether solar was part of the scenario.
        if Model_Type_Input == "Storage Only":
            plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs with Storage Plot.png'))
        elif Model_Type_Input == "Solar Plus Storage":
            plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Costs with Solar and Storage Plot.png'))
# Monthly bill savings attributable to storage alone: measured against the
# baseline in storage-only runs, and against the solar-only bill otherwise.
if Model_Type_Input == "Storage Only":
    Monthly_Savings_Matrix_From_Storage = Monthly_Costs_Matrix_Baseline - Monthly_Costs_Matrix_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Monthly_Savings_Matrix_From_Storage = Monthly_Costs_Matrix_with_Solar_Only - Monthly_Costs_Matrix_with_Solar_and_Storage
# Drop column 0 (fixed charges — identical across scenarios, so zero savings).
Monthly_Savings_Matrix_Plot = Monthly_Savings_Matrix_From_Storage[:, [1, 2, 3, 4]]
if Show_Plots == 1 or Export_Plots == 1:
    series_labels = ['Max DC', 'Peak DC', 'Part-Peak DC', 'Energy Charge']
    category_labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
    plt.figure()
    stacked_bar(np.transpose(Monthly_Savings_Matrix_Plot),
                series_labels,
                category_labels=category_labels,
                show_values=False,
                value_format="{}",
                y_label="Savings ($/Month)")
    plt.xlabel('Month')
    plt.title('Monthly Savings From Storage')
    plt.show()
    if Export_Plots == 1:
        plt.savefig(os.path.join(Output_Directory_Filepath, 'Monthly Savings from Storage Plot.png'))
mer_Bill_Baseline = np.sum(np.sum(Monthly_Costs_Matrix_Baseline))
# --- Annual customer bills and simple-payback calculations ---
if Model_Type_Input == "Storage Only":
    Annual_Customer_Bill_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Customer_Bill_with_Solar_Only = np.sum(Annual_Costs_Vector_with_Solar_Only)
Annual_Customer_Bill_with_Solar_and_Storage = np.sum(Annual_Costs_Vector_with_Solar_and_Storage)
# Savings from storage are incremental: relative to baseline in storage-only
# runs, relative to the solar-only bill in solar-plus-storage runs.
if Model_Type_Input == "Storage Only":
    Annual_Customer_Bill_Savings_from_Storage = Annual_Customer_Bill_Baseline - Annual_Customer_Bill_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Annual_Customer_Bill_Savings_from_Solar = Annual_Customer_Bill_Baseline - Annual_Customer_Bill_with_Solar_Only
    Annual_Customer_Bill_Savings_from_Solar_Percent = (Annual_Customer_Bill_Savings_from_Solar / Annual_Customer_Bill_Baseline)
    Annual_Customer_Bill_Savings_from_Storage = Annual_Customer_Bill_with_Solar_Only - Annual_Customer_Bill_with_Solar_and_Storage
    Annual_Customer_Bill_Savings_from_Storage_Percent = (Annual_Customer_Bill_Savings_from_Storage / Annual_Customer_Bill_Baseline)
# Simple payback = installed cost / annual bill savings (no incentives,
# no discounting). Division by zero/negative savings is not guarded here.
if Model_Type_Input == "Solar Plus Storage":
    Solar_Installed_Cost = Solar_Size_Input * Solar_Installed_Cost_per_kW
    Solar_Simple_Payback = Solar_Installed_Cost / Annual_Customer_Bill_Savings_from_Solar
    print('Annual cost savings from solar is ${0}, representing {1}% of the original ${2} bill.'.format(
        int(Annual_Customer_Bill_Savings_from_Solar), round(Annual_Customer_Bill_Savings_from_Solar_Percent * 100, 2),
        int(Annual_Customer_Bill_Baseline)))
    print('The solar PV system has a simple payback of {0} years, not including incentives.'.format(
        round(Solar_Simple_Payback, 1)))
Storage_Installed_Cost = Total_Storage_Capacity * Storage_Installed_Cost_per_kWh
Storage_Simple_Payback = Storage_Installed_Cost / Annual_Customer_Bill_Savings_from_Storage
print('Annual cost savings from storage is ${0}, representing {1}% of the original ${2} bill.'.format(
    int(Annual_Customer_Bill_Savings_from_Storage), round(Annual_Customer_Bill_Savings_from_Storage_Percent * 100, 2),
    int(Annual_Customer_Bill_Baseline)))
print('The storage system has a simple payback of {0} years, not including incentives.'.format(
    round(Storage_Simple_Payback, 1)))
## Report Cycling/Degradation Penalty
# Annual equivalent cycles, total degradation cost, and capacity fade
# (rated usable capacity minus what remains after the year of cycling).
Annual_Equivalent_Storage_Cycles = np.sum(Cycles_Vector)
Annual_Cycling_Penalty = np.sum(Cycling_Penalty_Vector)
Annual_Capacity_Fade = Usable_Storage_Capacity_Input - Usable_Storage_Capacity
print('The battery cycles {0} times annually, with a degradation cost of ${1}, and experiences capacity fade of {2} kWh.'.format(
    int(Annual_Equivalent_Storage_Cycles), int(Annual_Cycling_Penalty), round(Annual_Capacity_Fade, 1)))
## Report Operational/"SGIP" Round-Trip Efficiency
# Total discharged energy divided by total charged energy over the year.
Annual_RTE = (np.sum(P_ES_out) * delta_t) / (np.sum(P_ES_in) * delta_t)
print('The battery has an Annual Operational/SGIP Round-Trip Efficiency of {0}%.'.format(
    round(Annual_RTE * 100, 2)))
## Report Operational/"SGIP" Capacity Factor
# The SGIP Handbook uses the following definition of capacity factor for
# storage resources, based on the assumption that 60% of hours are
# available for discharge. The term "hours of data available" is equal to
# the number of hours in the year here. For actual operational data, it's
# the number of hours for which metered data exists.
Operational_Capacity_Factor = ((np.sum(P_ES_out) * delta_t) / ((len(Load_Profile_Data) * delta_t) * Storage_Power_Rating_Input * 0.6))
print('The battery has an Operational/SGIP Capacity Factor of {0}%.'.format(
    round(Operational_Capacity_Factor * 100, 2)))
Grid_Cost_Baseline = np.dot(Generation_Cost_Data + Representative_Distribution_Cost_Data, Load_Profile_Data) * (1 / 1000) * delta_t
# --- Estimated utility grid costs (generation + representative distribution,
# $/MWh inputs scaled by 1/1000 to $/kWh; transmission excluded) ---
# NOTE(review): Annual_Grid_Cost_Baseline is defined just above this chunk.
if Model_Type_Input == "Solar Plus Storage":
    Annual_Grid_Cost_with_Solar_Only = np.dot(Generation_Cost_Data + Representative_Distribution_Cost_Data, Load_Profile_Data - Solar_PV_Profile_Data) * (1 / 1000) * delta_t
else:
    Annual_Grid_Cost_with_Solar_Only = ""
Annual_Grid_Cost_with_Solar_and_Storage = np.dot(Generation_Cost_Data + Representative_Distribution_Cost_Data, Load_Profile_Data - Solar_PV_Profile_Data - \
                                                 P_ES_out.reshape((numtsteps_year,)) + P_ES_in.reshape((numtsteps_year,))) * (1 / 1000) * delta_t
# Per-timestep grid costs, split into two columns: generation, distribution.
Grid_Cost_Timestep_Baseline = np.concatenate((np.multiply(Generation_Cost_Data, Load_Profile_Data).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t, \
                                              np.multiply(Representative_Distribution_Cost_Data, Load_Profile_Data).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t), axis = 1)
# Aggregate the per-timestep costs into monthly (12 x 2) totals.
Grid_Cost_Month_Baseline = np.array([])
for Month_Iter in range(1, 12 + 1):
    Grid_Cost_Single_Month_Baseline = np.sum(Grid_Cost_Timestep_Baseline[Month_Data == Month_Iter,:], axis = 0).reshape((1,2))
    Grid_Cost_Month_Baseline = np.concatenate((Grid_Cost_Month_Baseline, Grid_Cost_Single_Month_Baseline), axis = 0) if Grid_Cost_Month_Baseline.size != 0 else Grid_Cost_Single_Month_Baseline
Grid_Cost_Timestep_with_Solar_Only = np.concatenate((np.multiply(Generation_Cost_Data, (Load_Profile_Data - Solar_PV_Profile_Data)).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t, \
                                                     np.multiply(Representative_Distribution_Cost_Data, (Load_Profile_Data - Solar_PV_Profile_Data)).reshape((numtsteps_year,1)) * (1 / 1000) * delta_t), axis = 1)
Grid_Cost_Month_with_Solar_Only = np.array([])
for Month_Iter in range(1, 12 + 1):
    Grid_Cost_Single_Month_with_Solar_Only = np.sum(Grid_Cost_Timestep_with_Solar_Only[Month_Data == Month_Iter,:], axis = 0).reshape((1,2))
    Grid_Cost_Month_with_Solar_Only = np.concatenate((Grid_Cost_Month_with_Solar_Only, Grid_Cost_Single_Month_with_Solar_Only), axis = 0) if Grid_Cost_Month_with_Solar_Only.size != 0 else Grid_Cost_Single_Month_with_Solar_Only
Grid_Cost_Timestep_with_Solar_and_Storage = np.concatenate((np.multiply(Generation_Cost_Data,
                                                                        (Load_Profile_Data - Solar_PV_Profile_Data - P_ES_out.reshape((numtsteps_year,)) + P_ES_in.reshape((numtsteps_year,)))).reshape((numtsteps_year,1)) *
                                                            (1 / 1000) * delta_t, \
                                                            np.multiply(Representative_Distribution_Cost_Data,
                                                                        (Load_Profile_Data - Solar_PV_Profile_Data - P_ES_out.reshape((numtsteps_year,)) + P_ES_in.reshape((numtsteps_year,)))).reshape((numtsteps_year,1)) *
                                                            (1 / 1000) * delta_t), axis = 1)
Grid_Cost_Month_with_Solar_and_Storage = np.array([])
for Month_Iter in range(1, 12 + 1):
    Grid_Cost_Single_Month_with_Solar_and_Storage = np.sum(Grid_Cost_Timestep_with_Solar_and_Storage[Month_Data == Month_Iter,:], axis = 0).reshape((1,2))
    Grid_Cost_Month_with_Solar_and_Storage = np.concatenate((Grid_Cost_Month_with_Solar_and_Storage, Grid_Cost_Single_Month_with_Solar_and_Storage), axis = 0) if \
        Grid_Cost_Month_with_Solar_and_Storage.size != 0 else Grid_Cost_Single_Month_with_Solar_and_Storage
# Monthly grid-cost savings attributable to storage alone.
if Model_Type_Input == "Storage Only":
    Grid_Cost_Savings_Month_from_Storage = Grid_Cost_Month_Baseline - Grid_Cost_Month_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Grid_Cost_Savings_Month_from_Storage = Grid_Cost_Month_with_Solar_Only - Grid_Cost_Month_with_Solar_and_Storage
# Console summary; wording flips between INCREASES/DECREASES based on sign.
if Model_Type_Input == "Solar Plus Storage":
    print('Installing solar DECREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
        round(Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_Only, 2)))
if Model_Type_Input == "Storage Only":
    if Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_and_Storage < 0:
        print('Installing energy storage INCREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            -round(Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
    else:
        print('Installing energy storage DECREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            round(Annual_Grid_Cost_Baseline - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
elif Model_Type_Input == "Solar Plus Storage":
    if Annual_Grid_Cost_with_Solar_Only - Annual_Grid_Cost_with_Solar_and_Storage < 0:
        print('Installing energy storage INCREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            -round(Annual_Grid_Cost_with_Solar_Only - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
    else:
        print('Installing energy storage DECREASES estimated utility grid costs (not including transmission costs, and using representative distribution costs) by ${0} per year.'.format(
            round(Annual_Grid_Cost_with_Solar_Only - Annual_Grid_Cost_with_Solar_and_Storage, 2)))
# --- Marginal GHG emissions (metric tons/year): emissions rate dotted with
# net load, scaled from kg to metric tons via 1/1000 and by timestep length ---
Annual_GHG_Emissions_Baseline = np.dot(Marginal_Emissions_Rate_Evaluation_Data, Load_Profile_Data) * (1 / 1000) * delta_t
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_with_Solar_Only = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_GHG_Emissions_with_Solar_Only = np.dot(Marginal_Emissions_Rate_Evaluation_Data, (Load_Profile_Data - Solar_PV_Profile_Data)) * (1 / 1000) * delta_t
Annual_GHG_Emissions_with_Solar_and_Storage = np.dot(Marginal_Emissions_Rate_Evaluation_Data,
                                                     (Load_Profile_Data - (Solar_PV_Profile_Data + P_ES_out.reshape((numtsteps_year,)) - P_ES_in.reshape((numtsteps_year,))))) * (1 / 1000) * delta_t
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_Reduction_from_Solar = ""
elif Model_Type_Input == "Solar Plus Storage":
    Annual_GHG_Emissions_Reduction_from_Solar = Annual_GHG_Emissions_Baseline - Annual_GHG_Emissions_with_Solar_Only
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_Reduction_from_Storage = Annual_GHG_Emissions_Baseline - Annual_GHG_Emissions_with_Solar_and_Storage
elif Model_Type_Input == "Solar Plus Storage":
    Annual_GHG_Emissions_Reduction_from_Storage = Annual_GHG_Emissions_with_Solar_Only - Annual_GHG_Emissions_with_Solar_and_Storage
if Model_Type_Input == "Storage Only":
    Annual_GHG_Emissions_Reduction_from_Solar_Percent = ""
elif Model_Type_Input == "Solar Plus Storage":
    # NOTE(review): these percentages are hard-coded to 0 — the printed
    # "{0}% of baseline emissions" figures below will always read 0. Presumably
    # intended to be reduction / Annual_GHG_Emissions_Baseline; confirm.
    Annual_GHG_Emissions_Reduction_from_Solar_Percent = 0
    Annual_GHG_Emissions_Reduction_from_Storage_Percent = 0
# Console summary; wording flips between INCREASES/DECREASES based on sign.
if Model_Type_Input == "Solar Plus Storage":
    print('Installing solar DECREASES marginal carbon emissions by {0} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Solar, 2)))
    print('This is equivalent to {0}% of baseline emissions, and brings total emissions to {1} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Solar_Percent * 100, 2), round(Annual_GHG_Emissions_with_Solar_Only, 2)))
if Annual_GHG_Emissions_Reduction_from_Storage < 0:
    print('Installing energy storage INCREASES marginal carbon emissions by {0} metric tons per year.'.format(
        -round(Annual_GHG_Emissions_Reduction_from_Storage, 2)))
    print('This is equivalent to {0}% of baseline emissions, and brings total emissions to {1} metric tons per year.'.format(
        -round(Annual_GHG_Emissions_Reduction_from_Storage_Percent * 100, 2), round(Annual_GHG_Emissions_with_Solar_and_Storage, 2)))
else:
    print('Installing energy storage DECREASES marginal carbon emissions by {0} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Storage, 2)))
    print('This is equivalent to {0}% of baseline emissions, and brings total emissions to {1} metric tons per year.'.format(
        round(Annual_GHG_Emissions_Reduction_from_Storage_Percent * 100, 2), round(Annual_GHG_Emissions_with_Solar_and_Storage, 2)))
== 0:
plt.close('all')
_Outputs = np.array([Modeling_Team_Input, Model_Run_Number_Input, Model_Run_Date_Time, Model_Type_Input, Model_Timestep_Resolution, \
Customer_Class_Input, Load_Profile_Master_Index, Load_Profile_Name_Input, \
Retail_Rate_Master_Index, Retail_Rate_Utility, Retail_Rate_Name_Output, Retail_Rate_Effective_Date, \
Solar_Profile_Master_Index, Solar_Profile_Name_Output, Solar_Profile_Description, Solar_Size_Input, \
Storage_Type_Input, Storage_Power_Rating_Input, Usable_Storage_Capacity_Input, Single_Cycle_RTE_Input, Parasitic_Storage_Load_Input, \
Storage_Control_Algorithm_Name, Storage_Control_Algorithm_Description, Storage_Control_Algorithms_Parameters_Filename, \
GHG_Reduction_Solution_Input, Equivalent_Cycling_Constraint_Input, Annual_RTE_Constraint_Input, ITC_Constraint_Input, \
Carbon_Adder_Incentive_Value_Input, Other_Incentives_or_Penalities, Emissions_Forecast_Signal_Input, \
Annual_GHG_Emissions_Baseline, Annual_GHG_Emissions_with_Solar_Only, Annual_GHG_Emissions_with_Solar_and_Storage, \
Annual_Customer_Bill_Baseline, Annual_Customer_Bill_with_Solar_Only, Annual_Customer_Bill_with_Solar_and_Storage, \
Annual_Grid_Cost_Baseline, Annual_Grid_Cost_with_Solar_Only, Annual_Grid_Cost_with_Solar_and_Storage, \
Annual_Equivalent_Storage_Cycles, Annual_RTE, Operational_Capacity_Factor, \
Annual_Demand_Charge_Cost_Baseline, Annual_Demand_Charge_Cost_with_Solar_Only, Annual_Demand_Charge_Cost_with_Solar_and_Storage, \
Annual_Energy_Charge_Cost_Baseline, Annual_Energy_Charge_Cost_with_Solar_Only, Annual_Energy_Charge_Cost_with_Solar_and_Storage, \
Annual_Peak_Demand_Baseline, Annual_Peak_Demand_with_Solar_Only, Annual_Peak_Demand_with_Solar_and_Storage, \
Annual_Total_Energy_Consumption_Baseline, Annual_Total_Energy_Consumption_with_Solar_Only, Annual_Total_Energy_Consumption_with_Solar_and_Storage, \
Output_Summary_Filename, Output_Description_Filename, Output_Visualizations_Filename, \
EV_Use, EV_Charge, EV_Gas_Savings, EV_GHG_Savings]).reshape((1, 62))
Model_Inputs_and_Outputs = pd.DataFrame(Model_Inputs_and_Outputs, columns = ["Modeling_Team_Input", "Model_Run_Number_Input", "Model_Run_Date_Time", "Model_Type_Input", "Model_Timestep_Resolution", \
"Customer_Class_Input", "Load_Profile_Master_Index", "Load_Profile_Name_Input", \
"Retail_Rate_Master_Index", "Retail_Rate_Utility", "Retail_Rate_Name_Output", "Retail_Rate_Effective_Date", \
"Solar_Profile_Master_Index", "Solar_Profile_Name_Output", "Solar_Profile_Description", "Solar_Size_Input", \
"Storage_Type_Input", "Storage_Power_Rating_Input", "Usable_Storage_Capacity_Input", "Single_Cycle_RTE_Input", "Parasitic_Storage_Load_Input", \
"Storage_Control_Algorithm_Name", "Storage_Control_Algorithm_Description", "Storage_Control_Algorithms_Parameters_Filename", \
"GHG_Reduction_Solution_Input", "Equivalent_Cycling_Constraint_Input", "Annual_RTE_Constraint_Input", "ITC_Constraint_Input", \
"Carbon_Adder_Incentive_Value_Input", "Other_Incentives_or_Penalities", "Emissions_Forecast_Signal_Input", \
"Annual_GHG_Emissions_Baseline", "Annual_GHG_Emissions_with_Solar_Only", "Annual_GHG_Emissions_with_Solar_and_Storage", \
"Annual_Customer_Bill_Baseline", "Annual_Customer_Bill_with_Solar_Only", "Annual_Customer_Bill_with_Solar_and_Storage", \
"Annual_Grid_Cost_Baseline", "Annual_Grid_Cost_with_Solar_Only", "Annual_Grid_Cost_with_Solar_and_Storage", \
"Annual_Equivalent_Storage_Cycles", "Annual_RTE", "Operational_Capacity_Factor", \
"Annual_Demand_Charge_Cost_Baseline", "Annual_Demand_Charge_Cost_with_Solar_Only", "Annual_Demand_Charge_Cost_with_Solar_and_Storage", \
"Annual_Energy_Charge_Cost_Baseline", "Annual_Energy_Charge_Cost_with_Solar_Only", "Annual_Energy_Charge_Cost_with_Solar_and_Storage", \
"Annual_Peak_Demand_Baseline", "Annual_Peak_Demand_with_Solar_Only", "Annual_Peak_Demand_with_Solar_and_Storage", \
"Annual_Total_Energy_Consumption_Baseline", "Annual_Total_Energy_Consumption_with_Solar_Only", "Annual_Total_Energy_Consumption_with_Solar_and_Storage", \
"Output_Summary_Filename", "Output_Description_Filename", "Output_Visualizations_Filename", \
"EV_Use", "EV_Charge", "EV_Gas_Savings", "EV_GHG_Savings"])
Storage_Dispatch_Outputs = np.array([t, -P_ES]).transpose()
Storage_Dispatch_Outputs = pd.DataFrame(Storage_Dispatch_Outputs, columns = ["Date_Time_Pacific_No_DST", "Storage_Output_kW"])
if Export_Data == 1:
Model_Inputs_and_Outputs.to_csv(os.path.join(Output_Directory_Filepath, Output_Summary_Filename), index = False)
Storage_Dispatch_Outputs.to_csv(os.path.join(Output_Directory_Filepath, "Storage Dispatch Profile Output.csv"), index = False)
P_ES_inverted = -P_ES
return P_ES_inverted | true | true |
1c2b73de6e210c7cc184c6ee35f19a2fc5bc0c04 | 2,465 | py | Python | research/cv/single_path_nas/export.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 1 | 2021-11-18T08:17:44.000Z | 2021-11-18T08:17:44.000Z | research/cv/single_path_nas/export.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | null | null | null | research/cv/single_path_nas/export.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 2 | 2019-09-01T06:17:04.000Z | 2019-10-04T08:39:45.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into air, onnx or mindir model#################
python export.py
"""
import argparse
import numpy as np
from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context
import src.spnasnet as spnasnet
from src.config import imagenet_cfg
parser = argparse.ArgumentParser(description='single-path-nas export')
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.")
parser.add_argument("--file_name", type=str, default="single-path-nas", help="output file name.")
parser.add_argument('--width', type=int, default=224, help='input width')
parser.add_argument('--height', type=int, default=224, help='input height')
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format")
parser.add_argument("--device_target", type=str, default="Ascend",
choices=["Ascend",], help="device target(default: Ascend)")
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
if args.device_target == "Ascend":
context.set_context(device_id=args.device_id)
else:
raise ValueError("Unsupported platform.")
if __name__ == '__main__':
net = spnasnet.spnasnet(num_classes=imagenet_cfg.num_classes)
assert args.ckpt_file is not None, "checkpoint_path is None."
param_dict = load_checkpoint(args.ckpt_file)
load_param_into_net(net, param_dict)
input_arr = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))
export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
| 44.818182 | 119 | 0.720487 |
import argparse
import numpy as np
from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context
import src.spnasnet as spnasnet
from src.config import imagenet_cfg
parser = argparse.ArgumentParser(description='single-path-nas export')
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.")
parser.add_argument("--file_name", type=str, default="single-path-nas", help="output file name.")
parser.add_argument('--width', type=int, default=224, help='input width')
parser.add_argument('--height', type=int, default=224, help='input height')
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format")
parser.add_argument("--device_target", type=str, default="Ascend",
choices=["Ascend",], help="device target(default: Ascend)")
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
if args.device_target == "Ascend":
context.set_context(device_id=args.device_id)
else:
raise ValueError("Unsupported platform.")
if __name__ == '__main__':
net = spnasnet.spnasnet(num_classes=imagenet_cfg.num_classes)
assert args.ckpt_file is not None, "checkpoint_path is None."
param_dict = load_checkpoint(args.ckpt_file)
load_param_into_net(net, param_dict)
input_arr = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))
export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
| true | true |
1c2b74326eae540c91b2eb24110cab90bd010e16 | 3,720 | py | Python | scripts/fonts.py | mmotl/cheatsheets | afebd6b32dc3fcfdfde0fe83fb0b74b4b795344c | [
"BSD-2-Clause"
] | 1 | 2021-03-20T18:33:02.000Z | 2021-03-20T18:33:02.000Z | scripts/fonts.py | yuxionghuang/cheatsheets | 404a2fc6675f27dc85c0f952da7864c03058a3c7 | [
"BSD-2-Clause"
] | null | null | null | scripts/fonts.py | yuxionghuang/cheatsheets | 404a2fc6675f27dc85c0f952da7864c03058a3c7 | [
"BSD-2-Clause"
] | 1 | 2021-12-21T17:15:07.000Z | 2021-12-21T17:15:07.000Z | # -----------------------------------------------------------------------------
# Matplotlib cheat sheet
# Released under the BSD License
# -----------------------------------------------------------------------------
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
fig = plt.figure(figsize=(4.25, 3.8))
ax = fig.add_axes([0,0,1,1], frameon=False, xticks=[], yticks=[],
xlim=[0,40], ylim=[0,38])
y = 1
# -----------------------------------------------------------------------------
variants = {
"normal" : "../fonts/delicious-123/Delicious-Roman.otf",
"small-caps" : "../fonts/delicious-123/Delicious-SmallCaps.otf"
}
text = "The quick brown fox jumps over the lazy dog"
for i,variant in enumerate(variants.keys()):
ax.text(1, y, text, size=9, va="center",
fontproperties = FontProperties(fname=variants[variant]))
ax.text(39, y, variant,
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
# -----------------------------------------------------------------------------
styles = ["normal", "italic"]
text = "The quick brown fox jumps over the lazy dog"
for i,style in enumerate(styles):
ax.text(1, y, text, size=9, va="center", style=style,
family = "Source Sans Pro")
ax.text(39, y, style,
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
# -----------------------------------------------------------------------------
families = {
"Pacifico" : "cursive",
"Source Sans Pro" : "sans",
"Source Serif Pro": "serif",
"Source Code Pro" : "monospace" }
text = "The quick brown fox jumps over the lazy dog"
for i,family in enumerate(families):
ax.text(1, y, text,
va="center", size=9, family = family, weight = "regular")
ax.text(39, y,
"%s" % (families[family]),
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
# -----------------------------------------------------------------------------
weights = {
'ultralight' : 100,
'light' : 200,
'normal' : 400, 'regular' : 400, 'book' : 400,
'medium' : 500, 'roman' : 500,
'semibold' : 600, 'demibold' : 600, 'demi' : 600,
'bold' : 700,
'heavy' : 800, 'extra bold' : 800,
'black' : 900 }
text = "The quick brown fox jumps over the lazy dog"
for i,weight in enumerate(["ultralight","normal","semibold","bold","black"]):
ax.text(1, y, text, size=9,
va="center", family = "Source Sans Pro", weight = weight)
ax.text(39, y, "%s (%d)" % (weight, weights[weight]),
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
# -----------------------------------------------------------------------------
sizes = { "xx-small" : 0.579,
"x-small" : 0.694,
"small" : 0.833,
"medium" : 1.0,
"large" : 1.200,
"x-large" : 1.440,
"xx-large" : 1.728 }
text = "The quick brown fox"
for i,size in enumerate(sizes.keys()):
ax.text(1, y, text, size=size,
ha="left", va="center", family = "Source Sans Pro", weight="light")
ax.text(39, y, "%s (%.2f)" % (size, sizes[size]),
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65* max(sizes[size], sizes["small"])
plt.savefig("../figures/fonts.pdf")
# plt.show()
| 33.818182 | 79 | 0.466935 |
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
fig = plt.figure(figsize=(4.25, 3.8))
ax = fig.add_axes([0,0,1,1], frameon=False, xticks=[], yticks=[],
xlim=[0,40], ylim=[0,38])
y = 1
variants = {
"normal" : "../fonts/delicious-123/Delicious-Roman.otf",
"small-caps" : "../fonts/delicious-123/Delicious-SmallCaps.otf"
}
text = "The quick brown fox jumps over the lazy dog"
for i,variant in enumerate(variants.keys()):
ax.text(1, y, text, size=9, va="center",
fontproperties = FontProperties(fname=variants[variant]))
ax.text(39, y, variant,
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
styles = ["normal", "italic"]
text = "The quick brown fox jumps over the lazy dog"
for i,style in enumerate(styles):
ax.text(1, y, text, size=9, va="center", style=style,
family = "Source Sans Pro")
ax.text(39, y, style,
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
families = {
"Pacifico" : "cursive",
"Source Sans Pro" : "sans",
"Source Serif Pro": "serif",
"Source Code Pro" : "monospace" }
text = "The quick brown fox jumps over the lazy dog"
for i,family in enumerate(families):
ax.text(1, y, text,
va="center", size=9, family = family, weight = "regular")
ax.text(39, y,
"%s" % (families[family]),
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
weights = {
'ultralight' : 100,
'light' : 200,
'normal' : 400, 'regular' : 400, 'book' : 400,
'medium' : 500, 'roman' : 500,
'semibold' : 600, 'demibold' : 600, 'demi' : 600,
'bold' : 700,
'heavy' : 800, 'extra bold' : 800,
'black' : 900 }
text = "The quick brown fox jumps over the lazy dog"
for i,weight in enumerate(["ultralight","normal","semibold","bold","black"]):
ax.text(1, y, text, size=9,
va="center", family = "Source Sans Pro", weight = weight)
ax.text(39, y, "%s (%d)" % (weight, weights[weight]),
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65
y += 1
sizes = { "xx-small" : 0.579,
"x-small" : 0.694,
"small" : 0.833,
"medium" : 1.0,
"large" : 1.200,
"x-large" : 1.440,
"xx-large" : 1.728 }
text = "The quick brown fox"
for i,size in enumerate(sizes.keys()):
ax.text(1, y, text, size=size,
ha="left", va="center", family = "Source Sans Pro", weight="light")
ax.text(39, y, "%s (%.2f)" % (size, sizes[size]),
color="0.25", va="center", ha="right",
size="small", family = "Source Code Pro", weight = 400)
y += 1.65* max(sizes[size], sizes["small"])
plt.savefig("../figures/fonts.pdf")
| true | true |
1c2b74e91255ed084190d5b468b11d8bbfcdb81b | 303 | py | Python | data/multilingual/Latn.NNO/Sans_16/pdf_to_json_test_Latn.NNO_Sans_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.NNO/Sans_16/pdf_to_json_test_Latn.NNO_Sans_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.NNO/Sans_16/pdf_to_json_test_Latn.NNO_Sans_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.NNO/Sans_16/udhr_Latn.NNO_Sans_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.NNO/Sans_16/udhr_Latn.NNO_Sans_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| true | true |
1c2b75568bc51e54d3ce8a67a1b8fcb2925bf9a8 | 5,638 | py | Python | vas/shared/Instance.py | spring-operator/vas-python-api | ce7148a2044863e078e78b47abbaafc426f732ee | [
"Apache-2.0"
] | null | null | null | vas/shared/Instance.py | spring-operator/vas-python-api | ce7148a2044863e078e78b47abbaafc426f732ee | [
"Apache-2.0"
] | null | null | null | vas/shared/Instance.py | spring-operator/vas-python-api | ce7148a2044863e078e78b47abbaafc426f732ee | [
"Apache-2.0"
] | null | null | null | # vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Deletable import Deletable
from vas.shared.Resource import Resource
from vas.util.LinkUtils import LinkUtils
class Instance(Resource, Deletable):
"""An instance of a middleware component. Created from an installation that provides the binaries that the instance
uses at runtime.
:ivar `vas.shared.Groups.Group` group: The group that contains this instance
:ivar `vas.shared.Installations.Installation` installation: The installation that this instance is using
:ivar `vas.shared.Collection.Collection` live_configurations: The instance's live configurations
:ivar str name: The instance's name
:ivar list node_instances: The instance's individual node instances
:ivar `vas.shared.PendingConfigurations.PendingConfigurations` pending_configurations: The instance's pending
configurations
:ivar `vas.shared.Security.Security` security: The resource's security
:ivar str state: Retrieves the state of the resource from the server.
Will be one of:
* ``STARTING``
* ``STARTED``
* ``STOPPING``
* ``STOPPED``
"""
__group = None
__live_configurations = None
__pending_configurations = None
@property
def group(self):
self.__group = self.__group or self.__group_class(self._client, self.__group_location)
return self.__group
@property
def installation(self):
self.__installation = self.__installation or self.__installation_class(self._client,
self.__installation_location)
return self.__installation
@property
def live_configurations(self):
self.__live_configurations = self.__live_configurations or self.__live_configurations_class(self._client,
self.__live_configurations_location)
return self.__live_configurations
@property
def name(self):
return self.__name
@property
def node_instances(self):
self.__node_instances = self.__node_instances or self._create_resources_from_links(self.__node_instance_type,
self.__node_instance_class)
return self.__node_instances
@property
def pending_configurations(self):
self.__pending_configurations = self.__pending_configurations or self.__pending_configurations_class(
self._client,
self.__pending_configurations_location)
return self.__pending_configurations
@property
def state(self):
return self._client.get(self.__state_location)['status']
def __init__(self, client, location, group_class, installation_class, live_configurations_class,
pending_configurations_class, node_instance_class, node_instance_type):
super(Instance, self).__init__(client, location)
self.__live_configurations_location = LinkUtils.get_link_href(self._details, 'live-configurations')
self.__pending_configurations_location = LinkUtils.get_link_href(self._details, 'pending-configurations')
self.__group_location = LinkUtils.get_link_href(self._details, 'group')
self.__state_location = LinkUtils.get_link_href(self._details, 'state')
self.__group_class = group_class
self.__installation_class = installation_class
self.__node_instance_class = node_instance_class
self.__live_configurations_class = live_configurations_class
self.__pending_configurations_class = pending_configurations_class
self.__node_instance_type = node_instance_type
self.__name = self._details['name']
def reload(self):
"""Reloads the instance's details from the server"""
super(Instance, self).reload()
self.__installation_location = LinkUtils.get_link_href(self._details, 'installation')
self.__installation = None
self.__node_instances = None
def start(self, serial=False):
"""Starts the resource
:param bool serial: Whether to start the node instance serially
"""
self._client.post(self.__state_location, {'status': 'STARTED', 'serial': serial})
def stop(self, serial=False):
"""Stops the resource
:param bool serial: Whether to stop the node instance serially
"""
self._client.post(self.__state_location, {'status': 'STOPPED', 'serial': serial})
def __str__(self):
return "<{} name={}>".format(self.__class__.__name__, self.__name)
| 43.369231 | 120 | 0.64828 |
from vas.shared.Deletable import Deletable
from vas.shared.Resource import Resource
from vas.util.LinkUtils import LinkUtils
class Instance(Resource, Deletable):
__group = None
__live_configurations = None
__pending_configurations = None
@property
def group(self):
self.__group = self.__group or self.__group_class(self._client, self.__group_location)
return self.__group
@property
def installation(self):
self.__installation = self.__installation or self.__installation_class(self._client,
self.__installation_location)
return self.__installation
@property
def live_configurations(self):
self.__live_configurations = self.__live_configurations or self.__live_configurations_class(self._client,
self.__live_configurations_location)
return self.__live_configurations
@property
def name(self):
return self.__name
@property
def node_instances(self):
self.__node_instances = self.__node_instances or self._create_resources_from_links(self.__node_instance_type,
self.__node_instance_class)
return self.__node_instances
@property
def pending_configurations(self):
self.__pending_configurations = self.__pending_configurations or self.__pending_configurations_class(
self._client,
self.__pending_configurations_location)
return self.__pending_configurations
@property
def state(self):
return self._client.get(self.__state_location)['status']
def __init__(self, client, location, group_class, installation_class, live_configurations_class,
pending_configurations_class, node_instance_class, node_instance_type):
super(Instance, self).__init__(client, location)
self.__live_configurations_location = LinkUtils.get_link_href(self._details, 'live-configurations')
self.__pending_configurations_location = LinkUtils.get_link_href(self._details, 'pending-configurations')
self.__group_location = LinkUtils.get_link_href(self._details, 'group')
self.__state_location = LinkUtils.get_link_href(self._details, 'state')
self.__group_class = group_class
self.__installation_class = installation_class
self.__node_instance_class = node_instance_class
self.__live_configurations_class = live_configurations_class
self.__pending_configurations_class = pending_configurations_class
self.__node_instance_type = node_instance_type
self.__name = self._details['name']
def reload(self):
super(Instance, self).reload()
self.__installation_location = LinkUtils.get_link_href(self._details, 'installation')
self.__installation = None
self.__node_instances = None
def start(self, serial=False):
self._client.post(self.__state_location, {'status': 'STARTED', 'serial': serial})
def stop(self, serial=False):
self._client.post(self.__state_location, {'status': 'STOPPED', 'serial': serial})
def __str__(self):
return "<{} name={}>".format(self.__class__.__name__, self.__name)
| true | true |
1c2b794452d028520cdffa11e59d2765dc7c5893 | 14,135 | py | Python | tests/test_eos_mix_methods.py | brunokiyoshi/thermo | 5b31d21fd087dd0fc3302f023c5f3c52d9cbee3b | [
"MIT"
] | null | null | null | tests/test_eos_mix_methods.py | brunokiyoshi/thermo | 5b31d21fd087dd0fc3302f023c5f3c52d9cbee3b | [
"MIT"
] | null | null | null | tests/test_eos_mix_methods.py | brunokiyoshi/thermo | 5b31d21fd087dd0fc3302f023c5f3c52d9cbee3b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import pytest
from thermo.eos import *
from thermo.eos_mix import *
from thermo.eos_alpha_functions import *
from thermo.eos_mix_methods import *
from fluids.constants import R
from fluids.numerics import jacobian, hessian, assert_close, assert_close1d, assert_close2d, assert_close3d, derivative
from math import log, exp, sqrt
import numpy as np
from thermo.eos_mix_methods import a_alpha_quadratic_terms, a_alpha_and_derivatives_quadratic_terms
def test_a_alpha_quadratic_terms():
# useful test case for speed.
expect = [1.018836674553355, 2.191757517626393, 2.563258602852081, 1.5598326706034975, 2.70593281974093, 3.7034025281989855, 4.539954054126808, 4.699007689627005, 5.544738410220301, 5.727506758376061, 6.747016798786708, 7.772541929210375, 8.824329534067225, 9.881609693824497, 10.818879356535186, 11.967885231615968, 13.064056888046336, 14.301191101517293, 15.549382410454996, 16.514506861687853, 17.70128879207487, 18.588871716258463, 19.587383418298344, 21.163882746233718, 22.71677093839829, 23.693174106957997, 24.84638402761533, 26.32710900857889, 27.628174407150638, 27.35173402605858, 30.078139085433158, 29.6938067153124, 30.975794852828585, 31.612211604350215, 37.346889330614765, 5.8657490543188056, 6.918460471177853, 7.885934394505012, 7.987258405203353, 9.096924819311049, 5.4186445304744675, 6.364741674932172, 6.247071329729653, 7.191150355969193]
a_alphas = [0.0865274961332042, 0.4004331347550168, 0.5476837363175464, 0.20281544374537322, 0.610350096562494, 1.1432648066725495, 1.7180979223407897, 1.8405910620140276, 2.56275518543631, 2.734489234665559, 3.794622523842678, 5.035830969924731, 6.490952532386477, 8.139549888291587, 9.756848311930623, 11.939326501216337, 14.226600071224336, 17.048627321670082, 20.154465549725934, 22.73401890914733, 26.118893369963804, 28.803884311242584, 31.98142763556359, 37.33667941647009, 43.0168093920849, 46.79414203338489, 51.460189856771855, 57.77651478272769, 63.62816155455672, 62.36123776101297, 75.41312259487229, 73.4982082371554, 79.98156837889205, 83.30187138391334, 116.2663039720862, 2.8680845884126343, 3.9899175858237754, 5.183836756317098, 5.317903685129213, 6.898175009281366, 2.447520402314526, 3.3768094978613767, 3.2531038444204294, 4.3106398143326805]
a_alpha_roots = [i**0.5 for i in a_alphas]
kijs = np.zeros((44, 44)).tolist()
zs = [9.11975115499676e-05, 9.986813065240533e-05, 0.0010137795304828892, 0.019875879000370657, 0.013528874875432457, 0.021392773691700402, 0.00845450438914824, 0.02500218071904368, 0.016114189201071587, 0.027825798446635016, 0.05583179467176313, 0.0703116540769539, 0.07830577180555454, 0.07236459223729574, 0.0774523322851419, 0.057755091407705975, 0.04030134965162674, 0.03967043780553758, 0.03514481759005302, 0.03175471055284055, 0.025411123554079325, 0.029291866298718154, 0.012084986551713202, 0.01641114551124426, 0.01572454598093482, 0.012145363820829673, 0.01103585282423499, 0.010654818322680342, 0.008777712911254239, 0.008732073853067238, 0.007445155260036595, 0.006402875549212365, 0.0052908087849774296, 0.0048199150683177075, 0.015943943854195963, 0.004452253754752775, 0.01711981267072777, 0.0024032720444511282, 0.032178399403544646, 0.0018219517069058137, 0.003403378548794345, 0.01127516775495176, 0.015133143423489698, 0.029483213283483682]
a_alpha, a_alpha_j_rows = a_alpha_quadratic_terms(a_alphas, a_alpha_roots, 299.0, zs, kijs)
assert_close1d(expect, a_alpha_j_rows, rtol=1e-14)
assert_close(a_alpha, 11.996512274167202, rtol=1e-14)
# Small case but with constant kijs
kijs = [[0,.083],[0.083,0]]
zs = [0.1164203, 0.8835797]
a_alphas = [0.2491099357671155, 0.6486495863528039]
a_alpha_roots = [i**0.5 for i in a_alphas]
a_alpha, a_alpha_j_rows = a_alpha_quadratic_terms(a_alphas, a_alpha_roots, 299.0, zs, kijs)
assert_close1d([0.35469988173420947, 0.6160475723779467], a_alpha_j_rows, rtol=1e-14)
assert_close(a_alpha, 0.5856213958288955, rtol=1e-14)
def test_a_alpha_and_derivatives_quadratic_terms():
    """Regression test for ``a_alpha_and_derivatives_quadratic_terms``.

    Checks the mixture ``a_alpha`` and its first/second temperature
    derivatives, plus the per-component row sums, against pinned reference
    values for a 44-component mixture with all-zero kijs and for a binary
    mixture with constant kijs.
    """
    expect = [1.018836674553355, 2.191757517626393, 2.563258602852081, 1.5598326706034975, 2.70593281974093, 3.7034025281989855, 4.539954054126808, 4.699007689627005, 5.544738410220301, 5.727506758376061, 6.747016798786708, 7.772541929210375, 8.824329534067225, 9.881609693824497, 10.818879356535186, 11.967885231615968, 13.064056888046336, 14.301191101517293, 15.549382410454996, 16.514506861687853, 17.70128879207487, 18.588871716258463, 19.587383418298344, 21.163882746233718, 22.71677093839829, 23.693174106957997, 24.84638402761533, 26.32710900857889, 27.628174407150638, 27.35173402605858, 30.078139085433158, 29.6938067153124, 30.975794852828585, 31.612211604350215, 37.346889330614765, 5.8657490543188056, 6.918460471177853, 7.885934394505012, 7.987258405203353, 9.096924819311049, 5.4186445304744675, 6.364741674932172, 6.247071329729653, 7.191150355969193]
    a_alphas = [0.0865274961332042, 0.4004331347550168, 0.5476837363175464, 0.20281544374537322, 0.610350096562494, 1.1432648066725495, 1.7180979223407897, 1.8405910620140276, 2.56275518543631, 2.734489234665559, 3.794622523842678, 5.035830969924731, 6.490952532386477, 8.139549888291587, 9.756848311930623, 11.939326501216337, 14.226600071224336, 17.048627321670082, 20.154465549725934, 22.73401890914733, 26.118893369963804, 28.803884311242584, 31.98142763556359, 37.33667941647009, 43.0168093920849, 46.79414203338489, 51.460189856771855, 57.77651478272769, 63.62816155455672, 62.36123776101297, 75.41312259487229, 73.4982082371554, 79.98156837889205, 83.30187138391334, 116.2663039720862, 2.8680845884126343, 3.9899175858237754, 5.183836756317098, 5.317903685129213, 6.898175009281366, 2.447520402314526, 3.3768094978613767, 3.2531038444204294, 4.3106398143326805]
    a_alpha_roots = [i**0.5 for i in a_alphas]
    # NOTE: the original test also built `a_alpha_i_root_invs = [1.0/i for i
    # in a_alphas]` — a dead local whose name did not match its value
    # (reciprocals of a_alphas, not of their roots); it has been removed.
    da_alpha_dTs = [-0.00025377859043732546, -0.000934247068461214, -0.000816789460173304, -0.0003641243787874678, -0.0010503058450047169, -0.0019521746900983052, -0.0028718927680108602, -0.0030862530923667516, -0.0043109072968568855, -0.004719357153237089, -0.006631042744989444, -0.008954841106859145, -0.01175296124567969, -0.015014798912202318, -0.018394836388991746, -0.02261696126764091, -0.02691416109598246, -0.03306276569415665, -0.03972067690500332, -0.04434234645435802, -0.05166183446540069, -0.05661884581837739, -0.06384511544740731, -0.07534567027524366, -0.08688546863889157, -0.09454104531596857, -0.1047355386575357, -0.12085503194237243, -0.13251190497391216, -0.13109044690165458, -0.1584965979082082, -0.15738400415699616, -0.1706975126112625, -0.17869250096210298, -0.24786999267933035, -0.0040612961454164305, -0.005861031978967661, -0.007870669654243058, -0.00806706054424201, -0.011089166549563573, -0.0035751401389282128, -0.005057878813908274, -0.004795418755334288, -0.0063951285412122945]
    d2a_alpha_dT2s = [7.951210065548482e-07, 2.6469203076280187e-06, 1.970376231974855e-06, 9.337390218103036e-07, 2.654206140072756e-06, 4.920336341685227e-06, 7.186749294919237e-06, 7.73122782691325e-06, 1.0810615491775454e-05, 1.1938080101460763e-05, 1.6845558981373303e-05, 2.288659685773046e-05, 3.022862525081902e-05, 3.887335363056251e-05, 4.799818908733702e-05, 5.9116869795960396e-05, 7.031530412634311e-05, 8.71642719698682e-05, 0.00010534213565791343, 0.00011714843555809333, 0.00013719528984525276, 0.00015001164237180505, 0.00017013611809931108, 0.0002016001519076944, 0.00023255486736407165, 0.0002530719148656703, 0.0002811419418128126, 0.00032782536312720063, 0.000358837713019585, 0.00035626762677964024, 0.00043071802720069994, 0.0004308123103893313, 0.0004666480764343225, 0.0004894792537071127, 0.0006773356550351481, 9.64428714604626e-06, 1.4073199340092461e-05, 1.9092839815989808e-05, 1.956381512959782e-05, 2.739514336342284e-05, 8.569704889318595e-06, 1.2217713526317966e-05, 1.1526841531601815e-05, 1.5402352528062937e-05]
    da_alpha_dT_j_rows_expect = [-0.0024659779471849236, -0.0046475548895564215, -0.004356514353727929, -0.002888183050970737, -0.0049094724710971645, -0.0066946247849404734, -0.008125158529797675, -0.008422079528590325, -0.009952764932789312, -0.010406054570834938, -0.012331292438012833, -0.014325077425132872, -0.01640670440194842, -0.01854046658049185, -0.02051894196830183, -0.022751981036326308, -0.02481953443659406, -0.027509548756389217, -0.030155386331164644, -0.031859224259789314, -0.03439180249090889, -0.036002133443470065, -0.0382361992513997, -0.0415431605007282, -0.04461176649968248, -0.046535861707927346, -0.04898614541953604, -0.05264915066454394, -0.055124368695664686, -0.05483970527179004, -0.06030003256343941, -0.06011776608310644, -0.06260298333060192, -0.0640616331561035, -0.07543630216258783, -0.009748518366766266, -0.011681157292387554, -0.013509225924011457, -0.013677421745325026, -0.015989657410498563, -0.009126533178948, -0.010838121814247793, -0.010563651638562304, -0.01219409084892938]
    kijs = np.zeros((44, 44)).tolist()
    zs = [9.11975115499676e-05, 9.986813065240533e-05, 0.0010137795304828892, 0.019875879000370657, 0.013528874875432457, 0.021392773691700402, 0.00845450438914824, 0.02500218071904368, 0.016114189201071587, 0.027825798446635016, 0.05583179467176313, 0.0703116540769539, 0.07830577180555454, 0.07236459223729574, 0.0774523322851419, 0.057755091407705975, 0.04030134965162674, 0.03967043780553758, 0.03514481759005302, 0.03175471055284055, 0.025411123554079325, 0.029291866298718154, 0.012084986551713202, 0.01641114551124426, 0.01572454598093482, 0.012145363820829673, 0.01103585282423499, 0.010654818322680342, 0.008777712911254239, 0.008732073853067238, 0.007445155260036595, 0.006402875549212365, 0.0052908087849774296, 0.0048199150683177075, 0.015943943854195963, 0.004452253754752775, 0.01711981267072777, 0.0024032720444511282, 0.032178399403544646, 0.0018219517069058137, 0.003403378548794345, 0.01127516775495176, 0.015133143423489698, 0.029483213283483682]
    a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_j_rows, da_alpha_dT_j_rows = a_alpha_and_derivatives_quadratic_terms(
        a_alphas, a_alpha_roots, da_alpha_dTs, d2a_alpha_dT2s, 299.0, zs, kijs)
    assert_close1d(expect, a_alpha_j_rows, rtol=1e-14)
    assert_close(a_alpha, 11.996512274167202, rtol=1e-14)
    assert_close(da_alpha_dT, -0.0228875173310534, rtol=1e-14)
    assert_close(d2a_alpha_dT2, 5.9978809895526926e-05, rtol=1e-14)
    assert_close1d(da_alpha_dT_j_rows_expect, da_alpha_dT_j_rows, rtol=1e-14)

    # Small binary case with constant, nonzero kijs.
    kijs = [[0, .083], [0.083, 0]]
    zs = [0.1164203, 0.8835797]
    # Reference state: eos = PRMIX(T=190.0, P=40.53e5, Tcs=[190.63, 373.55],
    # Pcs=[46.17E5, 90.07E5], omegas=[0.01, 0.1], zs=zs, kijs=kijs)
    a_alphas = [0.2491099357671155, 0.6486495863528039]
    da_alpha_dTs = [-0.0005102028006086241, -0.0011131153520304886]
    d2a_alpha_dT2s = [1.8651128859234162e-06, 3.884331923127011e-06]
    a_alpha_roots = [i**0.5 for i in a_alphas]
    a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_j_rows, da_alpha_dT_j_rows = a_alpha_and_derivatives_quadratic_terms(
        a_alphas, a_alpha_roots, da_alpha_dTs, d2a_alpha_dT2s, 299.0, zs, kijs)
    assert_close(a_alpha, 0.5856213958288957, rtol=1e-14)
    assert_close(da_alpha_dT, -0.001018667672891354, rtol=1e-14)
    assert_close(d2a_alpha_dT2, 3.5666981785619988e-06, rtol=1e-14)
    assert_close1d(a_alpha_j_rows, [0.35469988173420947, 0.6160475723779467], rtol=1e-14)
    assert_close1d(da_alpha_dT_j_rows, [-0.0006723873746135188, -0.0010642935017889568], rtol=1e-14)
def test_a_alpha_aijs_composition_independent():
    """Pin the pairwise a_alpha matrix, its roots, and the inverse-root
    matrix returned by ``a_alpha_aijs_composition_independent`` for a
    binary mixture with constant kijs.
    """
    kijs = [[0, .083],
            [0.083, 0]]
    a_alphas = [0.2491099357671155, 0.6486495863528039]
    result = a_alpha_aijs_composition_independent(a_alphas, kijs)
    a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = result
    assert_close2d(a_alpha_ijs, [[0.2491099357671155, 0.3686123937424334], [0.3686123937424334, 0.6486495863528038]], rtol=1e-13)
    assert_close1d(a_alpha_roots, [0.4991091421393877, 0.8053878484015039], rtol=1e-13)
    assert_close1d(a_alpha_ij_roots_inv, [[4.014291910599931, 2.4877079977965977], [2.4877079977965977, 1.5416644379945614]], rtol=1e-13)
def test_PR_lnphis_fastest():
    """``PR_lnphis_fastest`` must reproduce PRMIX's liquid and gas
    fugacity-coefficient logs exactly."""
    kwargs = dict(Tcs=[190.56400000000002, 305.32, 369.83, 126.2],
                  Pcs=[4599000.0, 4872000.0, 4248000.0, 3394387.5],
                  omegas=[0.008, 0.098, 0.152, 0.04],
                  zs=[.1, .2, .3, .4],
                  kijs=[[0.0, -0.0059, 0.0119, 0.0289], [-0.0059, 0.0, 0.0011, 0.0533], [0.0119, 0.0011, 0.0, 0.0878], [0.0289, 0.0533, 0.0878, 0.0]])
    eos = PRMIX(T=200, P=1e5, **kwargs)
    # Exercise both the liquid-root and gas-root paths against the
    # reference object.
    cases = ((True, False, eos.lnphis_l),
             (False, True, eos.lnphis_g))
    for liquid, gas, expect in cases:
        calc = PR_lnphis_fastest(eos.zs, eos.T, eos.P, eos.kijs, liquid, gas,
                                 eos.ais, eos.bs, eos.a_alphas, eos.a_alpha_roots, eos.kappas)
        assert_close(expect, calc, rtol=1e-14)
import pytest
from thermo.eos import *
from thermo.eos_mix import *
from thermo.eos_alpha_functions import *
from thermo.eos_mix_methods import *
from fluids.constants import R
from fluids.numerics import jacobian, hessian, assert_close, assert_close1d, assert_close2d, assert_close3d, derivative
from math import log, exp, sqrt
import numpy as np
from thermo.eos_mix_methods import a_alpha_quadratic_terms, a_alpha_and_derivatives_quadratic_terms
def test_a_alpha_quadratic_terms():
expect = [1.018836674553355, 2.191757517626393, 2.563258602852081, 1.5598326706034975, 2.70593281974093, 3.7034025281989855, 4.539954054126808, 4.699007689627005, 5.544738410220301, 5.727506758376061, 6.747016798786708, 7.772541929210375, 8.824329534067225, 9.881609693824497, 10.818879356535186, 11.967885231615968, 13.064056888046336, 14.301191101517293, 15.549382410454996, 16.514506861687853, 17.70128879207487, 18.588871716258463, 19.587383418298344, 21.163882746233718, 22.71677093839829, 23.693174106957997, 24.84638402761533, 26.32710900857889, 27.628174407150638, 27.35173402605858, 30.078139085433158, 29.6938067153124, 30.975794852828585, 31.612211604350215, 37.346889330614765, 5.8657490543188056, 6.918460471177853, 7.885934394505012, 7.987258405203353, 9.096924819311049, 5.4186445304744675, 6.364741674932172, 6.247071329729653, 7.191150355969193]
a_alphas = [0.0865274961332042, 0.4004331347550168, 0.5476837363175464, 0.20281544374537322, 0.610350096562494, 1.1432648066725495, 1.7180979223407897, 1.8405910620140276, 2.56275518543631, 2.734489234665559, 3.794622523842678, 5.035830969924731, 6.490952532386477, 8.139549888291587, 9.756848311930623, 11.939326501216337, 14.226600071224336, 17.048627321670082, 20.154465549725934, 22.73401890914733, 26.118893369963804, 28.803884311242584, 31.98142763556359, 37.33667941647009, 43.0168093920849, 46.79414203338489, 51.460189856771855, 57.77651478272769, 63.62816155455672, 62.36123776101297, 75.41312259487229, 73.4982082371554, 79.98156837889205, 83.30187138391334, 116.2663039720862, 2.8680845884126343, 3.9899175858237754, 5.183836756317098, 5.317903685129213, 6.898175009281366, 2.447520402314526, 3.3768094978613767, 3.2531038444204294, 4.3106398143326805]
a_alpha_roots = [i**0.5 for i in a_alphas]
kijs = np.zeros((44, 44)).tolist()
zs = [9.11975115499676e-05, 9.986813065240533e-05, 0.0010137795304828892, 0.019875879000370657, 0.013528874875432457, 0.021392773691700402, 0.00845450438914824, 0.02500218071904368, 0.016114189201071587, 0.027825798446635016, 0.05583179467176313, 0.0703116540769539, 0.07830577180555454, 0.07236459223729574, 0.0774523322851419, 0.057755091407705975, 0.04030134965162674, 0.03967043780553758, 0.03514481759005302, 0.03175471055284055, 0.025411123554079325, 0.029291866298718154, 0.012084986551713202, 0.01641114551124426, 0.01572454598093482, 0.012145363820829673, 0.01103585282423499, 0.010654818322680342, 0.008777712911254239, 0.008732073853067238, 0.007445155260036595, 0.006402875549212365, 0.0052908087849774296, 0.0048199150683177075, 0.015943943854195963, 0.004452253754752775, 0.01711981267072777, 0.0024032720444511282, 0.032178399403544646, 0.0018219517069058137, 0.003403378548794345, 0.01127516775495176, 0.015133143423489698, 0.029483213283483682]
a_alpha, a_alpha_j_rows = a_alpha_quadratic_terms(a_alphas, a_alpha_roots, 299.0, zs, kijs)
assert_close1d(expect, a_alpha_j_rows, rtol=1e-14)
assert_close(a_alpha, 11.996512274167202, rtol=1e-14)
kijs = [[0,.083],[0.083,0]]
zs = [0.1164203, 0.8835797]
a_alphas = [0.2491099357671155, 0.6486495863528039]
a_alpha_roots = [i**0.5 for i in a_alphas]
a_alpha, a_alpha_j_rows = a_alpha_quadratic_terms(a_alphas, a_alpha_roots, 299.0, zs, kijs)
assert_close1d([0.35469988173420947, 0.6160475723779467], a_alpha_j_rows, rtol=1e-14)
assert_close(a_alpha, 0.5856213958288955, rtol=1e-14)
def test_a_alpha_and_derivatives_quadratic_terms():
expect = [1.018836674553355, 2.191757517626393, 2.563258602852081, 1.5598326706034975, 2.70593281974093, 3.7034025281989855, 4.539954054126808, 4.699007689627005, 5.544738410220301, 5.727506758376061, 6.747016798786708, 7.772541929210375, 8.824329534067225, 9.881609693824497, 10.818879356535186, 11.967885231615968, 13.064056888046336, 14.301191101517293, 15.549382410454996, 16.514506861687853, 17.70128879207487, 18.588871716258463, 19.587383418298344, 21.163882746233718, 22.71677093839829, 23.693174106957997, 24.84638402761533, 26.32710900857889, 27.628174407150638, 27.35173402605858, 30.078139085433158, 29.6938067153124, 30.975794852828585, 31.612211604350215, 37.346889330614765, 5.8657490543188056, 6.918460471177853, 7.885934394505012, 7.987258405203353, 9.096924819311049, 5.4186445304744675, 6.364741674932172, 6.247071329729653, 7.191150355969193]
a_alphas = [0.0865274961332042, 0.4004331347550168, 0.5476837363175464, 0.20281544374537322, 0.610350096562494, 1.1432648066725495, 1.7180979223407897, 1.8405910620140276, 2.56275518543631, 2.734489234665559, 3.794622523842678, 5.035830969924731, 6.490952532386477, 8.139549888291587, 9.756848311930623, 11.939326501216337, 14.226600071224336, 17.048627321670082, 20.154465549725934, 22.73401890914733, 26.118893369963804, 28.803884311242584, 31.98142763556359, 37.33667941647009, 43.0168093920849, 46.79414203338489, 51.460189856771855, 57.77651478272769, 63.62816155455672, 62.36123776101297, 75.41312259487229, 73.4982082371554, 79.98156837889205, 83.30187138391334, 116.2663039720862, 2.8680845884126343, 3.9899175858237754, 5.183836756317098, 5.317903685129213, 6.898175009281366, 2.447520402314526, 3.3768094978613767, 3.2531038444204294, 4.3106398143326805]
a_alpha_roots = [i**0.5 for i in a_alphas]
a_alpha_i_root_invs = [1.0/i for i in a_alphas]
da_alpha_dTs = [-0.00025377859043732546, -0.000934247068461214, -0.000816789460173304, -0.0003641243787874678, -0.0010503058450047169, -0.0019521746900983052, -0.0028718927680108602, -0.0030862530923667516, -0.0043109072968568855, -0.004719357153237089, -0.006631042744989444, -0.008954841106859145, -0.01175296124567969, -0.015014798912202318, -0.018394836388991746, -0.02261696126764091, -0.02691416109598246, -0.03306276569415665, -0.03972067690500332, -0.04434234645435802, -0.05166183446540069, -0.05661884581837739, -0.06384511544740731, -0.07534567027524366, -0.08688546863889157, -0.09454104531596857, -0.1047355386575357, -0.12085503194237243, -0.13251190497391216, -0.13109044690165458, -0.1584965979082082, -0.15738400415699616, -0.1706975126112625, -0.17869250096210298, -0.24786999267933035, -0.0040612961454164305, -0.005861031978967661, -0.007870669654243058, -0.00806706054424201, -0.011089166549563573, -0.0035751401389282128, -0.005057878813908274, -0.004795418755334288, -0.0063951285412122945]
d2a_alpha_dT2s = [7.951210065548482e-07, 2.6469203076280187e-06, 1.970376231974855e-06, 9.337390218103036e-07, 2.654206140072756e-06, 4.920336341685227e-06, 7.186749294919237e-06, 7.73122782691325e-06, 1.0810615491775454e-05, 1.1938080101460763e-05, 1.6845558981373303e-05, 2.288659685773046e-05, 3.022862525081902e-05, 3.887335363056251e-05, 4.799818908733702e-05, 5.9116869795960396e-05, 7.031530412634311e-05, 8.71642719698682e-05, 0.00010534213565791343, 0.00011714843555809333, 0.00013719528984525276, 0.00015001164237180505, 0.00017013611809931108, 0.0002016001519076944, 0.00023255486736407165, 0.0002530719148656703, 0.0002811419418128126, 0.00032782536312720063, 0.000358837713019585, 0.00035626762677964024, 0.00043071802720069994, 0.0004308123103893313, 0.0004666480764343225, 0.0004894792537071127, 0.0006773356550351481, 9.64428714604626e-06, 1.4073199340092461e-05, 1.9092839815989808e-05, 1.956381512959782e-05, 2.739514336342284e-05, 8.569704889318595e-06, 1.2217713526317966e-05, 1.1526841531601815e-05, 1.5402352528062937e-05]
da_alpha_dT_j_rows_expect = [-0.0024659779471849236, -0.0046475548895564215, -0.004356514353727929, -0.002888183050970737, -0.0049094724710971645, -0.0066946247849404734, -0.008125158529797675, -0.008422079528590325, -0.009952764932789312, -0.010406054570834938, -0.012331292438012833, -0.014325077425132872, -0.01640670440194842, -0.01854046658049185, -0.02051894196830183, -0.022751981036326308, -0.02481953443659406, -0.027509548756389217, -0.030155386331164644, -0.031859224259789314, -0.03439180249090889, -0.036002133443470065, -0.0382361992513997, -0.0415431605007282, -0.04461176649968248, -0.046535861707927346, -0.04898614541953604, -0.05264915066454394, -0.055124368695664686, -0.05483970527179004, -0.06030003256343941, -0.06011776608310644, -0.06260298333060192, -0.0640616331561035, -0.07543630216258783, -0.009748518366766266, -0.011681157292387554, -0.013509225924011457, -0.013677421745325026, -0.015989657410498563, -0.009126533178948, -0.010838121814247793, -0.010563651638562304, -0.01219409084892938]
kijs = np.zeros((44, 44)).tolist()
zs = [9.11975115499676e-05, 9.986813065240533e-05, 0.0010137795304828892, 0.019875879000370657, 0.013528874875432457, 0.021392773691700402, 0.00845450438914824, 0.02500218071904368, 0.016114189201071587, 0.027825798446635016, 0.05583179467176313, 0.0703116540769539, 0.07830577180555454, 0.07236459223729574, 0.0774523322851419, 0.057755091407705975, 0.04030134965162674, 0.03967043780553758, 0.03514481759005302, 0.03175471055284055, 0.025411123554079325, 0.029291866298718154, 0.012084986551713202, 0.01641114551124426, 0.01572454598093482, 0.012145363820829673, 0.01103585282423499, 0.010654818322680342, 0.008777712911254239, 0.008732073853067238, 0.007445155260036595, 0.006402875549212365, 0.0052908087849774296, 0.0048199150683177075, 0.015943943854195963, 0.004452253754752775, 0.01711981267072777, 0.0024032720444511282, 0.032178399403544646, 0.0018219517069058137, 0.003403378548794345, 0.01127516775495176, 0.015133143423489698, 0.029483213283483682]
a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_j_rows, da_alpha_dT_j_rows = a_alpha_and_derivatives_quadratic_terms(a_alphas, a_alpha_roots, da_alpha_dTs, d2a_alpha_dT2s, 299.0, zs, kijs)
assert_close1d(expect, a_alpha_j_rows, rtol=1e-14)
assert_close(a_alpha, 11.996512274167202, rtol=1e-14)
assert_close(da_alpha_dT, -0.0228875173310534, rtol=1e-14)
assert_close(d2a_alpha_dT2, 5.9978809895526926e-05, rtol=1e-14)
assert_close1d(da_alpha_dT_j_rows_expect, da_alpha_dT_j_rows, rtol=1e-14)
kijs = [[0,.083],[0.083,0]]
zs = [0.1164203, 0.8835797]
a_alphas = [0.2491099357671155, 0.6486495863528039]
da_alpha_dTs = [-0.0005102028006086241, -0.0011131153520304886]
d2a_alpha_dT2s = [1.8651128859234162e-06, 3.884331923127011e-06]
a_alpha_roots = [i**0.5 for i in a_alphas]
a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_j_rows, da_alpha_dT_j_rows = a_alpha_and_derivatives_quadratic_terms(a_alphas, a_alpha_roots, da_alpha_dTs, d2a_alpha_dT2s, 299.0, zs, kijs)
assert_close(a_alpha, 0.5856213958288957, rtol=1e-14)
assert_close(da_alpha_dT, -0.001018667672891354, rtol=1e-14)
assert_close(d2a_alpha_dT2, 3.5666981785619988e-06, rtol=1e-14)
assert_close1d(a_alpha_j_rows, [0.35469988173420947, 0.6160475723779467], rtol=1e-14)
assert_close1d(da_alpha_dT_j_rows, [-0.0006723873746135188, -0.0010642935017889568], rtol=1e-14)
def test_a_alpha_aijs_composition_independent():
kijs = [[0,.083],[0.083,0]]
a_alphas = [0.2491099357671155, 0.6486495863528039]
a_alpha_ijs, a_alpha_roots, a_alpha_ij_roots_inv = a_alpha_aijs_composition_independent(a_alphas, kijs)
assert_close2d(a_alpha_ijs, [[0.2491099357671155, 0.3686123937424334], [0.3686123937424334, 0.6486495863528038]], rtol=1e-13)
assert_close1d(a_alpha_roots, [0.4991091421393877, 0.8053878484015039], rtol=1e-13)
assert_close1d(a_alpha_ij_roots_inv, [[4.014291910599931, 2.4877079977965977], [2.4877079977965977, 1.5416644379945614]], rtol=1e-13)
def test_PR_lnphis_fastest():
kwargs = dict(Tcs=[190.56400000000002, 305.32, 369.83, 126.2],
Pcs=[4599000.0, 4872000.0, 4248000.0, 3394387.5],
omegas=[0.008, 0.098, 0.152, 0.04],
zs=[.1, .2, .3, .4],
kijs=[[0.0, -0.0059, 0.0119, 0.0289], [-0.0059, 0.0, 0.0011, 0.0533], [0.0119, 0.0011, 0.0, 0.0878], [0.0289, 0.0533, 0.0878, 0.0]])
eos = PRMIX(T=200, P=1e5, **kwargs)
expect = eos.lnphis_l
calc = PR_lnphis_fastest(eos.zs, eos.T, eos.P, eos.kijs, True, False, eos.ais, eos.bs, eos.a_alphas, eos.a_alpha_roots, eos.kappas)
assert_close(expect, calc, rtol=1e-14)
expect = eos.lnphis_g
calc = PR_lnphis_fastest(eos.zs, eos.T, eos.P, eos.kijs, False, True, eos.ais, eos.bs, eos.a_alphas, eos.a_alpha_roots, eos.kappas)
assert_close(expect, calc, rtol=1e-14) | true | true |
1c2b79947d05c95e76c939f6ce3e06fb0419a3ae | 487 | py | Python | git_init.py | xrun0213/beginner | 7fa9bb68dceffa94c15a2c82945961cf61d995fa | [
"Apache-2.0"
] | null | null | null | git_init.py | xrun0213/beginner | 7fa9bb68dceffa94c15a2c82945961cf61d995fa | [
"Apache-2.0"
] | null | null | null | git_init.py | xrun0213/beginner | 7fa9bb68dceffa94c15a2c82945961cf61d995fa | [
"Apache-2.0"
] | null | null | null | #! usr/bin/env python3
#-*- coding:utf-8 -*-
import argparse, io, os, sys
parser = argparse.ArgumentParser()
# parser.add_argument('account', help='the account of GITHUB')
parser.add_argument('repository', help='name of the repository')
args = parser.parse_args()
cmd1 = 'git init'
cmd2 = 'git remote add origin git@github.com:xrun0213/{0}.git'.format(args.repository)
msg = '&'.join( (cmd1, cmd2) )
os.system(msg)
###
#git pull [remote] [branch]
#git push [remote] master:[branch] | 24.35 | 86 | 0.698152 |
import argparse, io, os, sys
parser = argparse.ArgumentParser()
parser.add_argument('repository', help='name of the repository')
args = parser.parse_args()
cmd1 = 'git init'
cmd2 = 'git remote add origin git@github.com:xrun0213/{0}.git'.format(args.repository)
msg = '&'.join( (cmd1, cmd2) )
os.system(msg)
| true | true |
1c2b79dfe93716447fb6e457d9d92a3368f4e808 | 481 | py | Python | Python-code-snippets-101-200/141-List audio devices.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | Python-code-snippets-101-200/141-List audio devices.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | Python-code-snippets-101-200/141-List audio devices.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | '''
141-List audio devices
more Python code snippets
stevepython.wordpress.com
source:
https://pbaumgarten.com/python/audio/
pip3 install pyaudio
Linux:
sudo apt-get install python-pyaudio python3-pyaudio
'''
import pyaudio
def list_devices():
    """Print the index and name of every audio device PyAudio can see."""
    p = pyaudio.PyAudio()
    try:
        for i in range(p.get_device_count()):
            info = p.get_device_info_by_index(i)
            print("Device {} = {}".format(info["index"], info["name"]))
    finally:
        # Release the PortAudio resources; the original leaked the handle.
        p.terminate()


list_devices()
| 19.24 | 67 | 0.704782 |
import pyaudio
def list_devices():
p = pyaudio.PyAudio()
device_count = p.get_device_count()
for i in range(0, device_count):
info = p.get_device_info_by_index(i)
print("Device {} = {}".format(info["index"], info["name"]))
list_devices()
| true | true |
1c2b7a5151f3bc3b5c321f442d7805380c9b1d7d | 2,332 | py | Python | TweetViz/TweetViz/tests/test_filter.py | alperkesen/agile-tweetviz | e5c91b6bb3d40603697da7a33f8f320f78f24867 | [
"MIT"
] | null | null | null | TweetViz/TweetViz/tests/test_filter.py | alperkesen/agile-tweetviz | e5c91b6bb3d40603697da7a33f8f320f78f24867 | [
"MIT"
] | null | null | null | TweetViz/TweetViz/tests/test_filter.py | alperkesen/agile-tweetviz | e5c91b6bb3d40603697da7a33f8f320f78f24867 | [
"MIT"
] | null | null | null | import os
import unittest
import tweepy
from TweetViz import app
class FilterTests(unittest.TestCase):
    """Integration tests for the TweetViz filter endpoints and the tweepy API.

    NOTE(review): the tweepy credentials below are hard-coded in the test —
    consider moving them to environment variables.
    """

    def setUp(self):
        # Flask test client against the application in testing mode.
        app.config['TESTING'] = True
        self.app = app.test_client()

    def tearDown(self):
        pass

    def test_tweepy_api(self):
        """The tweepy credentials authenticate and cursors honour the item limit."""
        tweepyAuth = tweepy.OAuthHandler(
            "7kErkRN6gM6hauMct2Olqqwkq",
            "yuIZjc5Z5QCGjSss3X10sSBezWk08n4VKAnIumW4Fs5chr0LON")
        tweepyAuth.set_access_token(
            "3224914785-BwrhbViZQTo6KU3f7KDTHEstESQsM1P4euvlCii",
            "oMdYFV6sz9M5lNaSp5qXu7YQg1MruUraT8KXvmvJg3nTA")
        tweepyAPI = tweepy.API(tweepyAuth, wait_on_rate_limit=True)
        query = "test"
        limit = 20

        search_cursor = tweepy.Cursor(tweepyAPI.search, q=query,
                                      tweet_mode="extended").items(limit)
        self.assertEqual(sum(1 for _ in search_cursor), limit)

        timeline_cursor = tweepy.Cursor(tweepyAPI.user_timeline, screen_name="POTUS",
                                        tweet_mode="extended").items(limit)
        self.assertEqual(sum(1 for _ in timeline_cursor), limit)

    def test_user_tweets(self):
        """Fetching an existing user's tweets succeeds without an error page."""
        resp = self.app.post('/filter_userTweets',
                             data=dict(userName="POTUS"),
                             follow_redirects=True)
        self.assertNotIn(b"Error", resp.data)
        self.assertEqual(resp.status_code, 200)

    def test_fake_user(self):
        """A nonexistent user name yields an error page (still HTTP 200)."""
        resp = self.app.post('/filter_userTweets',
                             data=dict(userName="abcdeXYZ12345noUserExistsWithThisUserName"),
                             follow_redirects=True)
        self.assertIn(b"Error", resp.data)
        self.assertEqual(resp.status_code, 200)

    def test_generic_query(self):
        """A generic search query succeeds without an error page."""
        resp = self.app.post('/filter_genericSearch',
                             data=dict(searchQuery="test"),
                             follow_redirects=True)
        self.assertNotIn(b"Error", resp.data)
        self.assertEqual(resp.status_code, 200)

    def test_trend_topics(self):
        """Retrieving trend topics succeeds without an error page."""
        resp = self.app.post('/filter_trendTopics_retrieve',
                             follow_redirects=True)
        self.assertNotIn(b"Error", resp.data)
        self.assertEqual(resp.status_code, 200)
if __name__ == "__main__":
unittest.main()
| 38.229508 | 112 | 0.620497 | import os
import unittest
import tweepy
from TweetViz import app
class FilterTests(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
def tearDown(self):
pass
def test_tweepy_api(self):
tweepyAuth = tweepy.OAuthHandler(
"7kErkRN6gM6hauMct2Olqqwkq",
"yuIZjc5Z5QCGjSss3X10sSBezWk08n4VKAnIumW4Fs5chr0LON")
tweepyAuth.set_access_token(
"3224914785-BwrhbViZQTo6KU3f7KDTHEstESQsM1P4euvlCii",
"oMdYFV6sz9M5lNaSp5qXu7YQg1MruUraT8KXvmvJg3nTA")
tweepyAPI = tweepy.API(tweepyAuth, wait_on_rate_limit=True)
query = "test"
limit = 20
tweets = tweepy.Cursor(tweepyAPI.search, q=query,
tweet_mode="extended").items(limit)
self.assertEqual(len([tweet for tweet in tweets]), limit)
tweets = tweepy.Cursor(tweepyAPI.user_timeline, screen_name="POTUS", tweet_mode="extended").items(limit)
self.assertEqual(len([tweet for tweet in tweets]), limit)
def test_user_tweets(self):
response = self.app.post('/filter_userTweets',
data=dict(userName="POTUS"),
follow_redirects=True)
self.assertNotIn(b"Error", response.data)
self.assertEqual(response.status_code, 200)
def test_fake_user(self):
response = self.app.post('/filter_userTweets',
data=dict(userName="abcdeXYZ12345noUserExistsWithThisUserName"),
follow_redirects=True)
self.assertIn(b"Error", response.data)
self.assertEqual(response.status_code, 200)
def test_generic_query(self):
response = self.app.post('/filter_genericSearch',
data=dict(searchQuery="test"),
follow_redirects=True)
self.assertNotIn(b"Error", response.data)
self.assertEqual(response.status_code, 200)
def test_trend_topics(self):
response = self.app.post('/filter_trendTopics_retrieve',
follow_redirects=True)
self.assertNotIn(b"Error", response.data)
self.assertEqual(response.status_code, 200)
if __name__ == "__main__":
unittest.main()
| true | true |
1c2b7abd38a5e3f4e0acc5577af2eeab442d813b | 5,147 | py | Python | sharpy/plans/step_gas.py | MadManSC2/sharpy-sc2 | 13950357df2db58033daab24f076e3ae83f0b2a8 | [
"MIT"
] | 1 | 2020-03-05T19:21:56.000Z | 2020-03-05T19:21:56.000Z | sharpy/plans/step_gas.py | MadManSC2/sharpy-sc2 | 13950357df2db58033daab24f076e3ae83f0b2a8 | [
"MIT"
] | null | null | null | sharpy/plans/step_gas.py | MadManSC2/sharpy-sc2 | 13950357df2db58033daab24f076e3ae83f0b2a8 | [
"MIT"
] | null | null | null | import sc2
from sharpy.plans.acts import ActBase
from sharpy.plans.require import RequireBase
from sc2 import UnitTypeId, BotAI, Race
from sc2.constants import ALL_GAS
from sc2.unit import Unit
from sharpy.knowledges import Knowledge
class StepBuildGas(ActBase):
    """Builds a new gas mining facility closest to vespene geyser with closest worker"""

    def __init__(self, to_count: int, requirement=None, skip=None):
        """
        :param to_count: number of gas harvesters to reach.
        :param requirement: optional condition that must hold before building.
        :param skip: optional condition that, when true, marks this step done.
        """
        assert to_count is not None and isinstance(to_count, int)
        assert requirement is None or isinstance(requirement, RequireBase)
        assert skip is None or isinstance(skip, RequireBase)
        super().__init__()

        self.requirement: RequireBase = requirement
        self.skip: RequireBase = skip
        self.to_count = to_count
        self.best_gas: Unit = None  # geyser chosen by is_done(), if any
        self.knowledge: Knowledge = None
        self.ai: BotAI = None
        self.all_types = ALL_GAS
        self.unit_type: UnitTypeId = None  # race-specific harvester type, set in start()

    async def debug_draw(self):
        if self.requirement is not None:
            await self.requirement.debug_draw()
        if self.skip is not None:
            await self.skip.debug_draw()

    async def start(self, knowledge: Knowledge):
        await super().start(knowledge)
        # Refinery / Assimilator / Extractor depending on our race.
        self.unit_type = sc2.race_gas.get(knowledge.my_race)
        if self.requirement is not None and hasattr(self.requirement, "start"):
            await self.requirement.start(knowledge)
        if self.skip is not None and hasattr(self.skip, "start"):
            await self.skip.start(knowledge)

    @property
    def active_harvester_count(self):
        """Gas structures that are still worth mining (or under construction),
        plus any harvester builds already pending."""
        def harvester_is_active(harvester: Unit) -> bool:
            # Count it while the geyser still has gas or the structure is
            # not finished yet.
            if harvester.vespene_contents > 100 or not harvester.is_ready:
                return True
            return False

        active_harvesters = self.ai.gas_buildings.filter(harvester_is_active)
        count = self.pending_build(self.unit_type)
        return len(active_harvesters) + count

    async def is_done(self):
        """Return True when no further harvester should be started.

        As a side effect, stores the richest free geyser (near a nearly
        finished base) in ``self.best_gas`` for ``execute`` to use.
        """
        active_harvester_count = self.active_harvester_count
        harvesters_own = self.ai.gas_buildings

        # We have more than the requested amount of harvesters.
        if active_harvester_count > self.to_count:
            return True

        # If a harvester has just finished, we need to move the worker away
        # from it, thus delaying done.
        delayed = False
        if active_harvester_count == self.to_count:
            for unit in harvesters_own.not_ready:
                if unit.build_progress < 0.05:
                    delayed = True
            if not delayed:
                return True

        # No point in building a harvester somewhere with less than 50 gas left.
        best_score = 50
        self.best_gas = None
        # Check all races' gas structures, in case a geyser was stolen.
        harvesters = [unit for unit in self.ai.all_units if unit.type_id in self.all_types]

        for townhall in self.ai.townhalls:  # type: Unit
            if not townhall.is_ready or townhall.build_progress < 0.9:
                # Only build gas for bases that are almost finished.
                continue
            for geyser in self.ai.vespene_geyser.closer_than(15, townhall):  # type: Unit
                exists = False
                for harvester in harvesters:  # type: Unit
                    if harvester.position.distance_to(geyser.position) <= 1:
                        exists = True
                        break
                if not exists:
                    score = geyser.vespene_contents
                    if score > best_score:
                        self.best_gas = geyser
                        # Fix: remember the best score found so far so the
                        # richest geyser wins; previously the threshold was
                        # never raised and the last qualifying geyser won.
                        best_score = score

        return self.best_gas is None and not delayed

    async def ready(self):
        if self.requirement is None:
            return True
        return self.requirement.check()

    async def execute(self) -> bool:
        # External check prevents us from building harvesters.
        if self.skip is not None and self.skip.check():
            return True
        if self.requirement is not None and not self.requirement.check():
            return False
        if await self.is_done():
            return True

        workers = self.knowledge.roles.free_workers
        should_build = self.active_harvester_count < self.to_count
        can_build = workers.exists and self.knowledge.can_afford(self.unit_type)

        if self.best_gas is not None and should_build and can_build:
            target = self.best_gas
            worker = workers.closest_to(target.position)
            self.ai.do(worker.build_gas(target))
            if self.ai.race == Race.Protoss:
                # Protoss only do something else after starting gas.
                mf = self.ai.mineral_field.closest_to(worker)
                self.ai.do(worker.gather(mf, queue=True))
            self.knowledge.print(f'Building {self.unit_type.name} to {target.position}')
        return False
| 38.125926 | 103 | 0.612201 | import sc2
from sharpy.plans.acts import ActBase
from sharpy.plans.require import RequireBase
from sc2 import UnitTypeId, BotAI, Race
from sc2.constants import ALL_GAS
from sc2.unit import Unit
from sharpy.knowledges import Knowledge
class StepBuildGas(ActBase):
    """Act that keeps building gas harvesters until ``to_count`` are active.

    ``requirement`` gates building; ``skip`` aborts the whole act when true.
    """
    def __init__(self, to_count: int, requirement=None, skip=None):
        # to_count: target number of active gas harvesters
        # requirement: optional RequireBase that must hold before building
        # skip: optional RequireBase that marks the act as satisfied when true
        assert to_count is not None and isinstance(to_count, int)
        assert requirement is None or isinstance(requirement, RequireBase)
        assert skip is None or isinstance(skip, RequireBase)
        super().__init__()
        self.requirement: RequireBase = requirement
        self.skip: RequireBase = skip
        self.to_count = to_count
        self.best_gas: Unit = None
        self.knowledge: Knowledge = None
        self.ai: BotAI = None
        self.all_types = ALL_GAS
        self.unit_type: UnitTypeId = None
    async def debug_draw(self):
        # forward debug drawing to the attached conditions, if any
        if self.requirement is not None:
            await self.requirement.debug_draw()
        if self.skip is not None:
            await self.skip.debug_draw()
    async def start(self, knowledge: Knowledge):
        await super().start(knowledge)
        # resolve the race-specific gas building via sc2.race_gas
        self.unit_type = sc2.race_gas.get(knowledge.my_race)
        if self.requirement is not None and hasattr(self.requirement, "start"):
            await self.requirement.start(knowledge)
        if self.skip is not None and hasattr(self.skip, "start"):
            await self.skip.start(knowledge)
    @property
    def active_harvester_count(self):
        # A harvester counts as active while it still has vespene left or is
        # not finished building; pending build orders count as well.
        def harvester_is_active(harvester: Unit) -> bool:
            if harvester.vespene_contents > 100 or not harvester.is_ready:
                return True
            return False
        active_harvesters = self.ai.gas_buildings.filter(harvester_is_active)
        count = self.pending_build(self.unit_type)
        return len(active_harvesters) + count
    async def is_done(self):
        """Return True when no more harvesters are needed.

        Side effect: selects ``self.best_gas`` for execute() to build on.
        """
        active_harvester_count = self.active_harvester_count
        unit: Unit
        harvesters_own = self.ai.gas_buildings
        if active_harvester_count > self.to_count:
            return True
        delayed = False
        if active_harvester_count == self.to_count:
            # a barely-started harvester counts as "delayed", keep searching
            for unit in harvesters_own.not_ready:
                if unit.build_progress < 0.05:
                    delayed = True
            if not delayed:
                return True
        # No point in building a harvester somewhere with less than 50 gas left
        best_score = 50
        self.best_gas = None
        harvesters: list = []
        for unit in self.ai.all_units:
            # check all races' gas buildings in case a geyser was stolen
            if unit.type_id in self.all_types:
                harvesters.append(unit)
        for townhall in self.ai.townhalls:  # type: Unit
            if not townhall.is_ready or townhall.build_progress < 0.9:
                # only build gas for bases that are almost finished
                continue
            for geyser in self.ai.vespene_geyser.closer_than(15, townhall):  # type: Unit
                exists = False
                for harvester in harvesters:  # type: Unit
                    if harvester.position.distance_to(geyser.position) <= 1:
                        exists = True
                        break
                if not exists:
                    score = geyser.vespene_contents
                    if score > best_score:
                        self.best_gas = geyser
        return self.best_gas is None and not delayed
    async def ready(self):
        if self.requirement is None:
            return True
        return self.requirement.check()
    async def execute(self) -> bool:
        """Build one gas harvester when allowed; True means the act is satisfied."""
        # external skip check prevents us from building harvesters
        if self.skip is not None and self.skip.check():
            return True
        if self.requirement is not None and not self.requirement.check():
            return False
        # is_done() also selects self.best_gas for the build order below
        if await self.is_done():
            return True
        workers = self.knowledge.roles.free_workers
        should_build = self.active_harvester_count < self.to_count
        can_build = workers.exists and self.knowledge.can_afford(self.unit_type)
        if self.best_gas is not None and should_build and can_build:
            target = self.best_gas
            worker = workers.closest_to(target.position)
            self.ai.do(worker.build_gas(target))
            if self.ai.race == Race.Protoss:
                # Protoss only do something else after starting gas
                mf = self.ai.mineral_field.closest_to(worker)
                self.ai.do(worker.gather(mf, queue=True))
            self.knowledge.print(f'Building {self.unit_type.name} to {target.position}')
        return False
| true | true |
1c2b7b3937375f7903705e74282e9b50748d9d58 | 2,515 | py | Python | first_app/routes.py | Stocastico/tutorial_flask | c387420a8cc7b4756bfb970636315ce03ed34b94 | [
"MIT"
] | null | null | null | first_app/routes.py | Stocastico/tutorial_flask | c387420a8cc7b4756bfb970636315ce03ed34b94 | [
"MIT"
] | null | null | null | first_app/routes.py | Stocastico/tutorial_flask | c387420a8cc7b4756bfb970636315ce03ed34b94 | [
"MIT"
] | null | null | null | from flask import render_template, flash, redirect, request, url_for
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from flask_dance.consumer import oauth_authorized
from flask_dance.contrib.twitter import twitter
from sqlalchemy.orm.exc import NoResultFound
from first_app import app, db, twitter_blueprint
from first_app.forms import LoginForm, RegistrationForm
from first_app.models import User
@app.route('/')
@app.route('/index')
@login_required
def index():
    """Render the home page with a static list of demo posts."""
    demo_posts = [
        {'author': {'username': 'John'}, 'body': 'Bla bla bla'},
        {'author': {'username': 'Susan'}, 'body': 'Foo bar baz'},
    ]
    return render_template('index.html', title='Home', posts=demo_posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the requested page or the index."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid Username or Password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # only follow relative 'next' targets to avoid open-redirect attacks
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
    """Terminate the current session and return to the home page."""
    logout_user()
    home = url_for('index')
    return redirect(home)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account, then send the user to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        # store only the password hash, never the raw password
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route('/user/<username>')
@login_required
def user(username):
    """Show a user's profile with placeholder posts; 404 for unknown users."""
    user = User.query.filter_by(username=username).first_or_404()
    posts = [
        {'author': user, 'body': 'Test post #1'},
        {'author': user, 'body': 'Test post #2'}
    ]
    return render_template('user.html', user=user, posts=posts)
| 32.662338 | 77 | 0.669185 | from flask import render_template, flash, redirect, request, url_for
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from flask_dance.consumer import oauth_authorized
from flask_dance.contrib.twitter import twitter
from sqlalchemy.orm.exc import NoResultFound
from first_app import app, db, twitter_blueprint
from first_app.forms import LoginForm, RegistrationForm
from first_app.models import User
@app.route('/')
@app.route('/index')
@login_required
def index():
    """Render the home page with a static list of demo posts."""
    posts = [
        {
            'author': {'username': 'John'},
            'body': 'Bla bla bla'
        },
        {
            'author': {'username': 'Susan'},
            'body': 'Foo bar baz'
        },
    ]
    return render_template('index.html', title='Home', posts=posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the requested page or the index."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid Username or Password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # only follow relative 'next' targets to avoid open-redirect attacks
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
    """Terminate the current session and return to the home page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account, then send the user to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        # store only the password hash, never the raw password
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route('/user/<username>')
@login_required
def user(username):
    """Show a user's profile with placeholder posts; 404 for unknown users."""
    user = User.query.filter_by(username=username).first_or_404()
    posts = [
        {'author': user, 'body': 'Test post #1'},
        {'author': user, 'body': 'Test post #2'}
    ]
    return render_template('user.html', user=user, posts=posts)
| true | true |
1c2b7bae31d71feb8023b416cd5fc16ab6aca20e | 911 | py | Python | python/mxnet/gluon/data/vision/__init__.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 228 | 2018-12-06T09:34:01.000Z | 2022-03-08T17:02:02.000Z | python/mxnet/gluon/data/vision/__init__.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 187 | 2018-03-16T23:44:43.000Z | 2021-12-14T21:19:54.000Z | python/mxnet/gluon/data/vision/__init__.py | Vikas-kum/incubator-mxnet | ba02bf2fe2da423caa59ddb3fd5e433b90b730bf | [
"Apache-2.0"
] | 58 | 2016-10-27T07:37:08.000Z | 2021-07-03T16:50:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Vision utilities."""
from .datasets import *
from . import transforms
| 36.44 | 62 | 0.765093 |
from .datasets import *
from . import transforms
| true | true |
1c2b7c9c6596d06788472fa738d06376431b0fb9 | 1,854 | py | Python | tests/lists_tests/test_index.py | lycantropos/cppbuiltins | c1facfe06f8af33201cd64e713da93bbc14763f2 | [
"MIT"
] | 1 | 2021-08-15T11:35:45.000Z | 2021-08-15T11:35:45.000Z | tests/lists_tests/test_index.py | lycantropos/cppbuiltins | c1facfe06f8af33201cd64e713da93bbc14763f2 | [
"MIT"
] | null | null | null | tests/lists_tests/test_index.py | lycantropos/cppbuiltins | c1facfe06f8af33201cd64e713da93bbc14763f2 | [
"MIT"
] | null | null | null | from typing import (Any,
Tuple)
import pytest
from hypothesis import given
from tests.utils import AlternativeNativeListsPair
from . import strategies
@given(strategies.non_empty_lists_pairs_with_their_elements)
def test_defaults(pair_with_value: Tuple[AlternativeNativeListsPair, Any]
                  ) -> None:
    """index() without bounds must agree between both list implementations."""
    (alternative, native), value = pair_with_value
    assert alternative.index(value) == native.index(value)
@given(strategies.non_empty_lists_pairs_with_starts_stops_and_their_elements)
def test_full(pair_with_start_stop_and_value
              : Tuple[AlternativeNativeListsPair, int, int, Any]) -> None:
    """index() with explicit bounds must agree between both implementations."""
    (alternative, native), start, stop, value = pair_with_start_stop_and_value
    assert (alternative.index(value, start, stop)
            == native.index(value, start, stop))
@given(strategies.lists_pairs_with_non_their_elements)
def test_defaults_missing(pair_with_value
                          : Tuple[AlternativeNativeListsPair, Any]) -> None:
    """Both implementations raise ValueError for an absent element."""
    (alternative, native), value = pair_with_value
    for candidate in (alternative, native):
        with pytest.raises(ValueError):
            candidate.index(value)
@given(strategies.lists_pairs_with_starts_stops_and_non_their_elements)
def test_full_missing(pair_with_value
                      : Tuple[AlternativeNativeListsPair, int, int, Any]
                      ) -> None:
    """Both implementations raise ValueError for an element absent in range."""
    (alternative, native), start, stop, value = pair_with_value
    for candidate in (alternative, native):
        with pytest.raises(ValueError):
            candidate.index(value, start, stop)
| 34.333333 | 79 | 0.695254 | from typing import (Any,
Tuple)
import pytest
from hypothesis import given
from tests.utils import AlternativeNativeListsPair
from . import strategies
@given(strategies.non_empty_lists_pairs_with_their_elements)
def test_defaults(pair_with_value: Tuple[AlternativeNativeListsPair, Any]
                  ) -> None:
    """index() without bounds must agree between both list implementations."""
    (alternative, native), value = pair_with_value
    alternative_result, native_result = (alternative.index(value),
                                         native.index(value))
    assert alternative_result == native_result
@given(strategies.non_empty_lists_pairs_with_starts_stops_and_their_elements)
def test_full(pair_with_start_stop_and_value
              : Tuple[AlternativeNativeListsPair, int, int, Any]) -> None:
    """index() with explicit bounds must agree between both implementations."""
    (alternative, native), start, stop, value = pair_with_start_stop_and_value
    alternative_result, native_result = (alternative.index(value, start, stop),
                                         native.index(value, start, stop))
    assert alternative_result == native_result
@given(strategies.lists_pairs_with_non_their_elements)
def test_defaults_missing(pair_with_value
                          : Tuple[AlternativeNativeListsPair, Any]) -> None:
    """Both implementations raise ValueError for an absent element."""
    (alternative, native), value = pair_with_value
    with pytest.raises(ValueError):
        alternative.index(value)
    with pytest.raises(ValueError):
        native.index(value)
@given(strategies.lists_pairs_with_starts_stops_and_non_their_elements)
def test_full_missing(pair_with_value
                      : Tuple[AlternativeNativeListsPair, int, int, Any]
                      ) -> None:
    """Both implementations raise ValueError for an element absent in range."""
    (alternative, native), start, stop, value = pair_with_value
    with pytest.raises(ValueError):
        alternative.index(value, start, stop)
    with pytest.raises(ValueError):
        native.index(value, start, stop)
| true | true |
1c2b7cdc7e4f5fc598df2df5d55ec25514d76efa | 16,287 | py | Python | elasticsearch_dsl/document.py | shentianyi/elasticsearch-dsl-py | eec61aa7205ca9a6b21d6f9aa604fcb4b8d0f113 | [
"Apache-2.0"
] | 1 | 2021-02-25T04:35:51.000Z | 2021-02-25T04:35:51.000Z | elasticsearch_dsl/document.py | shentianyi/elasticsearch-dsl-py | eec61aa7205ca9a6b21d6f9aa604fcb4b8d0f113 | [
"Apache-2.0"
] | null | null | null | elasticsearch_dsl/document.py | shentianyi/elasticsearch-dsl-py | eec61aa7205ca9a6b21d6f9aa604fcb4b8d0f113 | [
"Apache-2.0"
] | 1 | 2019-05-30T06:24:31.000Z | 2019-05-30T06:24:31.000Z | try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from fnmatch import fnmatch
from elasticsearch.exceptions import NotFoundError, RequestError
from six import iteritems, add_metaclass, string_types
from .field import Field
from .mapping import Mapping
from .utils import ObjectBase, merge, DOC_META_FIELDS, META_FIELDS
from .search import Search
from .connections import connections
from .exceptions import ValidationException, IllegalOperation
from .index import Index
class MetaField(object):
    """Declarative holder for a mapping meta field's args and kwargs."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
class DocumentMeta(type):
    """Metaclass that collects declared Field attributes into a Mapping.

    The resulting DocumentOptions is exposed on the class as ``_doc_type``.
    """
    def __new__(cls, name, bases, attrs):
        # DocumentOptions pops the declared Field attrs out of ``attrs`` in place
        attrs['_doc_type'] = DocumentOptions(name, bases, attrs)
        return super(DocumentMeta, cls).__new__(cls, name, bases, attrs)
class IndexMeta(DocumentMeta):
    """Metaclass attaching an ``_index`` object to user-defined Documents."""
    # global flag to guard us from associating an Index with the base Document
    # class, only user defined subclasses should have an _index attr
    _document_initialized = False
    def __new__(cls, name, bases, attrs):
        new_cls = super(IndexMeta, cls).__new__(cls, name, bases, attrs)
        if cls._document_initialized:
            # consume the inner ``Index`` options class, if declared
            index_opts = attrs.pop('Index', None)
            new_cls._index = cls.construct_index(index_opts, bases)
            new_cls._index.document(new_cls)
        # the first class created through this metaclass is Document itself
        cls._document_initialized = True
        return new_cls
    @classmethod
    def construct_index(cls, opts, bases):
        """Build an Index from the ``Index`` inner class, inherit, or fall back."""
        if opts is None:
            # no ``Index`` declared: inherit the parent's index, if any
            for b in bases:
                if hasattr(b, '_index'):
                    return b._index
            # create an all-matching index pattern
            return Index('*')
        i = Index(
            getattr(opts, 'name', '*'),
            using=getattr(opts, 'using', 'default')
        )
        i.settings(**getattr(opts, 'settings', {}))
        i.aliases(**getattr(opts, 'aliases', {}))
        for a in getattr(opts, 'analyzers', ()):
            i.analyzer(a)
        return i
class DocumentOptions(object):
    """Per-class options: builds the field Mapping from the class body and ``Meta``."""
    def __init__(self, name, bases, attrs):
        meta = attrs.pop('Meta', None)
        # get doc_type name, if not defined use 'doc'
        doc_type = getattr(meta, 'doc_type', 'doc')
        # create the mapping instance
        self.mapping = getattr(meta, 'mapping', Mapping(doc_type))
        # register all declared fields into the mapping
        for name, value in list(iteritems(attrs)):
            if isinstance(value, Field):
                self.mapping.field(name, value)
                del attrs[name]
        # add all the mappings for meta fields
        for name in dir(meta):
            if isinstance(getattr(meta, name, None), MetaField):
                params = getattr(meta, name)
                self.mapping.meta(name, *params.args, **params.kwargs)
        # document inheritance - include the fields from parents' mappings
        for b in bases:
            if hasattr(b, '_doc_type') and hasattr(b._doc_type, 'mapping'):
                self.mapping.update(b._doc_type.mapping, update_only=True)
    @property
    def name(self):
        # name of the doc_type as stored in the mapping
        return self.mapping.properties.name
@add_metaclass(DocumentMeta)
class InnerDoc(ObjectBase):
    """
    Common class for inner documents like Object or Nested
    """
    @classmethod
    def from_es(cls, data, data_only=False):
        """Deserialize; ``data_only=True`` wraps a raw source dict as ``_source``."""
        if data_only:
            data = {'_source': data}
        return super(InnerDoc, cls).from_es(data)
@add_metaclass(IndexMeta)
class Document(ObjectBase):
    """
    Model-like class for persisting documents in elasticsearch.
    """
    @classmethod
    def _matches(cls, hit):
        # a raw hit belongs to this class when its index matches our (possibly
        # wildcard) index pattern and its doc_type matches ours
        return fnmatch(hit.get('_index', ''), cls._index._name) \
            and cls._doc_type.name == hit.get('_type')
    @classmethod
    def _get_using(cls, using=None):
        # explicit connection alias wins over the one configured on the index
        return using or cls._index._using
    @classmethod
    def _get_connection(cls, using=None):
        return connections.get_connection(cls._get_using(using))
    @classmethod
    def _default_index(cls, index=None):
        # explicit index name wins over the one configured on the class
        return index or cls._index._name
    @classmethod
    def init(cls, index=None, using=None):
        """
        Create the index and populate the mappings in elasticsearch.
        """
        i = cls._index
        if index:
            i = i.clone(name=index)
        i.save(using=using)
    def _get_index(self, index=None, required=True):
        # resolution order: explicit argument, per-instance meta, class index
        if index is None:
            index = getattr(self.meta, 'index', None)
        if index is None:
            index = getattr(self._index, '_name', None)
        if index is None and required:
            raise ValidationException('No index')
        if index and '*' in index:
            raise ValidationException('You cannot write to a wildcard index.')
        return index
    def __repr__(self):
        # e.g. MyDoc(index='blog', doc_type='doc', id=42)
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % (key, getattr(self.meta, key)) for key in
                      ('index', 'doc_type', 'id') if key in self.meta)
        )
    @classmethod
    def search(cls, using=None, index=None):
        """
        Create an :class:`~elasticsearch_dsl.Search` instance that will search
        over this ``Document``.
        """
        return Search(
            using=cls._get_using(using),
            index=cls._default_index(index),
            doc_type=[cls]
        )
    @classmethod
    def get(cls, id, using=None, index=None, **kwargs):
        """
        Retrieve a single document from elasticsearch using its ``id``.
        :arg id: ``id`` of the document to be retrieved
        :arg index: elasticsearch index to use, if the ``Document`` is
            associated with an index this can be omitted.
        :arg using: connection alias to use, defaults to ``'default'``
        Any additional keyword arguments will be passed to
        ``Elasticsearch.get`` unchanged.
        """
        es = cls._get_connection(using)
        doc = es.get(
            index=cls._default_index(index),
            doc_type=cls._doc_type.name,
            id=id,
            **kwargs
        )
        if not doc.get('found', False):
            return None
        return cls.from_es(doc)
    @classmethod
    def mget(cls, docs, using=None, index=None, raise_on_error=True,
             missing='none', **kwargs):
        """
        Retrieve multiple documents by their ids. Returns a list of instances
        in the same order as requested.
        :arg docs: list of ids of the documents to be retrieved or a list
            of document specifications as per
            https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html
        :arg index: elasticsearch index to use, if the ``Document`` is
            associated with an index this can be omitted.
        :arg using: connection alias to use, defaults to ``'default'``
        :arg missing: what to do when one of the documents requested is not
            found. Valid options are ``'none'`` (use ``None``), ``'raise'`` (raise
            ``NotFoundError``) or ``'skip'`` (ignore the missing document).
        Any additional keyword arguments will be passed to
        ``Elasticsearch.mget`` unchanged.
        """
        if missing not in ('raise', 'skip', 'none'):
            raise ValueError("'missing' must be 'raise', 'skip', or 'none'.")
        es = cls._get_connection(using)
        body = {
            'docs': [
                doc if isinstance(doc, collections_abc.Mapping) else {'_id': doc}
                for doc in docs
            ]
        }
        results = es.mget(
            body,
            index=cls._default_index(index),
            doc_type=cls._doc_type.name,
            **kwargs
        )
        objs, error_docs, missing_docs = [], [], []
        for doc in results['docs']:
            if doc.get('found'):
                if error_docs or missing_docs:
                    # We're going to raise an exception anyway, so avoid an
                    # expensive call to cls.from_es().
                    continue
                objs.append(cls.from_es(doc))
            elif doc.get('error'):
                if raise_on_error:
                    error_docs.append(doc)
                if missing == 'none':
                    objs.append(None)
            # The doc didn't cause an error, but the doc also wasn't found.
            elif missing == 'raise':
                missing_docs.append(doc)
            elif missing == 'none':
                objs.append(None)
        if error_docs:
            error_ids = [doc['_id'] for doc in error_docs]
            message = 'Required routing not provided for documents %s.'
            message %= ', '.join(error_ids)
            raise RequestError(400, message, error_docs)
        if missing_docs:
            missing_ids = [doc['_id'] for doc in missing_docs]
            message = 'Documents %s not found.' % ', '.join(missing_ids)
            raise NotFoundError(404, message, {'docs': missing_docs})
        return objs
    def delete(self, using=None, index=None, **kwargs):
        """
        Delete the instance in elasticsearch.
        :arg index: elasticsearch index to use, if the ``Document`` is
            associated with an index this can be omitted.
        :arg using: connection alias to use, defaults to ``'default'``
        Any additional keyword arguments will be passed to
        ``Elasticsearch.delete`` unchanged.
        """
        es = self._get_connection(using)
        # extract routing etc from meta
        doc_meta = dict(
            (k, self.meta[k])
            for k in DOC_META_FIELDS
            if k in self.meta
        )
        doc_meta.update(kwargs)
        es.delete(
            index=self._get_index(index),
            doc_type=self._doc_type.name,
            **doc_meta
        )
    def to_dict(self, include_meta=False, skip_empty=True):
        """
        Serialize the instance into a dictionary so that it can be saved in elasticsearch.
        :arg include_meta: if set to ``True`` will include all the metadata
            (``_index``, ``_type``, ``_id`` etc). Otherwise just the document's
            data is serialized. This is useful when passing multiple instances into
            ``elasticsearch.helpers.bulk``.
        :arg skip_empty: if set to ``False`` will cause empty values (``None``,
            ``[]``, ``{}``) to be left on the document. Those values will be
            stripped out otherwise as they make no difference in elasticsearch.
        """
        d = super(Document, self).to_dict(skip_empty=skip_empty)
        if not include_meta:
            return d
        meta = dict(
            ('_' + k, self.meta[k])
            for k in DOC_META_FIELDS
            if k in self.meta
        )
        # in case of to_dict include the index unlike save/update/delete
        index = self._get_index(required=False)
        if index is not None:
            meta['_index'] = index
        meta['_type'] = self._doc_type.name
        meta['_source'] = d
        return meta
    def update(self, using=None, index=None, detect_noop=True,
               doc_as_upsert=False, refresh=False, retry_on_conflict=None,
               script=None, script_id=None, scripted_upsert=False, upsert=None,
               **fields):
        """
        Partial update of the document, specify fields you wish to update and
        both the instance and the document in elasticsearch will be updated::
            doc = MyDocument(title='Document Title!')
            doc.save()
            doc.update(title='New Document Title!')
        :arg index: elasticsearch index to use, if the ``Document`` is
            associated with an index this can be omitted.
        :arg using: connection alias to use, defaults to ``'default'``
        :arg detect_noop: Set to ``False`` to disable noop detection.
        :arg refresh: Control when the changes made by this request are visible
            to search. Set to ``True`` for immediate effect.
        :arg retry_on_conflict: In between the get and indexing phases of the
            update, it is possible that another process might have already
            updated the same document. By default, the update will fail with a
            version conflict exception. The retry_on_conflict parameter
            controls how many times to retry the update before finally throwing
            an exception.
        :arg doc_as_upsert: Instead of sending a partial doc plus an upsert
            doc, setting doc_as_upsert to true will use the contents of doc as
            the upsert value
        :arg script: source of a painless script to run instead of a partial doc
        :arg script_id: id of a stored script to run instead of a partial doc
        :arg scripted_upsert: run the script even when the document is missing
        :arg upsert: document to index when the target does not yet exist
        """
        body = {
            'doc_as_upsert': doc_as_upsert,
            'detect_noop': detect_noop,
        }
        # scripted update
        if script or script_id:
            if upsert is not None:
                body['upsert'] = upsert
            if script:
                script = {'source': script}
            else:
                script = {'id': script_id}
            # remaining keyword args become the script's parameters
            script['params'] = fields
            body['script'] = script
            body['scripted_upsert'] = scripted_upsert
        # partial document update
        else:
            if not fields:
                raise IllegalOperation('You cannot call update() without updating individual fields or a script. '
                                       'If you wish to update the entire object use save().')
            # update given fields locally
            merge(self, fields)
            # prepare data for ES
            values = self.to_dict()
            # if fields were given: partial update
            body['doc'] = dict(
                (k, values.get(k))
                for k in fields.keys()
            )
        # extract routing etc from meta
        doc_meta = dict(
            (k, self.meta[k])
            for k in DOC_META_FIELDS
            if k in self.meta
        )
        if retry_on_conflict is not None:
            doc_meta['retry_on_conflict'] = retry_on_conflict
        meta = self._get_connection(using).update(
            index=self._get_index(index),
            doc_type=self._doc_type.name,
            body=body,
            refresh=refresh,
            **doc_meta
        )
        # update meta information from ES
        for k in META_FIELDS:
            if '_' + k in meta:
                setattr(self.meta, k, meta['_' + k])
    def save(self, using=None, index=None, validate=True, skip_empty=True, **kwargs):
        """
        Save the document into elasticsearch. If the document doesn't exist it
        is created, it is overwritten otherwise. Returns ``True`` if this
        operations resulted in new document being created.
        :arg index: elasticsearch index to use, if the ``Document`` is
            associated with an index this can be omitted.
        :arg using: connection alias to use, defaults to ``'default'``
        :arg validate: set to ``False`` to skip validating the document
        :arg skip_empty: if set to ``False`` will cause empty values (``None``,
            ``[]``, ``{}``) to be left on the document. Those values will be
            stripped out otherwise as they make no difference in elasticsearch.
        Any additional keyword arguments will be passed to
        ``Elasticsearch.index`` unchanged.
        """
        if validate:
            self.full_clean()
        es = self._get_connection(using)
        # extract routing etc from meta
        doc_meta = dict(
            (k, self.meta[k])
            for k in DOC_META_FIELDS
            if k in self.meta
        )
        doc_meta.update(kwargs)
        meta = es.index(
            index=self._get_index(index),
            doc_type=self._doc_type.name,
            body=self.to_dict(skip_empty=skip_empty),
            **doc_meta
        )
        # update meta information from ES
        for k in META_FIELDS:
            if '_' + k in meta:
                setattr(self.meta, k, meta['_' + k])
        # return True/False if the document has been created/updated
        return meta['result'] == 'created'
# limited backwards compatibility, to be removed in 7.0.0
# ``DocType`` was the pre-6.x name for ``Document``; kept as an alias.
DocType = Document
| 36.113082 | 114 | 0.585498 | try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
from fnmatch import fnmatch
from elasticsearch.exceptions import NotFoundError, RequestError
from six import iteritems, add_metaclass, string_types
from .field import Field
from .mapping import Mapping
from .utils import ObjectBase, merge, DOC_META_FIELDS, META_FIELDS
from .search import Search
from .connections import connections
from .exceptions import ValidationException, IllegalOperation
from .index import Index
class MetaField(object):
    """Declarative holder for a mapping meta field's args and kwargs."""
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs
class DocumentMeta(type):
    """Metaclass that collects declared Field attributes into ``_doc_type``."""
    def __new__(cls, name, bases, attrs):
        # DocumentOptions pops the declared Field attrs out of ``attrs`` in place
        attrs['_doc_type'] = DocumentOptions(name, bases, attrs)
        return super(DocumentMeta, cls).__new__(cls, name, bases, attrs)
class IndexMeta(DocumentMeta):
    """Metaclass attaching an ``_index`` object to user-defined Documents."""
    # guard flag: only user-defined subclasses get an _index attribute
    _document_initialized = False
    def __new__(cls, name, bases, attrs):
        new_cls = super(IndexMeta, cls).__new__(cls, name, bases, attrs)
        if cls._document_initialized:
            # consume the inner ``Index`` options class, if declared
            index_opts = attrs.pop('Index', None)
            new_cls._index = cls.construct_index(index_opts, bases)
            new_cls._index.document(new_cls)
        # the first class created through this metaclass is Document itself
        cls._document_initialized = True
        return new_cls
    @classmethod
    def construct_index(cls, opts, bases):
        """Build an Index from the ``Index`` inner class, inherit, or fall back."""
        if opts is None:
            # no ``Index`` declared: inherit the parent's index, if any
            for b in bases:
                if hasattr(b, '_index'):
                    return b._index
            # fall back to an all-matching index pattern
            return Index('*')
        i = Index(
            getattr(opts, 'name', '*'),
            using=getattr(opts, 'using', 'default')
        )
        i.settings(**getattr(opts, 'settings', {}))
        i.aliases(**getattr(opts, 'aliases', {}))
        for a in getattr(opts, 'analyzers', ()):
            i.analyzer(a)
        return i
class DocumentOptions(object):
    """Per-class options: builds the field Mapping from the class body and ``Meta``."""
    def __init__(self, name, bases, attrs):
        meta = attrs.pop('Meta', None)
        # doc_type name defaults to 'doc'
        doc_type = getattr(meta, 'doc_type', 'doc')
        self.mapping = getattr(meta, 'mapping', Mapping(doc_type))
        # register all declared fields into the mapping
        for name, value in list(iteritems(attrs)):
            if isinstance(value, Field):
                self.mapping.field(name, value)
                del attrs[name]
        # add all the mappings for meta fields
        for name in dir(meta):
            if isinstance(getattr(meta, name, None), MetaField):
                params = getattr(meta, name)
                self.mapping.meta(name, *params.args, **params.kwargs)
        # document inheritance - include the fields from parents' mappings
        for b in bases:
            if hasattr(b, '_doc_type') and hasattr(b._doc_type, 'mapping'):
                self.mapping.update(b._doc_type.mapping, update_only=True)
    @property
    def name(self):
        # name of the doc_type as stored in the mapping
        return self.mapping.properties.name
@add_metaclass(DocumentMeta)
class InnerDoc(ObjectBase):
    """Common class for inner documents like Object or Nested."""
    @classmethod
    def from_es(cls, data, data_only=False):
        """Deserialize; ``data_only=True`` wraps a raw source dict as ``_source``."""
        if data_only:
            data = {'_source': data}
        return super(InnerDoc, cls).from_es(data)
@add_metaclass(IndexMeta)
class Document(ObjectBase):
@classmethod
def _matches(cls, hit):
return fnmatch(hit.get('_index', ''), cls._index._name) \
and cls._doc_type.name == hit.get('_type')
@classmethod
def _get_using(cls, using=None):
return using or cls._index._using
@classmethod
def _get_connection(cls, using=None):
return connections.get_connection(cls._get_using(using))
@classmethod
def _default_index(cls, index=None):
return index or cls._index._name
@classmethod
def init(cls, index=None, using=None):
i = cls._index
if index:
i = i.clone(name=index)
i.save(using=using)
def _get_index(self, index=None, required=True):
if index is None:
index = getattr(self.meta, 'index', None)
if index is None:
index = getattr(self._index, '_name', None)
if index is None and required:
raise ValidationException('No index')
if index and '*' in index:
raise ValidationException('You cannot write to a wildcard index.')
return index
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (key, getattr(self.meta, key)) for key in
('index', 'doc_type', 'id') if key in self.meta)
)
@classmethod
def search(cls, using=None, index=None):
return Search(
using=cls._get_using(using),
index=cls._default_index(index),
doc_type=[cls]
)
@classmethod
def get(cls, id, using=None, index=None, **kwargs):
es = cls._get_connection(using)
doc = es.get(
index=cls._default_index(index),
doc_type=cls._doc_type.name,
id=id,
**kwargs
)
if not doc.get('found', False):
return None
return cls.from_es(doc)
@classmethod
def mget(cls, docs, using=None, index=None, raise_on_error=True,
missing='none', **kwargs):
if missing not in ('raise', 'skip', 'none'):
raise ValueError("'missing' must be 'raise', 'skip', or 'none'.")
es = cls._get_connection(using)
body = {
'docs': [
doc if isinstance(doc, collections_abc.Mapping) else {'_id': doc}
for doc in docs
]
}
results = es.mget(
body,
index=cls._default_index(index),
doc_type=cls._doc_type.name,
**kwargs
)
objs, error_docs, missing_docs = [], [], []
for doc in results['docs']:
if doc.get('found'):
if error_docs or missing_docs:
# We're going to raise an exception anyway, so avoid an
continue
objs.append(cls.from_es(doc))
elif doc.get('error'):
if raise_on_error:
error_docs.append(doc)
if missing == 'none':
objs.append(None)
elif missing == 'raise':
missing_docs.append(doc)
elif missing == 'none':
objs.append(None)
if error_docs:
error_ids = [doc['_id'] for doc in error_docs]
message = 'Required routing not provided for documents %s.'
message %= ', '.join(error_ids)
raise RequestError(400, message, error_docs)
if missing_docs:
missing_ids = [doc['_id'] for doc in missing_docs]
message = 'Documents %s not found.' % ', '.join(missing_ids)
raise NotFoundError(404, message, {'docs': missing_docs})
return objs
def delete(self, using=None, index=None, **kwargs):
es = self._get_connection(using)
doc_meta = dict(
(k, self.meta[k])
for k in DOC_META_FIELDS
if k in self.meta
)
doc_meta.update(kwargs)
es.delete(
index=self._get_index(index),
doc_type=self._doc_type.name,
**doc_meta
)
def to_dict(self, include_meta=False, skip_empty=True):
d = super(Document, self).to_dict(skip_empty=skip_empty)
if not include_meta:
return d
meta = dict(
('_' + k, self.meta[k])
for k in DOC_META_FIELDS
if k in self.meta
)
index = self._get_index(required=False)
if index is not None:
meta['_index'] = index
meta['_type'] = self._doc_type.name
meta['_source'] = d
return meta
    def update(self, using=None, index=None, detect_noop=True,
               doc_as_upsert=False, refresh=False, retry_on_conflict=None,
               script=None, script_id=None, scripted_upsert=False, upsert=None,
               **fields):
        """Partially update this document in Elasticsearch.

        Either pass concrete ``fields`` to merge into the document, or a
        ``script``/``script_id`` (with its params taken from ``fields``) to
        run server side.  Metadata returned by Elasticsearch (e.g. the new
        version) is copied back onto ``self.meta``.

        :param detect_noop: let ES skip the write when nothing changed
        :param doc_as_upsert: create the document from ``fields`` if missing
        :param retry_on_conflict: number of retries on version conflicts
        :param upsert: document body used for the upsert in script mode
        :raises IllegalOperation: when called with neither fields nor script
        """
        body = {
            'doc_as_upsert': doc_as_upsert,
            'detect_noop': detect_noop,
        }
        if script or script_id:
            if upsert is not None:
                body['upsert'] = upsert
            if script:
                # inline (source) script takes precedence over a stored one
                script = {'source': script}
            else:
                script = {'id': script_id}
            script['params'] = fields
            body['script'] = script
            body['scripted_upsert'] = scripted_upsert
        else:
            if not fields:
                raise IllegalOperation('You cannot call update() without updating individual fields or a script. '
                                       'If you wish to update the entire object use save().')
            # update the local copy first, then send only the changed keys
            merge(self, fields)
            values = self.to_dict()
            body['doc'] = dict(
                (k, values.get(k))
                for k in fields.keys()
            )
        # document metadata (id, routing, ...) goes as request parameters
        doc_meta = dict(
            (k, self.meta[k])
            for k in DOC_META_FIELDS
            if k in self.meta
        )
        if retry_on_conflict is not None:
            doc_meta['retry_on_conflict'] = retry_on_conflict
        meta = self._get_connection(using).update(
            index=self._get_index(index),
            doc_type=self._doc_type.name,
            body=body,
            refresh=refresh,
            **doc_meta
        )
        # mirror the metadata ES returned (e.g. _version) onto this object
        for k in META_FIELDS:
            if '_' + k in meta:
                setattr(self.meta, k, meta['_' + k])
def save(self, using=None, index=None, validate=True, skip_empty=True, **kwargs):
if validate:
self.full_clean()
es = self._get_connection(using)
doc_meta = dict(
(k, self.meta[k])
for k in DOC_META_FIELDS
if k in self.meta
)
doc_meta.update(kwargs)
meta = es.index(
index=self._get_index(index),
doc_type=self._doc_type.name,
body=self.to_dict(skip_empty=skip_empty),
**doc_meta
)
for k in META_FIELDS:
if '_' + k in meta:
setattr(self.meta, k, meta['_' + k])
return meta['result'] == 'created'
DocType = Document
| true | true |
1c2b7e0b08d6ae42b877f6e8ad11f555c397a023 | 14,260 | py | Python | neutron/tests/unit/nuage/test_syncmanager.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | 1 | 2021-02-19T05:54:04.000Z | 2021-02-19T05:54:04.000Z | neutron/tests/unit/nuage/test_syncmanager.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/nuage/test_syncmanager.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | 2 | 2016-11-29T11:22:58.000Z | 2016-11-29T11:54:41.000Z | # Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
from neutron import context
from neutron.openstack.common import uuidutils
from neutron.plugins.nuage import nuage_models
from neutron.plugins.nuage import syncmanager as sync
from neutron.tests.unit.nuage import test_netpartition
from neutron.tests.unit.nuage import test_nuage_plugin
from neutron.tests.unit import test_extension_extraroute as extraroute_test
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_l3_plugin
_uuid = uuidutils.generate_uuid
class TestL3Sync(test_nuage_plugin.NuagePluginV2TestCase,
                 test_l3_plugin.L3NatDBIntTestCase):
    """Router and floating-ip sync tests for the Nuage plugin SyncManager."""
    def setUp(self):
        # Each test gets an admin DB session and a SyncManager wired to the
        # fake Nuage client used by the unit tests.
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestL3Sync, self).setUp()
    def _make_floatingip_for_tenant_port(self, net_id, port_id, tenant_id):
        """Create a floating IP on ``net_id`` associated with ``port_id``."""
        data = {'floatingip': {'floating_network_id': net_id,
                               'tenant_id': tenant_id,
                               'port_id': port_id}}
        floatingip_req = self.new_create_request('floatingips', data, self.fmt)
        res = floatingip_req.get_response(self.ext_api)
        return self.deserialize(self.fmt, res)
    def test_router_sync(self):
        """Sync recreates a neutron-only router on VSD and stores the id."""
        # If the router exists in neutron and not in VSD,
        # sync will create it in VSD. But the nuage_router_id
        # will now change and will be updated in neutron
        # accordingly
        rtr_res = self._create_router('json', 'foo', 'test-router', True)
        router = self.deserialize('json', rtr_res)
        self.syncmanager.synchronize('250')
        # Check that the nuage_router_id is updated in entrtrmapping table
        router_db = self.session.query(
            nuage_models.NetPartitionRouter).filter_by(
                router_id=router['router']['id']).first()
        self.assertEqual('2d782c02-b88e-44ad-a79b-4bdf11f7df3d',
                         router_db['nuage_router_id'])
        self._delete('routers', router['router']['id'])
    def test_router_deleted_get(self):
        """_get_router_data returns (None, None) for an unknown router."""
        data = self.syncmanager._get_router_data(_uuid())
        self.assertIsNone(data[0])
        self.assertIsNone(data[1])
    def test_fip_sync(self):
        """_sync_fips handles the add/associate/disassociate buckets."""
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with contextlib.nested(self.port(), self.port(), self.port()) as (
                    p1, p2, p3):
                p1_id = p1['port']['id']
                p2_id = p2['port']['id']
                p3_id = p3['port']['id']
                with contextlib.nested(self.floatingip_with_assoc(
                    port_id=p1_id), self.floatingip_with_assoc(
                        port_id=p2_id), self.floatingip_with_assoc(
                            port_id=p3_id)) as (fip1, fip2, fip3):
                    fip_dict = {'fip': {
                        'add': [fip1['floatingip']['id']],
                        'associate': [fip2['floatingip']['id']],
                        'disassociate': [fip3['floatingip']['id']]
                    }}
                    self.syncmanager._sync_fips(fip_dict)
    def test_deleted_fip_sync(self):
        """_sync_fips tolerates fip ids that no longer exist in neutron."""
        fip_dict = {'fip': {
            'add': [_uuid()],
            'associate': [_uuid()],
            'disassociate': [_uuid()]
        }}
        self.syncmanager._sync_fips(fip_dict)
    def test_fip_and_ipalloc_get(self):
        """The getter helpers return the fip row and its IP allocation."""
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as port:
                p_id = port['port']['id']
                with self.floatingip_with_assoc(port_id=p_id) as fip:
                    data = self.syncmanager._get_fip_data(
                        fip['floatingip']['id'])
                    self.assertEqual(fip['floatingip']['id'], data['id'])
                    data = self.syncmanager._get_ipalloc_for_fip(
                        fip['floatingip'])
                    self.assertEqual(fip['floatingip']['floating_ip_address'],
                                     data['ip_address'])
    def test_fip_and_ipalloc_deleted_get(self):
        """The getter helpers return None for deleted floating IPs."""
        data = self.syncmanager._get_fip_data(_uuid())
        self.assertIsNone(data)
        fip = {
            'id': _uuid(),
            'floating_network_id': _uuid(),
            'floating_ip_address': '176.176.10.10'
        }
        data = self.syncmanager._get_ipalloc_for_fip(fip)
        self.assertIsNone(data)
    def test_domainsubnet_sync(self):
        """Domain-subnet sync handles a router-attached subnet."""
        with self.subnet() as s1:
            with contextlib.nested(
                    self.router(),
                    self.port()) as (r1, p1):
                self._router_interface_action(
                    'add', r1['router']['id'],
                    s1['subnet']['id'], p1['port']['id'])
                domainsubn_dict = {
                    'domainsubnet': {'add': [s1['subnet']['id']]},
                    'port': {'sub_rtr_intf_port_dict': {s1['subnet']['id']:
                                                        p1['port']['id']}}}
                self.syncmanager.sync_domainsubnets(domainsubn_dict)
                self._router_interface_action('remove', r1['router']['id'],
                                              s1['subnet']['id'], None)
    def test_floatingip_update_different_router(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_floatingip_update_different_router()
    def test_floatingip_update_different_fixed_ip_same_port(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_floatingip_update_different_fixed_ip_same_port()
    def test_floatingip_create_different_fixed_ip_same_port(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_floatingip_create_different_fixed_ip_same_port()
    def test_network_update_external_failure(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_network_update_external_failure()
class TestExtraRouteSync(extraroute_test.ExtraRouteDBIntTestCase):
    """Extra-route sync tests for the Nuage plugin SyncManager."""
    def setUp(self):
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestExtraRouteSync, self).setUp()
    def test_route_sync(self):
        """An extra route present in neutron is accepted by route sync."""
        route = {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}
        with self.router() as router:
            with self.subnet(cidr='10.0.1.0/24') as subnet:
                network_id = subnet['subnet']['network_id']
                response = self._create_port('json', network_id)
                port = self.deserialize(self.fmt, response)
                router_id = router['router']['id']
                port_id = port['port']['id']
                self._routes_update_prepare(router_id, None, port_id, [route])
                self.syncmanager.sync_routes({'route': {'add': [route]}})
                self._routes_update_cleanup(port_id, None, router_id, [])
    def test_route_get(self):
        """_get_route_data returns the stored destination/nexthop pair."""
        routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
        with self.router() as router:
            with self.subnet(cidr='10.0.1.0/24') as subnet:
                network_id = subnet['subnet']['network_id']
                response = self._create_port('json', network_id)
                port = self.deserialize(self.fmt, response)
                router_id = router['router']['id']
                port_id = port['port']['id']
                self._routes_update_prepare(router_id, None, port_id, routes)
                data = self.syncmanager._get_route_data(routes[0])
                self.assertEqual(routes[0]['destination'], data['destination'])
                self.assertEqual(routes[0]['nexthop'], data['nexthop'])
                self._routes_update_cleanup(port_id, None, router_id, [])
    def test_route_deleted_get(self):
        """_get_route_data returns None for a route that does not exist."""
        route = {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}
        self.assertIsNone(self.syncmanager._get_route_data(route))
class TestNetPartSync(test_netpartition.NetPartitionTestCase):
    """Net-partition sync tests for the Nuage plugin SyncManager."""
    def setUp(self):
        admin_ctx = context.get_admin_context()
        self.session = admin_ctx.session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestNetPartSync, self).setUp()
    def test_net_partition_sync(self):
        """A net-partition missing on VSD is recreated by sync and its new
        id is written back to the neutron DB."""
        netpart = self._make_netpartition('json', 'sync-new-netpartition')
        self.syncmanager.synchronize('250')
        name = netpart['net_partition']['name']
        netpart_db = self.session.query(
            nuage_models.NetPartition).filter_by(name=name).first()
        self.assertEqual('a917924f-3139-4bdb-a4c3-ea7c8011582f',
                         netpart_db['id'])
        self._del_netpartition(netpart_db['id'])
    def test_net_partition_deleted_get(self):
        """_get_netpart_data returns None for an unknown id."""
        self.assertIsNone(self.syncmanager._get_netpart_data(_uuid()))
class TestL2Sync(test_nuage_plugin.NuagePluginV2TestCase):
    """L2 (subnet / shared network / VM port) sync tests for the plugin."""
    def setUp(self):
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestL2Sync, self).setUp()
    def test_subnet_sync(self):
        """A subnet missing on VSD is recreated by sync and the fresh
        nuage_subnet_id is written back to the mapping table."""
        network = self.deserialize(
            'json', self._create_network("json", "pub", True))
        network_id = network['network']['id']
        subnet = self.deserialize(
            'json', self._create_subnet("json", network_id, '10.0.0.0/24'))
        subnet_id = subnet['subnet']['id']
        self.syncmanager.synchronize('250')
        mapping = self.session.query(
            nuage_models.SubnetL2Domain).filter_by(subnet_id=subnet_id).first()
        self.assertEqual('52daa465-cf33-4efd-91d3-f5bc2aebd',
                         mapping['nuage_subnet_id'])
        self._delete('subnets', subnet_id)
        self._delete('networks', network_id)
    def test_subnet_deleted_get(self):
        """_get_subnet_data returns a pair of Nones for an unknown subnet."""
        data = self.syncmanager._get_subnet_data(_uuid())
        self.assertIsNone(data[0])
        self.assertIsNone(data[1])
    def test_sharednetwork_sync(self):
        """Shared-network sync accepts an existing subnet id."""
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            subnet_id = public_sub['subnet']['id']
            self.syncmanager.sync_sharednetworks(
                {'sharednetwork': {'add': [subnet_id]}})
    def test_vm_sync(self):
        """VM sync accepts an existing port id."""
        with self.port() as port:
            self.syncmanager.sync_vms(
                {'port': {'vm': [port['port']['id']]}})
class TestSecurityGroupSync(test_sg.TestSecurityGroups):
    """Security-group and rule sync tests for the Nuage plugin."""
    def setUp(self):
        # Admin DB session plus a SyncManager wired to the fake Nuage client.
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestSecurityGroupSync, self).setUp()
    def test_sg_get(self):
        """_get_sec_grp_data returns the row for an existing group."""
        with self.security_group() as sg:
            data = self.syncmanager._get_sec_grp_data(
                sg['security_group']['id'])
            self.assertEqual(sg['security_group']['id'], data['id'])
    def test_sg_deleted_get(self):
        """_get_sec_grp_data returns None for an unknown id."""
        data = self.syncmanager._get_sec_grp_data(_uuid())
        self.assertIsNone(data)
    def test_sg_rule_get(self):
        """_get_sec_grp_rule_data returns the row for an existing rule."""
        with self.security_group() as sg:
            sg_rule_id = sg['security_group']['security_group_rules'][0]['id']
            data = self.syncmanager._get_sec_grp_rule_data(sg_rule_id)
            self.assertEqual(sg_rule_id, data['id'])
    def test_sg_rule_deleted_get(self):
        """_get_sec_grp_rule_data returns None for an unknown id."""
        data = self.syncmanager._get_sec_grp_rule_data(_uuid())
        self.assertIsNone(data)
    def test_sg_grp_sync(self):
        """sync_secgrps handles both the l2domain and domain buckets."""
        with contextlib.nested(self.security_group(),
                               self.security_group()) as (sg1, sg2):
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            sg_dict = {'security': {'secgroup': {'l2domain': {'add': {sg1_id: [
                _uuid()]}}, 'domain': {'add': {sg2_id: [_uuid()]}}}}}
            self.syncmanager.sync_secgrps(sg_dict)
    def test_deleted_sg_grp_sync(self):
        """sync_secgrps tolerates group ids that no longer exist."""
        sg_dict = {'security': {'secgroup': {'l2domain': {'add': {_uuid(): [
            _uuid()]}}, 'domain': {'add': {_uuid(): [_uuid()]}}}}}
        self.syncmanager.sync_secgrps(sg_dict)
    def test_sg_rule_sync(self):
        """sync_secgrp_rules handles both the l2domain and domain buckets."""
        with contextlib.nested(self.security_group(),
                               self.security_group()) as (sg1, sg2):
            sg1_rule_id = (
                sg1['security_group']['security_group_rules'][0]['id'])
            sg2_rule_id = (
                sg2['security_group']['security_group_rules'][0]['id'])
            sg_dict = {'security': {'secgrouprule': {'l2domain': {
                'add': [sg1_rule_id]}, 'domain': {'add': [sg2_rule_id]}}}}
            self.syncmanager.sync_secgrp_rules(sg_dict)
    def test_deleted_sg_grp_rule_sync(self):
        """sync_secgrp_rules tolerates rule ids that no longer exist."""
        sg_dict = {'security': {'secgrouprule':
                                {'l2domain': {'add': [_uuid()]},
                                 'domain': {'add': [_uuid()]}}}}
        self.syncmanager.sync_secgrp_rules(sg_dict)
| 41.574344 | 79 | 0.591865 |
import contextlib
from neutron import context
from neutron.openstack.common import uuidutils
from neutron.plugins.nuage import nuage_models
from neutron.plugins.nuage import syncmanager as sync
from neutron.tests.unit.nuage import test_netpartition
from neutron.tests.unit.nuage import test_nuage_plugin
from neutron.tests.unit import test_extension_extraroute as extraroute_test
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_l3_plugin
_uuid = uuidutils.generate_uuid
class TestL3Sync(test_nuage_plugin.NuagePluginV2TestCase,
                 test_l3_plugin.L3NatDBIntTestCase):
    """Router and floating-ip sync tests for the Nuage plugin SyncManager."""
    def setUp(self):
        # Each test gets an admin DB session and a SyncManager wired to the
        # fake Nuage client used by the unit tests.
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestL3Sync, self).setUp()
    def _make_floatingip_for_tenant_port(self, net_id, port_id, tenant_id):
        """Create a floating IP on ``net_id`` associated with ``port_id``."""
        data = {'floatingip': {'floating_network_id': net_id,
                               'tenant_id': tenant_id,
                               'port_id': port_id}}
        floatingip_req = self.new_create_request('floatingips', data, self.fmt)
        res = floatingip_req.get_response(self.ext_api)
        return self.deserialize(self.fmt, res)
    def test_router_sync(self):
        """Sync recreates a neutron-only router on VSD and stores the id."""
        rtr_res = self._create_router('json', 'foo', 'test-router', True)
        router = self.deserialize('json', rtr_res)
        self.syncmanager.synchronize('250')
        # the nuage_router_id must be refreshed in the mapping table
        router_db = self.session.query(
            nuage_models.NetPartitionRouter).filter_by(
                router_id=router['router']['id']).first()
        self.assertEqual('2d782c02-b88e-44ad-a79b-4bdf11f7df3d',
                         router_db['nuage_router_id'])
        self._delete('routers', router['router']['id'])
    def test_router_deleted_get(self):
        """_get_router_data returns (None, None) for an unknown router."""
        data = self.syncmanager._get_router_data(_uuid())
        self.assertIsNone(data[0])
        self.assertIsNone(data[1])
    def test_fip_sync(self):
        """_sync_fips handles the add/associate/disassociate buckets."""
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with contextlib.nested(self.port(), self.port(), self.port()) as (
                    p1, p2, p3):
                p1_id = p1['port']['id']
                p2_id = p2['port']['id']
                p3_id = p3['port']['id']
                with contextlib.nested(self.floatingip_with_assoc(
                    port_id=p1_id), self.floatingip_with_assoc(
                        port_id=p2_id), self.floatingip_with_assoc(
                            port_id=p3_id)) as (fip1, fip2, fip3):
                    fip_dict = {'fip': {
                        'add': [fip1['floatingip']['id']],
                        'associate': [fip2['floatingip']['id']],
                        'disassociate': [fip3['floatingip']['id']]
                    }}
                    self.syncmanager._sync_fips(fip_dict)
    def test_deleted_fip_sync(self):
        """_sync_fips tolerates fip ids that no longer exist in neutron."""
        fip_dict = {'fip': {
            'add': [_uuid()],
            'associate': [_uuid()],
            'disassociate': [_uuid()]
        }}
        self.syncmanager._sync_fips(fip_dict)
    def test_fip_and_ipalloc_get(self):
        """The getter helpers return the fip row and its IP allocation."""
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as port:
                p_id = port['port']['id']
                with self.floatingip_with_assoc(port_id=p_id) as fip:
                    data = self.syncmanager._get_fip_data(
                        fip['floatingip']['id'])
                    self.assertEqual(fip['floatingip']['id'], data['id'])
                    data = self.syncmanager._get_ipalloc_for_fip(
                        fip['floatingip'])
                    self.assertEqual(fip['floatingip']['floating_ip_address'],
                                     data['ip_address'])
    def test_fip_and_ipalloc_deleted_get(self):
        """The getter helpers return None for deleted floating IPs."""
        data = self.syncmanager._get_fip_data(_uuid())
        self.assertIsNone(data)
        fip = {
            'id': _uuid(),
            'floating_network_id': _uuid(),
            'floating_ip_address': '176.176.10.10'
        }
        data = self.syncmanager._get_ipalloc_for_fip(fip)
        self.assertIsNone(data)
    def test_domainsubnet_sync(self):
        """Domain-subnet sync handles a router-attached subnet."""
        with self.subnet() as s1:
            with contextlib.nested(
                    self.router(),
                    self.port()) as (r1, p1):
                self._router_interface_action(
                    'add', r1['router']['id'],
                    s1['subnet']['id'], p1['port']['id'])
                domainsubn_dict = {
                    'domainsubnet': {'add': [s1['subnet']['id']]},
                    'port': {'sub_rtr_intf_port_dict': {s1['subnet']['id']:
                                                        p1['port']['id']}}}
                self.syncmanager.sync_domainsubnets(domainsubn_dict)
                self._router_interface_action('remove', r1['router']['id'],
                                              s1['subnet']['id'], None)
    def test_floatingip_update_different_router(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_floatingip_update_different_router()
    def test_floatingip_update_different_fixed_ip_same_port(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_floatingip_update_different_fixed_ip_same_port()
    def test_floatingip_create_different_fixed_ip_same_port(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_floatingip_create_different_fixed_ip_same_port()
    def test_network_update_external_failure(self):
        """Re-run the upstream L3 test under the Nuage plugin."""
        self._test_network_update_external_failure()
class TestExtraRouteSync(extraroute_test.ExtraRouteDBIntTestCase):
    """Extra-route sync tests for the Nuage plugin SyncManager."""
    def setUp(self):
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestExtraRouteSync, self).setUp()
    def test_route_sync(self):
        """An extra route present in neutron is accepted by route sync."""
        route = {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}
        with self.router() as router:
            with self.subnet(cidr='10.0.1.0/24') as subnet:
                network_id = subnet['subnet']['network_id']
                response = self._create_port('json', network_id)
                port = self.deserialize(self.fmt, response)
                router_id = router['router']['id']
                port_id = port['port']['id']
                self._routes_update_prepare(router_id, None, port_id, [route])
                self.syncmanager.sync_routes({'route': {'add': [route]}})
                self._routes_update_cleanup(port_id, None, router_id, [])
    def test_route_get(self):
        """_get_route_data returns the stored destination/nexthop pair."""
        routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
        with self.router() as router:
            with self.subnet(cidr='10.0.1.0/24') as subnet:
                network_id = subnet['subnet']['network_id']
                response = self._create_port('json', network_id)
                port = self.deserialize(self.fmt, response)
                router_id = router['router']['id']
                port_id = port['port']['id']
                self._routes_update_prepare(router_id, None, port_id, routes)
                data = self.syncmanager._get_route_data(routes[0])
                self.assertEqual(routes[0]['destination'], data['destination'])
                self.assertEqual(routes[0]['nexthop'], data['nexthop'])
                self._routes_update_cleanup(port_id, None, router_id, [])
    def test_route_deleted_get(self):
        """_get_route_data returns None for a route that does not exist."""
        route = {'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}
        self.assertIsNone(self.syncmanager._get_route_data(route))
class TestNetPartSync(test_netpartition.NetPartitionTestCase):
    """Net-partition sync tests for the Nuage plugin SyncManager."""
    def setUp(self):
        admin_ctx = context.get_admin_context()
        self.session = admin_ctx.session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestNetPartSync, self).setUp()
    def test_net_partition_sync(self):
        """A net-partition missing on VSD is recreated by sync and its new
        id is written back to the neutron DB."""
        netpart = self._make_netpartition('json', 'sync-new-netpartition')
        self.syncmanager.synchronize('250')
        name = netpart['net_partition']['name']
        netpart_db = self.session.query(
            nuage_models.NetPartition).filter_by(name=name).first()
        self.assertEqual('a917924f-3139-4bdb-a4c3-ea7c8011582f',
                         netpart_db['id'])
        self._del_netpartition(netpart_db['id'])
    def test_net_partition_deleted_get(self):
        """_get_netpart_data returns None for an unknown id."""
        self.assertIsNone(self.syncmanager._get_netpart_data(_uuid()))
class TestL2Sync(test_nuage_plugin.NuagePluginV2TestCase):
    """L2 (subnet / shared network / VM port) sync tests for the plugin."""
    def setUp(self):
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestL2Sync, self).setUp()
    def test_subnet_sync(self):
        """A subnet missing on VSD is recreated by sync and the fresh
        nuage_subnet_id is written back to the mapping table."""
        network = self.deserialize(
            'json', self._create_network("json", "pub", True))
        network_id = network['network']['id']
        subnet = self.deserialize(
            'json', self._create_subnet("json", network_id, '10.0.0.0/24'))
        subnet_id = subnet['subnet']['id']
        self.syncmanager.synchronize('250')
        mapping = self.session.query(
            nuage_models.SubnetL2Domain).filter_by(subnet_id=subnet_id).first()
        self.assertEqual('52daa465-cf33-4efd-91d3-f5bc2aebd',
                         mapping['nuage_subnet_id'])
        self._delete('subnets', subnet_id)
        self._delete('networks', network_id)
    def test_subnet_deleted_get(self):
        """_get_subnet_data returns a pair of Nones for an unknown subnet."""
        data = self.syncmanager._get_subnet_data(_uuid())
        self.assertIsNone(data[0])
        self.assertIsNone(data[1])
    def test_sharednetwork_sync(self):
        """Shared-network sync accepts an existing subnet id."""
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            subnet_id = public_sub['subnet']['id']
            self.syncmanager.sync_sharednetworks(
                {'sharednetwork': {'add': [subnet_id]}})
    def test_vm_sync(self):
        """VM sync accepts an existing port id."""
        with self.port() as port:
            self.syncmanager.sync_vms(
                {'port': {'vm': [port['port']['id']]}})
class TestSecurityGroupSync(test_sg.TestSecurityGroups):
    """Security-group and rule sync tests for the Nuage plugin."""
    def setUp(self):
        # Admin DB session plus a SyncManager wired to the fake Nuage client.
        self.session = context.get_admin_context().session
        self.syncmanager = sync.SyncManager(
            test_nuage_plugin.getNuageClient())
        super(TestSecurityGroupSync, self).setUp()
    def test_sg_get(self):
        """_get_sec_grp_data returns the row for an existing group."""
        with self.security_group() as sg:
            data = self.syncmanager._get_sec_grp_data(
                sg['security_group']['id'])
            self.assertEqual(sg['security_group']['id'], data['id'])
    def test_sg_deleted_get(self):
        """_get_sec_grp_data returns None for an unknown id."""
        data = self.syncmanager._get_sec_grp_data(_uuid())
        self.assertIsNone(data)
    def test_sg_rule_get(self):
        """_get_sec_grp_rule_data returns the row for an existing rule."""
        with self.security_group() as sg:
            sg_rule_id = sg['security_group']['security_group_rules'][0]['id']
            data = self.syncmanager._get_sec_grp_rule_data(sg_rule_id)
            self.assertEqual(sg_rule_id, data['id'])
    def test_sg_rule_deleted_get(self):
        """_get_sec_grp_rule_data returns None for an unknown id."""
        data = self.syncmanager._get_sec_grp_rule_data(_uuid())
        self.assertIsNone(data)
    def test_sg_grp_sync(self):
        """sync_secgrps handles both the l2domain and domain buckets."""
        with contextlib.nested(self.security_group(),
                               self.security_group()) as (sg1, sg2):
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            sg_dict = {'security': {'secgroup': {'l2domain': {'add': {sg1_id: [
                _uuid()]}}, 'domain': {'add': {sg2_id: [_uuid()]}}}}}
            self.syncmanager.sync_secgrps(sg_dict)
    def test_deleted_sg_grp_sync(self):
        """sync_secgrps tolerates group ids that no longer exist."""
        sg_dict = {'security': {'secgroup': {'l2domain': {'add': {_uuid(): [
            _uuid()]}}, 'domain': {'add': {_uuid(): [_uuid()]}}}}}
        self.syncmanager.sync_secgrps(sg_dict)
    def test_sg_rule_sync(self):
        """sync_secgrp_rules handles both the l2domain and domain buckets."""
        with contextlib.nested(self.security_group(),
                               self.security_group()) as (sg1, sg2):
            sg1_rule_id = (
                sg1['security_group']['security_group_rules'][0]['id'])
            sg2_rule_id = (
                sg2['security_group']['security_group_rules'][0]['id'])
            sg_dict = {'security': {'secgrouprule': {'l2domain': {
                'add': [sg1_rule_id]}, 'domain': {'add': [sg2_rule_id]}}}}
            self.syncmanager.sync_secgrp_rules(sg_dict)
    def test_deleted_sg_grp_rule_sync(self):
        """sync_secgrp_rules tolerates rule ids that no longer exist."""
        sg_dict = {'security': {'secgrouprule':
                                {'l2domain': {'add': [_uuid()]},
                                 'domain': {'add': [_uuid()]}}}}
        self.syncmanager.sync_secgrp_rules(sg_dict)
| true | true |
1c2b803d760833ae4012d084fc3dcf2af46c29c1 | 5,009 | py | Python | docs/conf.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | 1 | 2020-12-31T14:36:29.000Z | 2020-12-31T14:36:29.000Z | docs/conf.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | 1 | 2019-12-10T01:54:27.000Z | 2019-12-10T01:54:27.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import re
import sys
from os import path
sys.path.insert(0, "..")
def current_version(init_py=None):
    """Return the mlrun package version string.

    :param init_py: optional explicit path of the ``__init__.py`` file to
        parse; defaults to ``<repo root>/mlrun/__init__.py`` relative to
        this file.
    :returns: the parsed version, or ``"UNKNOWN"`` when no
        ``__version__`` assignment is found.
    """
    if init_py is None:
        root = path.dirname(path.dirname(path.abspath(__file__)))
        init_py = f"{root}/mlrun/__init__.py"
    # Accept either quote style: __version__ = '0.4.6' or "0.4.6"
    pattern = re.compile(r"""__version__\s*=\s*['"]([^'"]+)['"]""")
    with open(init_py) as fp:
        for line in fp:
            match = pattern.search(line)
            if match:
                return match.group(1)
    return "UNKNOWN"
# -- Project information -----------------------------------------------------
project = "mlrun"
copyright = "2021, Iguazio"
author = "Iguazio"
master_doc = "index"
# The short X.Y version
version = current_version()
version = version[: version.rfind(".")]
# The full version, including alpha/beta/rc tags
release = current_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_nb",
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_copybutton",
"sphinx_togglebutton",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
source_suffix = {
".rst": "restructuredtext",
".ipynb": "myst-nb",
".myst": "myst-nb",
".md": "myst-nb",
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_book_theme"
html_title = ""
html_logo = "./MLRun_Character.png"
html_favicon = "./favicon.ico"
extra_navbar = "<p>Your HTML</p>"
jupyter_execute_notebooks = "off"
html_sourcelink_suffix = ""
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"github_url": "https://github.com/mlrun/mlrun",
"repository_url": "https://github.com/mlrun/mlrun",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"path_to_docs": "docs",
"home_page_in_toc": False,
"repository_branch": "development",
"show_navbar_depth": 1,
"extra_navbar": 'By <a href="https://www.iguazio.com/">Iguazio</a>',
"extra_footer": "",
"google_analytics_id": "",
}
copybutton_selector = "div:not(.output) > div.highlight pre"
myst_enable_extensions = [
"colon_fence",
"deflist",
"html_image",
"html_admonition",
"smartquotes",
"replacements",
"linkify",
"substitution",
]
myst_url_schemes = ("http", "https", "mailto")
panels_add_bootstrap_css = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def copy_doc(src, dest, title=""):
    """Copy a markdown doc from elsewhere in the project into the docs tree.

    :param src:   path of the source markdown file
    :param dest:  destination path (overwritten if it exists)
    :param title: when given, the first top-level heading (``# ...``) in
                  the file is replaced with ``# <title>``
    """
    with open(dest, "w") as out:
        with open(src) as fp:
            replaced_title = False
            for line in fp:
                if title and not replaced_title and re.match("^# .*", line):
                    # BUGFIX: keep the trailing newline -- the original code
                    # dropped it, gluing the heading onto the next line.
                    line = f"# {title}\n"
                    replaced_title = True
                out.write(line)
def setup(app):
    """Sphinx extension entry point -- currently a no-op placeholder."""
    pass
# project_root = path.dirname(path.dirname(path.abspath(__file__)))
# copy_doc(f"{project_root}/examples/remote.md", "external/remote.md")
# copy_doc(
# f'{project_root}/README.md', 'external/general.md', 'Introduction')
# copy_doc(
# f'{project_root}/hack/local/README.md', 'external/install.md')
# check_call([
# 'jupyter', 'nbconvert',
# '--output', f'{project_root}/docs/external/basics.html',
# f'{project_root}/examples/mlrun_basics.ipynb',
# ])
| 29.994012 | 79 | 0.643641 |
import re
import sys
from os import path
sys.path.insert(0, "..")
def current_version(init_py=None):
    """Return the mlrun package version string.

    :param init_py: optional explicit path of the ``__init__.py`` file to
        parse; defaults to ``<repo root>/mlrun/__init__.py`` relative to
        this file.
    :returns: the parsed version, or ``"UNKNOWN"`` when no
        ``__version__`` assignment is found.
    """
    if init_py is None:
        root = path.dirname(path.dirname(path.abspath(__file__)))
        init_py = f"{root}/mlrun/__init__.py"
    # Accept either quote style: __version__ = '0.4.6' or "0.4.6"
    pattern = re.compile(r"""__version__\s*=\s*['"]([^'"]+)['"]""")
    with open(init_py) as fp:
        for line in fp:
            match = pattern.search(line)
            if match:
                return match.group(1)
    return "UNKNOWN"
# -- Project information -----------------------------------------------------
project = "mlrun"
copyright = "2021, Iguazio"
author = "Iguazio"
master_doc = "index"
# The short X.Y version
version = current_version()
version = version[: version.rfind(".")]
# The full version, including alpha/beta/rc tags
release = current_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_nb",
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_copybutton",
"sphinx_togglebutton",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
source_suffix = {
".rst": "restructuredtext",
".ipynb": "myst-nb",
".myst": "myst-nb",
".md": "myst-nb",
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_book_theme"
html_title = ""
html_logo = "./MLRun_Character.png"
html_favicon = "./favicon.ico"
extra_navbar = "<p>Your HTML</p>"
jupyter_execute_notebooks = "off"
html_sourcelink_suffix = ""
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"github_url": "https://github.com/mlrun/mlrun",
"repository_url": "https://github.com/mlrun/mlrun",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"path_to_docs": "docs",
"home_page_in_toc": False,
"repository_branch": "development",
"show_navbar_depth": 1,
"extra_navbar": 'By <a href="https://www.iguazio.com/">Iguazio</a>',
"extra_footer": "",
"google_analytics_id": "",
}
copybutton_selector = "div:not(.output) > div.highlight pre"
myst_enable_extensions = [
"colon_fence",
"deflist",
"html_image",
"html_admonition",
"smartquotes",
"replacements",
"linkify",
"substitution",
]
myst_url_schemes = ("http", "https", "mailto")
panels_add_bootstrap_css = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def copy_doc(src, dest, title=""):
    """Copy the file *src* to *dest*, optionally replacing the first Markdown H1.

    Only the first line matching ``# ...`` is rewritten (tracked via the
    ``changed`` flag); every other line is copied through verbatim.
    """
    with open(dest, "w") as out:
        with open(src) as fp:
            changed = False
            for line in fp:
                if title and re.match("^# .*", line) and not changed:
                    # Keep the trailing newline: without it the new title
                    # was glued onto the following line of the output file.
                    line = f"# {title}\n"
                    changed = True
                out.write(line)
def setup(app):
    """Sphinx ``setup`` hook; intentionally a no-op.

    The doc-copying/conversion steps are kept commented out below for
    reference.
    """
    pass
# project_root = path.dirname(path.dirname(path.abspath(__file__)))
# copy_doc(f"{project_root}/examples/remote.md", "external/remote.md")
# copy_doc(
# f'{project_root}/README.md', 'external/general.md', 'Introduction')
# copy_doc(
# f'{project_root}/hack/local/README.md', 'external/install.md')
# check_call([
# 'jupyter', 'nbconvert',
# '--output', f'{project_root}/docs/external/basics.html',
# f'{project_root}/examples/mlrun_basics.ipynb',
# ])
| true | true |
1c2b80a86cbda2d9e203138fd259cb3685938830 | 4,335 | py | Python | python/orca/test/bigdl/orca/tfpark/test_tfnet.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | [
"Apache-2.0"
] | 3 | 2021-07-14T01:28:47.000Z | 2022-03-02T01:16:32.000Z | python/orca/test/bigdl/orca/tfpark/test_tfnet.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | [
"Apache-2.0"
] | null | null | null | python/orca/test/bigdl/orca/tfpark/test_tfnet.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.orca.tfpark import TFNet, TFDataset
from bigdl.dllib.utils.common import *
np.random.seed(1337) # for reproducibility
class TestTF(ZooTestCase):
    """Tests for TFNet construction (export folder / TF session / SavedModel)
    and for local and distributed prediction."""
    # Shared test-resource directory, resolved relative to this file.
    resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
    def test_init_tf_net(self):
        """A TFNet loaded from an export folder forwards with the expected shape."""
        tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
        net = TFNet.from_export_folder(tfnet_path)
        output = net.forward(np.random.rand(2, 4))
        assert output.shape == (2, 2)
    def test_for_scalar(self):
        """Scalar (0-d) inputs and outputs survive a session round-trip."""
        import tensorflow as tf
        with tf.Graph().as_default():
            input1 = tf.placeholder(dtype=tf.float32, shape=())
            output = input1 + 1
            sess = tf.Session()
            net = TFNet.from_session(sess, [input1], [output])
            sess.close()
        out_value = net.forward(np.array(1.0))
        assert len(out_value.shape) == 0
        # the following test would fail on bigdl 0.6.0 due to a bug in bigdl,
        # comment it out for now
        # out_value = net.predict(np.array([1.0])).first()
        # assert len(out_value.shape) == 0
    def test_init_tfnet_from_session(self):
        """forward/backward of a session-built TFNet match TF's own outputs/gradients."""
        import tensorflow as tf
        with tf.Graph().as_default():
            input1 = tf.placeholder(dtype=tf.float32, shape=(None, 2))
            label1 = tf.placeholder(dtype=tf.float32, shape=(None, 1))
            hidden = tf.layers.dense(input1, 4)
            output = tf.layers.dense(hidden, 1)
            loss = tf.reduce_mean(tf.square(output - label1))
            grad_inputs = tf.gradients(loss, input1)
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                data = np.random.rand(2, 2)
                # Reference values computed directly by TensorFlow.
                output_value_ref = sess.run(output, feed_dict={input1: data})
                label_value = output_value_ref - 1.0
                grad_input_value_ref = sess.run(grad_inputs[0],
                                                feed_dict={input1: data,
                                                           label1: label_value})
                net = TFNet.from_session(sess, [input1], [output], generate_backward=True)
        output_value = net.forward(data)
        grad_input_value = net.backward(data, np.ones(shape=(2, 1)))
        self.assert_allclose(output_value, output_value_ref)
        self.assert_allclose(grad_input_value, grad_input_value_ref)
    def test_init_tfnet_from_saved_model(self):
        """A TFNet built from a SavedModel predicts without error."""
        model_path = os.path.join(TestTF.resource_path, "saved-model-resource")
        tfnet = TFNet.from_saved_model(model_path, inputs=["flatten_input:0"],
                                       outputs=["dense_2/Softmax:0"])
        result = tfnet.predict(np.ones(dtype=np.float32, shape=(20, 28, 28, 1)))
        result.collect()
    def test_tf_net_predict(self):
        """Local (non-distributed) predict with a custom session config."""
        tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
        import tensorflow as tf
        tf_session_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                                           intra_op_parallelism_threads=1)
        net = TFNet.from_export_folder(tfnet_path, tf_session_config=tf_session_config)
        output = net.predict(np.random.rand(16, 4), batch_per_thread=5, distributed=False)
        assert output.shape == (16, 2)
    def test_tf_net_predict_dataset(self):
        """Distributed predict over a TFDataset returns one row per sample."""
        tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
        net = TFNet.from_export_folder(tfnet_path)
        dataset = TFDataset.from_ndarrays((np.random.rand(16, 4),))
        output = net.predict(dataset)
        output = np.stack(output.collect())
        assert output.shape == (16, 2)
if __name__ == "__main__":
pytest.main([__file__])
| 40.514019 | 90 | 0.634371 |
import pytest
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.orca.tfpark import TFNet, TFDataset
from bigdl.dllib.utils.common import *
np.random.seed(1337)
class TestTF(ZooTestCase):
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
def test_init_tf_net(self):
tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
net = TFNet.from_export_folder(tfnet_path)
output = net.forward(np.random.rand(2, 4))
assert output.shape == (2, 2)
def test_for_scalar(self):
import tensorflow as tf
with tf.Graph().as_default():
input1 = tf.placeholder(dtype=tf.float32, shape=())
output = input1 + 1
sess = tf.Session()
net = TFNet.from_session(sess, [input1], [output])
sess.close()
out_value = net.forward(np.array(1.0))
assert len(out_value.shape) == 0
def test_init_tfnet_from_session(self):
import tensorflow as tf
with tf.Graph().as_default():
input1 = tf.placeholder(dtype=tf.float32, shape=(None, 2))
label1 = tf.placeholder(dtype=tf.float32, shape=(None, 1))
hidden = tf.layers.dense(input1, 4)
output = tf.layers.dense(hidden, 1)
loss = tf.reduce_mean(tf.square(output - label1))
grad_inputs = tf.gradients(loss, input1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
data = np.random.rand(2, 2)
output_value_ref = sess.run(output, feed_dict={input1: data})
label_value = output_value_ref - 1.0
grad_input_value_ref = sess.run(grad_inputs[0],
feed_dict={input1: data,
label1: label_value})
net = TFNet.from_session(sess, [input1], [output], generate_backward=True)
output_value = net.forward(data)
grad_input_value = net.backward(data, np.ones(shape=(2, 1)))
self.assert_allclose(output_value, output_value_ref)
self.assert_allclose(grad_input_value, grad_input_value_ref)
def test_init_tfnet_from_saved_model(self):
model_path = os.path.join(TestTF.resource_path, "saved-model-resource")
tfnet = TFNet.from_saved_model(model_path, inputs=["flatten_input:0"],
outputs=["dense_2/Softmax:0"])
result = tfnet.predict(np.ones(dtype=np.float32, shape=(20, 28, 28, 1)))
result.collect()
def test_tf_net_predict(self):
tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
import tensorflow as tf
tf_session_config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
net = TFNet.from_export_folder(tfnet_path, tf_session_config=tf_session_config)
output = net.predict(np.random.rand(16, 4), batch_per_thread=5, distributed=False)
assert output.shape == (16, 2)
def test_tf_net_predict_dataset(self):
tfnet_path = os.path.join(TestTF.resource_path, "tfnet")
net = TFNet.from_export_folder(tfnet_path)
dataset = TFDataset.from_ndarrays((np.random.rand(16, 4),))
output = net.predict(dataset)
output = np.stack(output.collect())
assert output.shape == (16, 2)
if __name__ == "__main__":
pytest.main([__file__])
| true | true |
1c2b811d48251a2858ecac893eb856d9182a7692 | 1,802 | py | Python | function/python/brightics/function/transform/sample.py | sharon1321/studio | c5ce7f6db5503f5020b2aa0c6f2e6acfc61c90c5 | [
"Apache-2.0"
] | null | null | null | function/python/brightics/function/transform/sample.py | sharon1321/studio | c5ce7f6db5503f5020b2aa0c6f2e6acfc61c90c5 | [
"Apache-2.0"
] | null | null | null | function/python/brightics/function/transform/sample.py | sharon1321/studio | c5ce7f6db5503f5020b2aa0c6f2e6acfc61c90c5 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than_or_equal_to
def random_sampling(table, group_by=None, **params):
    """Public entry point: randomly sample rows of *table*.

    Validates that ``num`` is at least 1, fills in defaults declared on
    ``_random_sampling``, then dispatches either directly or, when
    *group_by* is given, per group via ``_function_by_group``.
    """
    check_required_parameters(_random_sampling, params, ['table'])
    params = get_default_from_parameters_if_required(params, _random_sampling)
    validate(greater_than_or_equal_to(params, 1, 'num'))
    if group_by is None:
        return _random_sampling(table, **params)
    return _function_by_group(_random_sampling, table, group_by=group_by, **params)
def _random_sampling(table, num_or_frac='num', num=1, frac=50, replace=False, seed=None):
if num_or_frac == 'num':
out_table = table.sample(n=num, replace=replace, random_state=seed)
else: # 'frac'
out_table = table.sample(frac=frac / 100, replace=replace, random_state=seed)
return {'table' : out_table}
| 40.954545 | 90 | 0.727525 |
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import greater_than_or_equal_to
def random_sampling(table, group_by=None, **params):
check_required_parameters(_random_sampling, params, ['table'])
params = get_default_from_parameters_if_required(params, _random_sampling)
param_validation_check = [greater_than_or_equal_to(params, 1, 'num')]
validate(*param_validation_check)
if group_by is not None:
return _function_by_group(_random_sampling, table, group_by=group_by, **params)
else:
return _random_sampling(table, **params)
def _random_sampling(table, num_or_frac='num', num=1, frac=50, replace=False, seed=None):
if num_or_frac == 'num':
out_table = table.sample(n=num, replace=replace, random_state=seed)
else:
out_table = table.sample(frac=frac / 100, replace=replace, random_state=seed)
return {'table' : out_table}
| true | true |
1c2b815b6bb28739d8726c91bc08bc3601d2ba4e | 5,781 | py | Python | test/test_selectLog.py | s-naoya/plotlog | 278c7e1d6f2af90a55bb9fa121051e00e976c1c0 | [
"MIT"
] | null | null | null | test/test_selectLog.py | s-naoya/plotlog | 278c7e1d6f2af90a55bb9fa121051e00e976c1c0 | [
"MIT"
] | null | null | null | test/test_selectLog.py | s-naoya/plotlog | 278c7e1d6f2af90a55bb9fa121051e00e976c1c0 | [
"MIT"
] | null | null | null | import unittest
from plotlog.selectlog import SelectLog
import create_exlog as ce
class TestSelectLog(unittest.TestCase):
    """Tests for plotlog.selectlog.SelectLog log-file discovery.

    Each test regenerates the example logs with ``ce.create_exlog`` and
    compares the paths SelectLog reports against the expected collection.
    The original pairs of mutual ``assertIn`` loops are replaced by
    ``assertCountEqual``, which asserts the same property (same elements,
    order ignored) and is additionally sensitive to duplicate-count
    mismatches.  Date layouts: log_date_type=0 -> YYMMDDhhmmss filenames,
    log_date_type=4 -> YY-MM-DD_hh-mm-ss (built by ``ce.type_four``).
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @staticmethod
    def _default_paths():
        """Expected csv paths for log_date_type=0 fixtures."""
        return [f[1] + f[0][2:] + ".csv" for f in ce.default_files]

    def test_get_paths_of_all(self):
        self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
        ce.create_exlog(log_date_type=0)
        self.assertCountEqual(self.sl.get_paths_of_all(), self._default_paths())

    def test_get_paths_of_after(self):
        self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
        ce.create_exlog(log_date_type=0)
        after_date = "170102200000"
        expected = [f[1] + f[0][2:] + ".csv" for f in ce.default_files
                    if int(f[0][2:]) >= int(after_date)]
        self.assertCountEqual(self.sl.get_paths_of_after(after_date), expected)

    def test_get_paths_of_select(self):
        self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
        ce.create_exlog(log_date_type=0)
        sel_dates = ["170102200000", "170102000000"]
        expected = [f[1] + f[0][2:] + ".csv"
                    for sel_date in sel_dates
                    for f in ce.default_files
                    if int(f[0][2:]) == int(sel_date)]
        self.assertCountEqual(self.sl.get_paths_of_select(sel_dates), expected)

    def test_get_paths_of_new(self):
        self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
        ce.create_exlog(log_date_type=0)
        if ce.isdir("./graph"):
            ce.rmtree("./graph")
        sel_dates = ["170102200000", "170101120000", "170102180000"]
        selected = self.sl.get_paths_of_select(sel_dates)
        # Mark the selected logs as already processed by creating their save dirs.
        for path in selected:
            self.sl.setup_save_dir(self.sl.get_fn(path))
        expected = self._default_paths()
        for path in selected:
            expected.remove(path)
        self.assertCountEqual(self.sl.get_paths_of_new(), expected)

    def test_filename_to_date(self):
        self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
        self.assertEqual("170102200000",
                         self.sl.fn_to_datetime("17-01-02 20,00,00.csv"))

    def test_get_paths_of_all_other_type(self):
        self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
        ce.create_exlog(log_date_type=4)
        expected = [ce.type_four(f) for f in ce.default_files]
        self.assertCountEqual(self.sl.get_paths_of_all(), expected)

    def test_is_date_in_fn(self):
        self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
        self.assertTrue(self.sl.is_date_in_fn("17-10-31_01-00-00"))

    def test_get_paths_of_after_other_type(self):
        self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
        ce.create_exlog(log_date_type=4)
        after_date = "17-01-02_20-00-00"
        cutoff = int(self.sl.fn_to_datetime(after_date))
        expected = [ce.type_four(f) for f in ce.default_files
                    if int(self.sl.fn_to_datetime(f[0][2:])) >= cutoff]
        self.assertCountEqual(self.sl.get_paths_of_after(after_date), expected)

    def test_get_paths_of_select_other_type(self):
        self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
        ce.create_exlog(log_date_type=4)
        sel_dates = ["17-01-02_20-00-00", "17-01-02_00-00-00"]
        expected = [ce.type_four(f)
                    for sel_date in sel_dates
                    for f in ce.default_files
                    if int(self.sl.fn_to_datetime(f[0][2:]))
                    == int(self.sl.fn_to_datetime(sel_date))]
        self.assertCountEqual(self.sl.get_paths_of_select(sel_dates), expected)

    def test_get_paths_of_new_other_type(self):
        self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
        ce.create_exlog(log_date_type=4)
        if ce.isdir("./graph"):
            ce.rmtree("./graph")
        sel_dates = ["17-01-02_20-00-00", "17-01-01_12-00-00", "17-01-02_18-00-00"]
        selected = self.sl.get_paths_of_select(sel_dates)
        for path in selected:
            self.sl.setup_save_dir(self.sl.get_fn(path))
        expected = [ce.type_four(f) for f in ce.default_files]
        for path in selected:
            expected.remove(path)
        self.assertCountEqual(self.sl.get_paths_of_new(), expected)
| 40.711268 | 106 | 0.636222 | import unittest
from plotlog.selectlog import SelectLog
import create_exlog as ce
class TestSelectLog(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_paths_of_all(self):
self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
ce.create_exlog(log_date_type=0)
all_paths = [f[1]+f[0][2:]+".csv" for f in ce.default_files]
get_all_paths = self.sl.get_paths_of_all()
for all_path in all_paths:
self.assertIn(all_path, get_all_paths)
for get_all_path in get_all_paths:
self.assertIn(get_all_path, all_paths)
def test_get_paths_of_after(self):
self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
ce.create_exlog(log_date_type=0)
after_date = "170102200000"
get_after_paths = self.sl.get_paths_of_after(after_date)
after_paths = [f[1]+f[0][2:]+".csv" for f in ce.default_files if int(f[0][2:]) >= int(after_date)]
for after_path in after_paths:
self.assertIn(after_path, get_after_paths)
for get_after_path in get_after_paths:
self.assertIn(get_after_path, after_paths)
def test_get_paths_of_select(self):
self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
ce.create_exlog(log_date_type=0)
sel_dates = ["170102200000", "170102000000"]
get_sel_paths = self.sl.get_paths_of_select(sel_dates)
sel_paths = []
for sel_date in sel_dates:
for f in ce.default_files:
if int(f[0][2:]) == int(sel_date):
sel_paths.append(f[1] + f[0][2:] + ".csv")
for sel_path in sel_paths:
self.assertIn(sel_path, get_sel_paths)
for get_sel_path in get_sel_paths:
self.assertIn(get_sel_path, sel_paths)
def test_get_paths_of_new(self):
self.sl = SelectLog("log/", "graph/", "YYMMDDhhmmss")
ce.create_exlog(log_date_type=0)
if ce.isdir("./graph"):
ce.rmtree("./graph")
sel_dates = ["170102200000", "170101120000", "170102180000"]
get_sel_paths = self.sl.get_paths_of_select(sel_dates)
for path in get_sel_paths:
self.sl.setup_save_dir(self.sl.get_fn(path))
get_new_paths = self.sl.get_paths_of_new()
new_paths = [f[1]+f[0][2:]+".csv" for f in ce.default_files]
for get_sel_path in get_sel_paths:
new_paths.remove(get_sel_path)
for new_path in new_paths:
self.assertIn(new_path, get_new_paths)
for get_new_path in get_new_paths:
self.assertIn(get_new_path, new_paths)
def test_filename_to_date(self):
self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
log_file_name = "17-01-02 20,00,00.csv"
log_file_date = self.sl.fn_to_datetime(log_file_name)
self.assertEqual("170102200000", log_file_date)
def test_get_paths_of_all_other_type(self):
self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
ce.create_exlog(log_date_type=4)
all_paths = [ce.type_four(f) for f in ce.default_files]
get_all_paths = self.sl.get_paths_of_all()
for all_path in all_paths:
self.assertIn(all_path, get_all_paths)
for get_all_path in get_all_paths:
self.assertIn(get_all_path, all_paths)
def test_is_date_in_fn(self):
self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
self.assertTrue(self.sl.is_date_in_fn("17-10-31_01-00-00"))
def test_get_paths_of_after_other_type(self):
self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
ce.create_exlog(log_date_type=4)
after_date = "17-01-02_20-00-00"
get_after_paths = self.sl.get_paths_of_after(after_date)
after_paths = []
for f in ce.default_files:
if int(self.sl.fn_to_datetime(f[0][2:])) >= int(self.sl.fn_to_datetime(after_date)):
after_paths.append(ce.type_four(f))
for after_path in after_paths:
self.assertIn(after_path, get_after_paths)
for get_after_path in get_after_paths:
self.assertIn(get_after_path, after_paths)
def test_get_paths_of_select_other_type(self):
self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
ce.create_exlog(log_date_type=4)
sel_dates = ["17-01-02_20-00-00", "17-01-02_00-00-00"]
get_sel_paths = self.sl.get_paths_of_select(sel_dates)
sel_paths = []
for sel_date in sel_dates:
for f in ce.default_files:
if int(self.sl.fn_to_datetime(f[0][2:])) == int(self.sl.fn_to_datetime(sel_date)):
sel_paths.append(ce.type_four(f))
for sel_path in sel_paths:
self.assertIn(sel_path, get_sel_paths)
for get_sel_path in get_sel_paths:
self.assertIn(get_sel_path, sel_paths)
def test_get_paths_of_new_other_type(self):
self.sl = SelectLog("log/", "graph/", "YY-MM-DD_hh-mm-ss")
ce.create_exlog(log_date_type=4)
if ce.isdir("./graph"):
ce.rmtree("./graph")
sel_dates = ["17-01-02_20-00-00", "17-01-01_12-00-00", "17-01-02_18-00-00"]
get_sel_paths = self.sl.get_paths_of_select(sel_dates)
for path in get_sel_paths:
self.sl.setup_save_dir(self.sl.get_fn(path))
get_new_paths = self.sl.get_paths_of_new()
new_paths = [ce.type_four(f) for f in ce.default_files]
for get_sel_path in get_sel_paths:
new_paths.remove(get_sel_path)
for new_path in new_paths:
self.assertIn(new_path, get_new_paths)
for get_new_path in get_new_paths:
self.assertIn(get_new_path, new_paths)
| true | true |
1c2b816c781d5855b8f1a09c54df81e93d75adfb | 115 | py | Python | autotune/__version__.py | liuyangzhuan/autotune | bc24177a617025d2a47bc79563538cc6da45cfa9 | [
"BSD-2-Clause"
] | 2 | 2021-01-11T01:55:33.000Z | 2022-03-06T15:39:18.000Z | autotune/__version__.py | liuyangzhuan/autotune | bc24177a617025d2a47bc79563538cc6da45cfa9 | [
"BSD-2-Clause"
] | 2 | 2021-11-02T04:32:27.000Z | 2021-12-01T17:36:09.000Z | autotune/__version__.py | liuyangzhuan/autotune | bc24177a617025d2a47bc79563538cc6da45cfa9 | [
"BSD-2-Clause"
] | 5 | 2020-04-11T16:56:48.000Z | 2021-05-19T18:08:45.000Z | VERSION = (0, 0, 1)
__version__ = '.'.join(map(str, VERSION))
# alpha/beta/rc tags
__version_suffix__ = 'alpha0'
| 16.428571 | 41 | 0.669565 | VERSION = (0, 0, 1)
__version__ = '.'.join(map(str, VERSION))
__version_suffix__ = 'alpha0'
| true | true |
1c2b81ceac98cbc20aa92d8bf5f43895fe279a5c | 4,632 | py | Python | analysis/word2vec.py | chuajiesheng/twitter-sentiment-analysis | 7617243c953a20c517a737c79fe0f54e55aef140 | [
"Apache-2.0"
] | null | null | null | analysis/word2vec.py | chuajiesheng/twitter-sentiment-analysis | 7617243c953a20c517a737c79fe0f54e55aef140 | [
"Apache-2.0"
] | null | null | null | analysis/word2vec.py | chuajiesheng/twitter-sentiment-analysis | 7617243c953a20c517a737c79fe0f54e55aef140 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from sklearn.model_selection import *
from sklearn.ensemble import *
def get_dataset():
    """Read the three labelled tweet files and return ``(texts, labels)``.

    Each file must contain exactly 1367 lines; labels are -1/0/+1 for
    negative/neutral/positive tweets, in file order.
    """
    paths = ['./analysis/input/negative_tweets.txt',
             './analysis/input/neutral_tweets.txt',
             './analysis/input/positive_tweets.txt']
    texts = []
    for path in paths:
        with open(path, 'r') as handle:
            lines = [row.strip() for row in handle]
        assert len(lines) == 1367
        texts.extend(lines)
    labels = np.array([-1] * 1367 + [0] * 1367 + [1] * 1367)
    return texts, labels
# gensim modules
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
# random shuffle
from random import shuffle
# numpy
import numpy
# classifier
from sklearn.linear_model import LogisticRegression
import logging
import sys
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
class TaggedLineSentence(object):
    """Streams gensim TaggedDocuments from several files, one document per line.

    ``sources`` maps file path -> tag prefix; document *n* of a file is
    tagged ``<prefix>_<n>``.
    """
    def __init__(self, sources):
        self.sources = sources
        flipped = {}
        # make sure that keys are unique
        # (flipped maps prefix -> [path] and exists only for this duplicate check)
        for key, value in sources.items():
            if value not in flipped:
                flipped[value] = [key]
            else:
                raise Exception('Non-unique prefix encountered')
    def __iter__(self):
        # Lazily yield one TaggedDocument per line of each source file.
        for source, prefix in self.sources.items():
            with utils.smart_open(source) as fin:
                for item_no, line in enumerate(fin):
                    yield TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no])
    def to_array(self):
        # Materialise the whole corpus in memory; also caches it for sentences_perm().
        self.sentences = []
        for source, prefix in self.sources.items():
            with utils.smart_open(source) as fin:
                for item_no, line in enumerate(fin):
                    self.sentences.append(TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no]))
        return self.sentences
    def sentences_perm(self):
        # In-place shuffle of the cached corpus; to_array() must have run first.
        shuffle(self.sentences)
        return self.sentences
log.info('source load')
# Map each labelled tweet file to the tag prefix used for its TaggedDocuments.
sources = {'./analysis/input/negative_tweets.txt': 'NEG', './analysis/input/neutral_tweets.txt': 'NEU', './analysis/input/positive_tweets.txt': 'POS'}
log.info('TaggedDocument')
sentences = TaggedLineSentence(sources)
log.info('D2V')
# Doc2Vec hyper-parameters (`size` -> vector dimensionality; gensim < 4 API).
model = Doc2Vec(min_count=1, window=60, size=100, sample=1e-4, negative=5, workers=7)
model.build_vocab(sentences.to_array())
log.info('Epoch')
# Train for 10 passes, reshuffling the cached documents before each one.
for epoch in range(10):
    log.info('EPOCH: {}'.format(epoch))
    # NOTE(review): gensim >= 1.0 requires total_examples/epochs arguments
    # here -- confirm the pinned gensim version.
    model.train(sentences.sentences_perm())
    # NOTE(review): debug REPL left in -- this blocks after every epoch
    # until the interactive session is exited; remove for unattended runs.
    import code; code.interact(local=dict(globals(), **locals()))
log.info('Model Save')
model.save('./imdb.d2v')
model = Doc2Vec.load('./imdb.d2v')
log.info('Sentiment')
X, Y = get_dataset()
# 10 random 80/20 train/test splits over the 3 * 1367 tweets.
ss = ShuffleSplit(n_splits=10, test_size=0.2, random_state=10)
for train, test in ss.split(X, Y):
    size_train = len(train)
    size_test = len(test)
    # Document vectors (dim 100) and labels for the training split.
    train_arrays = numpy.zeros((size_train, 100))
    train_labels = numpy.zeros(size_train)
    X_train = np.array(X)[train]
    y_train = Y[train]
    X_test = np.array(X)[test]
    y_test = Y[test]
    for index, i in enumerate(train):
        # Recover the docvec tag from the global row index: the corpus is
        # laid out as 1367 NEG rows, then 1367 NEU, then 1367 POS.
        if Y[i] == 1:
            prefix = 'POS_' + str(i - 1367 - 1367)
        elif Y[i] == 0:
            prefix = 'NEU_' + str(i - 1367)
        else:
            prefix = 'NEG_' + str(i)
        train_arrays[index] = model.docvecs[prefix]
        train_labels[index] = Y[i]
    test_arrays = numpy.zeros((size_test, 100))
    test_labels = numpy.zeros(size_test)
    for index, i in enumerate(test):
        if Y[i] == 1:
            prefix = 'POS_' + str(i - 1367 - 1367)
        elif Y[i] == 0:
            prefix = 'NEU_' + str(i - 1367)
        else:
            prefix = 'NEG_' + str(i)
        test_arrays[index] = model.docvecs[prefix]
        test_labels[index] = Y[i]
    log.info('Fitting')
    classifier = LogisticRegression(C=1.0, dual=False, fit_intercept=True, intercept_scaling=1, penalty='l2', random_state=None, tol=0.00001)
    classifier.fit(train_arrays, train_labels)
    print(classifier.score(test_arrays, test_labels))
    # NOTE(review): class_weight='auto' was removed in scikit-learn 0.19;
    # modern versions expect 'balanced' -- confirm the pinned sklearn version.
    clf = RandomForestClassifier(random_state=0, n_estimators=80, class_weight='auto').fit(train_arrays, train_labels)
    print(clf.score(test_arrays, test_labels))
def parts(str, current, elements):
    """Split *str* into runs of repeated characters, appended after *elements*.

    ``current`` seeds the run in progress; the (possibly empty) final run is
    always appended, so ``parts("", "", [])`` yields ``[""]``.  The caller's
    ``elements`` list is never mutated -- a new list is returned.
    """
    for ch in str:
        if current == '' or current.startswith(ch):
            # Same character as the run in progress (or no run yet): extend it.
            current += ch
        else:
            # Character changed: close the finished run, start a new one.
            elements = elements + [current]
            current = ch
    return elements + [current]
from sklearn.model_selection import *
from sklearn.ensemble import *
def get_dataset():
files = ['./analysis/input/negative_tweets.txt', './analysis/input/neutral_tweets.txt', './analysis/input/positive_tweets.txt']
x = []
for file in files:
s = []
with open(file, 'r') as f:
for line in f:
s.append(line.strip())
assert len(s) == 1367
x.extend(s)
y = np.array([-1] * 1367 + [0] * 1367 + [1] * 1367)
return x, y
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
from random import shuffle
import numpy
from sklearn.linear_model import LogisticRegression
import logging
import sys
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
class TaggedLineSentence(object):
def __init__(self, sources):
self.sources = sources
flipped = {}
for key, value in sources.items():
if value not in flipped:
flipped[value] = [key]
else:
raise Exception('Non-unique prefix encountered')
def __iter__(self):
for source, prefix in self.sources.items():
with utils.smart_open(source) as fin:
for item_no, line in enumerate(fin):
yield TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no])
def to_array(self):
self.sentences = []
for source, prefix in self.sources.items():
with utils.smart_open(source) as fin:
for item_no, line in enumerate(fin):
self.sentences.append(TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no]))
return self.sentences
def sentences_perm(self):
shuffle(self.sentences)
return self.sentences
log.info('source load')
sources = {'./analysis/input/negative_tweets.txt': 'NEG', './analysis/input/neutral_tweets.txt': 'NEU', './analysis/input/positive_tweets.txt': 'POS'}
log.info('TaggedDocument')
sentences = TaggedLineSentence(sources)
log.info('D2V')
model = Doc2Vec(min_count=1, window=60, size=100, sample=1e-4, negative=5, workers=7)
model.build_vocab(sentences.to_array())
log.info('Epoch')
for epoch in range(10):
log.info('EPOCH: {}'.format(epoch))
model.train(sentences.sentences_perm())
import code; code.interact(local=dict(globals(), **locals()))
log.info('Model Save')
model.save('./imdb.d2v')
model = Doc2Vec.load('./imdb.d2v')
log.info('Sentiment')
X, Y = get_dataset()
ss = ShuffleSplit(n_splits=10, test_size=0.2, random_state=10)
for train, test in ss.split(X, Y):
size_train = len(train)
size_test = len(test)
train_arrays = numpy.zeros((size_train, 100))
train_labels = numpy.zeros(size_train)
X_train = np.array(X)[train]
y_train = Y[train]
X_test = np.array(X)[test]
y_test = Y[test]
for index, i in enumerate(train):
if Y[i] == 1:
prefix = 'POS_' + str(i - 1367 - 1367)
elif Y[i] == 0:
prefix = 'NEU_' + str(i - 1367)
else:
prefix = 'NEG_' + str(i)
train_arrays[index] = model.docvecs[prefix]
train_labels[index] = Y[i]
test_arrays = numpy.zeros((size_test, 100))
test_labels = numpy.zeros(size_test)
for index, i in enumerate(test):
if Y[i] == 1:
prefix = 'POS_' + str(i - 1367 - 1367)
elif Y[i] == 0:
prefix = 'NEU_' + str(i - 1367)
else:
prefix = 'NEG_' + str(i)
test_arrays[index] = model.docvecs[prefix]
test_labels[index] = Y[i]
log.info('Fitting')
classifier = LogisticRegression(C=1.0, dual=False, fit_intercept=True, intercept_scaling=1, penalty='l2', random_state=None, tol=0.00001)
classifier.fit(train_arrays, train_labels)
print(classifier.score(test_arrays, test_labels))
clf = RandomForestClassifier(random_state=0, n_estimators=80, class_weight='auto').fit(train_arrays, train_labels)
print(clf.score(test_arrays, test_labels))
def parts(str, current, elements):
if len(str) < 1:
return elements + [current]
if current == '' or current.startswith(str[0]):
return parts(str[1:], current + str[0], elements)
return parts(str[1:], str[0], elements + [current]) | true | true |
1c2b81e7ab90bffaa94a18139ec9ba87b0252dae | 12,712 | py | Python | visualization/tokens_listener.py | suhasini-gesis/IWAAN | 343b48908198019e9be25332639cded204f8e7b4 | [
"MIT"
] | null | null | null | visualization/tokens_listener.py | suhasini-gesis/IWAAN | 343b48908198019e9be25332639cded204f8e7b4 | [
"MIT"
] | null | null | null | visualization/tokens_listener.py | suhasini-gesis/IWAAN | 343b48908198019e9be25332639cded204f8e7b4 | [
"MIT"
] | null | null | null | import copy
import qgrid
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, Markdown as md, clear_output, HTML
from ipywidgets import Output, fixed
from .wordclouder import WordClouder
from .editors_listener import remove_stopwords
from datetime import datetime, timedelta
import plotly
import plotly.graph_objects as go
from metrics.token import TokensManager
from metrics.conflict import ConflictManager
class TokensListener():
    """Notebook widget that shows the token-level history of one revision.

    Given a selected revision id, renders a qgrid table of every token
    touched by that revision (including the token's earlier and later
    actions) and links each row to the corresponding Wikipedia diff.

    NOTE(review): requires a Jupyter environment (qgrid + IPython display);
    not usable from plain scripts.
    """
    def __init__(self, agg, sources, lng):
        # One (editor_id, name) row per editor, for id -> display-name joins.
        self.editors = agg[["editor_str", "editor"]].drop_duplicates().rename({"editor_str": "editor_id",
                                                        "editor": "name"}, axis=1).reset_index(drop=True)
        # `sources["tokens_all"]` is the per-token action history dataframe.
        self.sources = sources
        # Wikipedia language code, used to build diff URLs.
        self.lng = lng
        self.page_title = sources["tokens_all"]["article_title"].unique()[0]
    def get_columns(self):
        """Derive per-token 'time_diff' and 'reverted_editor' columns in place."""
        #create columns 'time_diff' (Time in sec between this action and the last action on the token)
        # and 'reverted_editor' (editor's name who made a previous action on the token)
        self.token_source.sort_values(['token_id', 'rev_time'], ascending = True, inplace=True)
        # shift(1) pairs each action with the previous action on the same token
        # (valid because of the sort above; the first row of each token is
        # blanked out below).
        self.token_source['time_diff'] = self.token_source['rev_time'] - self.token_source.shift(1)['rev_time']
        self.token_source['reverted_editor'] = self.token_source.shift(1)['name']
        to_delete = (
            #First row of each token
            (self.token_source['o_rev_id'] == self.token_source['rev_id']))
        # delete but keep the row
        self.token_source.loc[to_delete, 'time_diff'] = np.nan
        self.token_source.loc[to_delete, 'reverted_editor'] = np.nan
    def convert_oadd(self):
        """Mark each token's original insertion with action 'oadd' (in place)."""
        #convert 'action' of first insertion to 'oadd'
        #self.token_source['action'] = self.token_source.apply(lambda x: 'oadd' if x['o_rev_id'] == x['rev_id'] else x['action'], axis=1)
        # vectorized form: the originating revision equals the acting revision
        mask_add = self.token_source["o_rev_id"] == self.token_source["rev_id"]
        self.token_source.loc[mask_add, "action"] = "oadd"
    def get_editor_names(self):
        """Join editor display names onto self.token_source via editor id."""
        #get editor names by editor id
        self.token_source = self.token_source.rename(columns={"editor":'editor_id'})
        self.token_source['editor_id'] = self.token_source['editor_id'].astype(str)
        tokens_merged = self.editors[['editor_id', 'name']].merge(self.token_source, right_index=True, on='editor_id', how='outer')
        # outer merge can create editor-only rows with no token; drop them
        self.token_source = tokens_merged[tokens_merged['token'].notnull()].copy()
    def convert_time_diff(time_diff):
        """Format a Timedelta as 'days:hours:minutes:seconds'; None on failure.

        NOTE(review): defined without `self` and only ever called as
        `TokensListener.convert_time_diff(x)`; should be a @staticmethod.
        """
        #convert time_diff to display as time in days:hours:min:sec format
        try:
            s = time_diff.seconds
            hours, remainder = divmod(s, 3600)
            minutes, seconds = divmod(remainder, 60)
            return '{:02}:{:02}:{:02}:{}'.format(int(time_diff.days), int(hours), int(minutes), int(seconds))
        except ValueError:
            # e.g. NaT / missing time_diff for a token's first action
            return None
    def on_selection_change(self, change):
        """qgrid selection callback: print the diff URL for the clicked row."""
        #show link to wikipedia diff when clicking on a row
        with self.out213:
            clear_output()
            # Extract the rev_id selected and convert it to string.
            diff = self.qgrid_selected_revision.get_selected_df().reset_index()['rev_id'].iloc[0]
            # Print URL.
            url = f"https://{self.lng}.wikipedia.org/w/index.php?&title={self.page_title}&diff={diff}"
            print('Link to the wikipedia diff: ')
            print(url)
    def listen(self, revid, stopwords):
        """Render the token table for revision `revid`.

        `stopwords` == 'Not included' filters stop words out of the token
        source first. Displays markdown/HTML and a qgrid widget; returns
        early (with a message) when the revision has no tokens.
        """
        # Get source data through ConflictManager.
        if stopwords == 'Not included':
            link_token = remove_stopwords(self.sources["tokens_all"], self.lng)
            self.token_source = link_token
            del link_token
        else:
            link_token = self.sources["tokens_all"]
            self.token_source = link_token
            del link_token
        self.token_source = self.token_source.reset_index(drop=True)
        #selected revision id:
        #self.rev_id = int(rev_id)
        #extract editor name and timestamp to display before the table
        self.rev_id = revid
        self.filtered_df = self.token_source[self.token_source['rev_id']==self.rev_id]
        if len(self.filtered_df) != 0:
            editor_name = self.editors.loc[self.editors['editor_id'] == self.filtered_df['editor'].values[0], 'name'].values[0]
        else:
            return display(md("No tokens in this revision!"))
        timestamp = pd.DatetimeIndex(self.token_source[self.token_source['rev_id']==self.rev_id]['rev_time'])[0]
        display(md(f"***Selected revision: ID: {self.rev_id}, editor name: {str(editor_name)}, timestamp: {str(timestamp.date())} {str(timestamp.time())}***"))
        # Print URL to wikipedia diff.
        url = f"https://{self.lng}.wikipedia.org/w/index.php?title={self.page_title}&diff={self.rev_id}"
        display(HTML(f'<a href="{url}" target="_blank">Click here to see the Wikipedia Text DIFF</a>'))
        if self.rev_id != None:
            #add necessary columns and process the dataframe:
            self.convert_oadd()
            self.get_editor_names()
            self.get_columns()
            #self.token_source['time_diff'] = self.token_source['time_diff'].apply(lambda x: TokensListener.convert_time_diff(x))
            #sort the dataframe by timestamp and token_id:
            self.token_source.sort_values(['rev_time', 'token_id'], ascending = True, inplace=True)
            #get tokens from the selected revision (from previous and future revisions as well):
            rev_tokens = self.token_source.loc[self.token_source['rev_id'] == self.rev_id, 'token_id'].values
            tokens_for_grid = self.token_source.loc[self.token_source['token_id'].isin(rev_tokens), ['token', 'token_id', 'action', 'rev_id', 'rev_time', 'name', 'o_rev_id', 'reverted_editor', 'time_diff' ]].rename(columns={'token': 'string', 'name': 'editor'})
            #convert the format of columns to display:
            tokens_for_grid['rev_id'] = tokens_for_grid['rev_id'].astype(int).astype(str)
            tokens_for_grid['time_diff'] = tokens_for_grid['time_diff'].apply(lambda x: TokensListener.convert_time_diff(x))
            tokens_for_grid['time_diff'] = tokens_for_grid['time_diff'].astype(str)
            tokens_for_grid['token_id'] = tokens_for_grid['token_id'].astype(int).astype(str)
            tokens_for_grid.sort_values(["token_id", "rev_time"], inplace=True)
            tokens_for_grid.set_index('token_id', inplace=True)
            self.tokens_for_grid = tokens_for_grid.copy()
            #qgrid widget:
            columns_set = {"rev_time": {"width": 180}, "action": {"width": 65}, "string": {"width": 100}, "token_id": {"width": 94}}
            qgrid_selected_revision = qgrid.show_grid(self.tokens_for_grid, column_definitions=columns_set)
            self.qgrid_selected_revision = qgrid_selected_revision
            display(self.qgrid_selected_revision)
            # output area reused by on_selection_change for the diff link
            self.out213 = Output()
            display(self.out213)
            self.qgrid_selected_revision.observe(self.on_selection_change, names=['_selected_rows'])
        else:
            display(md(f'**The selected revision does not exist for this page. Try another**'))
class TokensOwnedListener():
    """Notebook widget plotting, over time, how many still-surviving tokens
    each editor owns (stacked plotly area chart of the top 15 owners).

    NOTE(review): requires a Jupyter environment (plotly + IPython display).
    """
    def __init__(self, agg, sources, lng):
        # One (editor_id, name) row per editor, for id -> display-name joins.
        self.editors = agg[["editor_str", "editor"]].drop_duplicates().rename({"editor_str": "editor_id",
                                                        "editor": "name"}, axis=1).reset_index(drop=True)
        self.sources = sources
        self.lng = lng
        self.page_title = sources["tokens_all"]["article_title"].unique()[0]
    def get_editor_names(self):
        """Join owner display names onto self.token_source via 'o_editor'."""
        #get editor names by editor id
#         self.token_source = self.token_source.rename(columns={"editor":'editor_id'})
        self.editors['o_editor'] = self.editors['editor_id'].astype(str)
        self.token_source['o_editor'] = self.token_source['o_editor'].astype(str)
        tokens_merged = self.editors[['o_editor', 'name']].merge(self.token_source, right_index=True, on='o_editor', how='outer')
        # drop editor-only rows produced by the outer merge
        self.token_source = tokens_merged[tokens_merged['token'].notnull()].copy()
    def listen(self,_range1, _range2, stopwords, granularity):
        """Plot surviving-token ownership between dates _range1 and _range2.

        `granularity` is a period string whose first character is a pandas
        frequency alias (e.g. 'Daily' -> 'D'). `stopwords` == 'Not included'
        filters stop words out of the token source first.
        """
        # Get source data through ConflictManager.
        if stopwords == 'Not included':
            link_token = remove_stopwords(self.sources["tokens_all"], self.lng)
            self.token_source = link_token
            del link_token
        else:
            link_token = self.sources["tokens_all"]
            self.token_source = link_token
            del link_token
        self.token_source = self.token_source.reset_index(drop=True)
        # basic date validation (year must be 4 digits, range must be ordered)
        if (len(str(_range1.year)) < 4) | (len(str(_range2.year)) < 4):
            return display(md("Please enter the correct date!"))
        if _range1 > _range2:
            return display(md("Please enter the correct date!"))
        else:
            self.token_source = self.token_source[(self.token_source.rev_time.dt.date >= _range1) & (self.token_source.rev_time.dt.date <= _range2)]
        self.token_source['rev_time'] = pd.to_datetime(self.token_source['rev_time']).dt.tz_localize(None)
        self.get_editor_names()
        days = self.token_source['rev_time'].dt.to_period(granularity[0]).unique() #getting unique days
        today = pd.Period(datetime.today(), freq=granularity[0])
        days = pd.Series(np.append(days, today)).sort_values(ascending=False) #adding today
        if len(days) > 0:
            days = days.dt.to_timestamp(granularity[0]) + pd.DateOffset(1) #converting and adding one day for extracting previous dates from dataframe
        self.summ = pd.DataFrame(columns=['name', 'action', 'rev_time'])
        _abs = []
        df = self.token_source
        # Walk the period boundaries from newest to oldest; `df` shrinks each
        # iteration to the actions at or before the current boundary.
        for rev_time in days:
            df = df[df['rev_time'] <= rev_time]
            last_action = df.groupby('token_id').last() #last of group values for each token id
            # a token survives if its latest action is not an 'out'
            surv = last_action[last_action['action'] != 'out'].groupby('name')['action'].agg('count').reset_index()
            surv['rev_time'] = rev_time - pd.DateOffset(1)
            # NOTE(review): DataFrame.append is deprecated (removed in
            # pandas 2.0) -- migrate to pd.concat([self.summ, surv]).
            self.summ = self.summ.append(surv)
        #getting top editors among the token owners over all time
        top_editors = self.summ.groupby('name')['action'].agg('sum').sort_values(ascending=False).reset_index()[:15]
        first_date = self.summ.groupby('name').last().reset_index() #first date of oadd for every editor
        top_editors_merged = pd.merge(top_editors, first_date[['name', 'rev_time']], on='name').sort_values('rev_time') #adding first date for each editor and sorting by date of first oadd
        #plot
        fig = go.Figure()
        for editor in top_editors_merged['name']:
            x = self.summ.loc[self.summ['name']==editor, 'rev_time']
            y = self.summ.loc[self.summ['name']==editor, 'action']
            fig.add_trace(go.Scatter(x=x, y=y, name = editor, stackgroup='one'))
        fig.update_layout(hovermode='x unified', showlegend=True, margin=go.layout.Margin(l=50,
                                                                r=50,
                                                                b=150,
                                                                t=10,
                                                                pad=3))
        fig.show()
#             data = []
#             for editor in top_editors_merged['name']:
#                 x = self.summ.loc[self.summ['name']==editor, 'rev_time']
#                 y = self.summ.loc[self.summ['name']==editor, 'action']
#                 data.append(go.Scatter(x=x, y=y, name = editor, stackgroup='one'))
#             layout = go.Layout(hovermode='x unified', showlegend=True, margin=go.layout.Margin(l=50,
#                                                                    r=50,
#                                                                    b=150,
#                                                                    t=10,
#                                                                    pad=3))
#             plotly.offline.init_notebook_mode(connected=True)
#             plotly.offline.iplot({"data": data, "layout": layout})
| 51.674797 | 261 | 0.590387 | import copy
import qgrid
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, Markdown as md, clear_output, HTML
from ipywidgets import Output, fixed
from .wordclouder import WordClouder
from .editors_listener import remove_stopwords
from datetime import datetime, timedelta
import plotly
import plotly.graph_objects as go
from metrics.token import TokensManager
from metrics.conflict import ConflictManager
class TokensListener():
def __init__(self, agg, sources, lng):
self.editors = agg[["editor_str", "editor"]].drop_duplicates().rename({"editor_str": "editor_id",
"editor": "name"}, axis=1).reset_index(drop=True)
self.sources = sources
self.lng = lng
self.page_title = sources["tokens_all"]["article_title"].unique()[0]
def get_columns(self):
self.token_source.sort_values(['token_id', 'rev_time'], ascending = True, inplace=True)
self.token_source['time_diff'] = self.token_source['rev_time'] - self.token_source.shift(1)['rev_time']
self.token_source['reverted_editor'] = self.token_source.shift(1)['name']
to_delete = (
#First row of each token
(self.token_source['o_rev_id'] == self.token_source['rev_id']))
# delete but keep the row
self.token_source.loc[to_delete, 'time_diff'] = np.nan
self.token_source.loc[to_delete, 'reverted_editor'] = np.nan
def convert_oadd(self):
#convert 'action' of first insertion to 'oadd'
#self.token_source['action'] = self.token_source.apply(lambda x: 'oadd' if x['o_rev_id'] == x['rev_id'] else x['action'], axis=1)
mask_add = self.token_source["o_rev_id"] == self.token_source["rev_id"]
self.token_source.loc[mask_add, "action"] = "oadd"
def get_editor_names(self):
#get editor names by editor id
self.token_source = self.token_source.rename(columns={"editor":'editor_id'})
self.token_source['editor_id'] = self.token_source['editor_id'].astype(str)
tokens_merged = self.editors[['editor_id', 'name']].merge(self.token_source, right_index=True, on='editor_id', how='outer')
self.token_source = tokens_merged[tokens_merged['token'].notnull()].copy()
def convert_time_diff(time_diff):
#convert time_diff to display as time in days:hours:min:sec format
try:
s = time_diff.seconds
hours, remainder = divmod(s, 3600)
minutes, seconds = divmod(remainder, 60)
return '{:02}:{:02}:{:02}:{}'.format(int(time_diff.days), int(hours), int(minutes), int(seconds))
except ValueError:
return None
def on_selection_change(self, change):
#show link to wikipedia diff when clicking on a row
with self.out213:
clear_output()
# Extract the rev_id selected and convert it to string.
diff = self.qgrid_selected_revision.get_selected_df().reset_index()['rev_id'].iloc[0]
# Print URL.
url = f"https://{self.lng}.wikipedia.org/w/index.php?&title={self.page_title}&diff={diff}"
print('Link to the wikipedia diff: ')
print(url)
def listen(self, revid, stopwords):
# Get source data through ConflictManager.
if stopwords == 'Not included':
link_token = remove_stopwords(self.sources["tokens_all"], self.lng)
self.token_source = link_token
del link_token
else:
link_token = self.sources["tokens_all"]
self.token_source = link_token
del link_token
self.token_source = self.token_source.reset_index(drop=True)
#selected revision id:
#self.rev_id = int(rev_id)
#extract editor name and timestamp to display before the table
self.rev_id = revid
self.filtered_df = self.token_source[self.token_source['rev_id']==self.rev_id]
if len(self.filtered_df) != 0:
editor_name = self.editors.loc[self.editors['editor_id'] == self.filtered_df['editor'].values[0], 'name'].values[0]
else:
return display(md("No tokens in this revision!"))
timestamp = pd.DatetimeIndex(self.token_source[self.token_source['rev_id']==self.rev_id]['rev_time'])[0]
display(md(f"***Selected revision: ID: {self.rev_id}, editor name: {str(editor_name)}, timestamp: {str(timestamp.date())} {str(timestamp.time())}***"))
# Print URL to wikipedia diff.
url = f"https://{self.lng}.wikipedia.org/w/index.php?title={self.page_title}&diff={self.rev_id}"
display(HTML(f'<a href="{url}" target="_blank">Click here to see the Wikipedia Text DIFF</a>'))
if self.rev_id != None:
#add necessary columns and process the dataframe:
self.convert_oadd()
self.get_editor_names()
self.get_columns()
#self.token_source['time_diff'] = self.token_source['time_diff'].apply(lambda x: TokensListener.convert_time_diff(x))
#sort the dataframe by timestamp and token_id:
self.token_source.sort_values(['rev_time', 'token_id'], ascending = True, inplace=True)
#get tokens from the selected revision (from previous and future revisions as well):
rev_tokens = self.token_source.loc[self.token_source['rev_id'] == self.rev_id, 'token_id'].values
tokens_for_grid = self.token_source.loc[self.token_source['token_id'].isin(rev_tokens), ['token', 'token_id', 'action', 'rev_id', 'rev_time', 'name', 'o_rev_id', 'reverted_editor', 'time_diff' ]].rename(columns={'token': 'string', 'name': 'editor'})
#convert the format of columns to display:
tokens_for_grid['rev_id'] = tokens_for_grid['rev_id'].astype(int).astype(str)
tokens_for_grid['time_diff'] = tokens_for_grid['time_diff'].apply(lambda x: TokensListener.convert_time_diff(x))
tokens_for_grid['time_diff'] = tokens_for_grid['time_diff'].astype(str)
tokens_for_grid['token_id'] = tokens_for_grid['token_id'].astype(int).astype(str)
tokens_for_grid.sort_values(["token_id", "rev_time"], inplace=True)
tokens_for_grid.set_index('token_id', inplace=True)
self.tokens_for_grid = tokens_for_grid.copy()
#qgrid widget:
columns_set = {"rev_time": {"width": 180}, "action": {"width": 65}, "string": {"width": 100}, "token_id": {"width": 94}}
qgrid_selected_revision = qgrid.show_grid(self.tokens_for_grid, column_definitions=columns_set)
self.qgrid_selected_revision = qgrid_selected_revision
display(self.qgrid_selected_revision)
self.out213 = Output()
display(self.out213)
self.qgrid_selected_revision.observe(self.on_selection_change, names=['_selected_rows'])
else:
display(md(f'**The selected revision does not exist for this page. Try another**'))
class TokensOwnedListener():
def __init__(self, agg, sources, lng):
self.editors = agg[["editor_str", "editor"]].drop_duplicates().rename({"editor_str": "editor_id",
"editor": "name"}, axis=1).reset_index(drop=True)
self.sources = sources
self.lng = lng
self.page_title = sources["tokens_all"]["article_title"].unique()[0]
def get_editor_names(self):
#get editor names by editor id
# self.token_source = self.token_source.rename(columns={"editor":'editor_id'})
self.editors['o_editor'] = self.editors['editor_id'].astype(str)
self.token_source['o_editor'] = self.token_source['o_editor'].astype(str)
tokens_merged = self.editors[['o_editor', 'name']].merge(self.token_source, right_index=True, on='o_editor', how='outer')
self.token_source = tokens_merged[tokens_merged['token'].notnull()].copy()
def listen(self,_range1, _range2, stopwords, granularity):
# Get source data through ConflictManager.
if stopwords == 'Not included':
link_token = remove_stopwords(self.sources["tokens_all"], self.lng)
self.token_source = link_token
del link_token
else:
link_token = self.sources["tokens_all"]
self.token_source = link_token
del link_token
self.token_source = self.token_source.reset_index(drop=True)
if (len(str(_range1.year)) < 4) | (len(str(_range2.year)) < 4):
return display(md("Please enter the correct date!"))
if _range1 > _range2:
return display(md("Please enter the correct date!"))
else:
self.token_source = self.token_source[(self.token_source.rev_time.dt.date >= _range1) & (self.token_source.rev_time.dt.date <= _range2)]
self.token_source['rev_time'] = pd.to_datetime(self.token_source['rev_time']).dt.tz_localize(None)
self.get_editor_names()
days = self.token_source['rev_time'].dt.to_period(granularity[0]).unique() #getting unique days
today = pd.Period(datetime.today(), freq=granularity[0])
days = pd.Series(np.append(days, today)).sort_values(ascending=False) #adding today
if len(days) > 0:
days = days.dt.to_timestamp(granularity[0]) + pd.DateOffset(1) #converting and adding one day for extracting previous dates from dataframe
self.summ = pd.DataFrame(columns=['name', 'action', 'rev_time'])
_abs = []
df = self.token_source
for rev_time in days:
df = df[df['rev_time'] <= rev_time]
last_action = df.groupby('token_id').last() #last of group values for each token id
surv = last_action[last_action['action'] != 'out'].groupby('name')['action'].agg('count').reset_index()
surv['rev_time'] = rev_time - pd.DateOffset(1)
self.summ = self.summ.append(surv)
#getting top editors among the token owners over all time
top_editors = self.summ.groupby('name')['action'].agg('sum').sort_values(ascending=False).reset_index()[:15]
first_date = self.summ.groupby('name').last().reset_index() #first date of oadd for every editor
top_editors_merged = pd.merge(top_editors, first_date[['name', 'rev_time']], on='name').sort_values('rev_time') #adding first date for each editor and sorting by date of first oadd
#plot
fig = go.Figure()
for editor in top_editors_merged['name']:
x = self.summ.loc[self.summ['name']==editor, 'rev_time']
y = self.summ.loc[self.summ['name']==editor, 'action']
fig.add_trace(go.Scatter(x=x, y=y, name = editor, stackgroup='one'))
fig.update_layout(hovermode='x unified', showlegend=True, margin=go.layout.Margin(l=50,
r=50,
b=150,
t=10,
pad=3))
fig.show()
# data = []
# for editor in top_editors_merged['name']:
# x = self.summ.loc[self.summ['name']==editor, 'rev_time']
# y = self.summ.loc[self.summ['name']==editor, 'action']
# data.append(go.Scatter(x=x, y=y, name = editor, stackgroup='one'))
# layout = go.Layout(hovermode='x unified', showlegend=True, margin=go.layout.Margin(l=50,
# r=50,
# b=150,
# t=10,
# pad=3))
# plotly.offline.init_notebook_mode(connected=True)
# plotly.offline.iplot({"data": data, "layout": layout})
| true | true |
1c2b825ce775bcdbbcb3a728e961d8a28b823a22 | 921 | py | Python | backend/game/models.py | Daanvdk/duunbox | 7ea3397a48cf34faefb856b511526bffc88598be | [
"MIT"
] | null | null | null | backend/game/models.py | Daanvdk/duunbox | 7ea3397a48cf34faefb856b511526bffc88598be | [
"MIT"
] | 5 | 2021-03-30T12:57:03.000Z | 2021-09-22T18:47:27.000Z | backend/game/models.py | daanvdk/duunbox | 7ea3397a48cf34faefb856b511526bffc88598be | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.postgres.fields import JSONField
from django.conf import settings
class Room(models.Model):
    """A game room identified by a short text code.

    Holds which installed game is being played, whether it has started,
    and the game's opaque JSON state.
    """
    # The room code is the primary key (no separate auto id).
    code = models.TextField(primary_key=True)
    # Name of one of settings.INSTALLED_GAMES; may be unset (blank/null)
    # until a game is chosen.
    game = models.TextField(
        choices=[(name, name) for name in settings.INSTALLED_GAMES],
        blank=True, null=True,
    )
    # Whether the game has been started.
    started = models.BooleanField(default=False)
    # Per-game state blob; schema is game-specific (opaque to this model).
    state = JSONField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Player(models.Model):
    """A participant in a Room; deleted together with its room (CASCADE)."""
    # Reverse accessor: room.players
    room = models.ForeignKey(
        'Room', on_delete=models.CASCADE,
        related_name='players',
    )
    name = models.TextField()
    # Room-administration flag -- presumably set for the room creator;
    # TODO confirm against the view/consumer that creates players.
    admin = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    class Meta:
        # A player name may appear at most once per room.
        unique_together = [('room', 'name')]
| 27.088235 | 68 | 0.703583 | from django.db import models
from django.contrib.postgres.fields import JSONField
from django.conf import settings
class Room(models.Model):
code = models.TextField(primary_key=True)
game = models.TextField(
choices=[(name, name) for name in settings.INSTALLED_GAMES],
blank=True, null=True,
)
started = models.BooleanField(default=False)
state = JSONField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Player(models.Model):
room = models.ForeignKey(
'Room', on_delete=models.CASCADE,
related_name='players',
)
name = models.TextField()
admin = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
unique_together = [('room', 'name')]
| true | true |
1c2b838887442a3c302c3d5dc6f729de862f8c98 | 13,865 | py | Python | model/extract.py | iwangjian/ByteCup2018 | c59c6a495f81c493eaaf7fda710c8acd7ef148b9 | [
"MIT"
] | 80 | 2018-09-08T01:11:36.000Z | 2022-01-18T13:41:30.000Z | model/extract.py | Whoolly/ByteCup2018 | 348bdee3215c146ef7d6e4fe1fecbe4598798c8a | [
"MIT"
] | 3 | 2018-12-02T15:08:05.000Z | 2020-02-10T04:11:28.000Z | model/extract.py | Whoolly/ByteCup2018 | 348bdee3215c146ef7d6e4fe1fecbe4598798c8a | [
"MIT"
] | 21 | 2018-10-27T07:40:25.000Z | 2022-03-28T12:30:01.000Z | import torch
import numpy as np
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from .rnn import MultiLayerLSTMCells
from .rnn import lstm_encoder
from .util import sequence_mean, len_mask
from .attention import prob_normalize
from .embed_regularize import embedded_dropout
from .rnn import MultiLayerLSTMCells_abs_enc
from .dropout import LockedDropout
INI = 1e-2
class ConvSentEncoder(nn.Module):
    """
    Convolutional word-level sentence encoder
    w/ max-over-time pooling, [3, 4, 5] kernel sizes, ReLU activation

    Embedding-row dropout (`dropoute`, via `embedded_dropout`) is applied
    only in training mode; ordinary dropout (`dropout`) is applied to the
    embedded sequence before the convolutions.
    """
    def __init__(self, vocab_size, emb_dim, n_hidden, dropout, dropoute):
        super().__init__()
        self._embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        # one Conv1d per kernel size in [3, 4, 5]
        self._convs = nn.ModuleList([nn.Conv1d(emb_dim, n_hidden, i)
                                     for i in range(3, 6)])
        self._dropout = dropout
        self._dropoute = dropoute
        self._grad_handle = None

    def forward(self, input_):
        """Encode word-id sequences [batch, seq] into [batch, 3*n_hidden]."""
        emb_input = embedded_dropout(
            self._embedding, input_,
            dropout=self._dropoute if self.training else 0)
        # Conv1d expects [batch, channels, seq]
        conv_in = F.dropout(emb_input.transpose(1, 2),
                            self._dropout, training=self.training)
        # Sequences shorter than 6 time steps are right-padded with zeros so
        # every kernel has at least one valid position.  F.pad keeps the
        # tensor on its device and replaces the old deprecated
        # torch.autograd.Variable + numpy round-trip (and its debug prints).
        if conv_in.size(2) < 6:
            conv_in = F.pad(conv_in, (0, 6 - conv_in.size(2)))
        output = torch.cat([F.relu(conv(conv_in)).max(dim=2)[0]
                            for conv in self._convs], dim=1)
        return output

    def set_embedding(self, embedding):
        """embedding is the weight matrix"""
        assert self._embedding.weight.size() == embedding.size()
        self._embedding.weight.data.copy_(embedding)
        #self._embedding.weight.requires_grad = False
class LSTMEncoder(nn.Module):
    """Article-level (bi)LSTM over sentence vectors with learned initial
    states, built on the project's weight-dropped LSTM cells.

    NOTE(review): behavior of dropout/wdrop/dropouth is delegated to
    MultiLayerLSTMCells_abs_enc and LockedDropout -- see model/rnn.py and
    model/dropout.py for their exact semantics.
    """
    def __init__(self, input_dim, n_hidden, n_layer, dropout, wdrop, dropouth, bidirectional):
        super().__init__()
        # Learned initial hidden/cell states, one row per (layer, direction).
        self._init_h = nn.Parameter(
            torch.Tensor(n_layer*(2 if bidirectional else 1), n_hidden))
        self._init_c = nn.Parameter(
            torch.Tensor(n_layer*(2 if bidirectional else 1), n_hidden))
        init.uniform_(self._init_h, -INI, INI)
        init.uniform_(self._init_c, -INI, INI)
        # weight_dropoutput
        # self._lstm = nn.LSTM(input_dim, n_hidden, n_layer,
        #                      dropout=dropout, bidirectional=bidirectional)
        self.lockdrop = LockedDropout()
        self._lstm = MultiLayerLSTMCells_abs_enc(
            input_dim, n_hidden, n_layer,
            dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional,
            lockdrop=self.lockdrop
        )
    def forward(self, input_, in_lens=None):
        """Encode `input_` ([batch_size, max_num_sent, input_dim] Tensor);
        returns outputs as [batch_size, max_num_sent, hidden*directions]."""
        # Broadcast the learned initial states across the batch dimension.
        size = (self._init_h.size(0), input_.size(0), self._init_h.size(1))
        init_states = (self._init_h.unsqueeze(1).expand(*size),
                       self._init_c.unsqueeze(1).expand(*size))
        lstm_out, _ = lstm_encoder(
            input_, self._lstm, in_lens, init_states)
        # lstm_encoder returns time-major output; convert to batch-major.
        return lstm_out.transpose(0, 1)
    @property
    def input_size(self):
        return self._lstm.input_size
    @property
    def hidden_size(self):
        return self._lstm.hidden_size
    @property
    def num_layers(self):
        return self._lstm.num_layers
    @property
    def bidirectional(self):
        return self._lstm.bidirectional
class ExtractSumm(nn.Module):
    """ ff-ext: feed-forward extractive scorer.

    Sentences are embedded by a ConvSentEncoder and contextualized by an
    article-level LSTMEncoder.  A sentence's extraction logit is the sum of
    a content score (linear in its representation) and a salience score
    (dot product with a projected mean article representation).
    """
    def __init__(self, vocab_size, emb_dim,
                 conv_hidden, lstm_hidden, lstm_layer,
                 bidirectional, dropout=0.0, dropoute=0.1, wdrop=0.5, dropouth=0.3):
        super().__init__()
        self._sent_enc = ConvSentEncoder(
            vocab_size, emb_dim, conv_hidden, dropout, dropoute)
        self._art_enc = LSTMEncoder(
            3*conv_hidden, lstm_hidden, lstm_layer,
            dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional
        )
        lstm_out_dim = lstm_hidden * (2 if bidirectional else 1)
        # content scorer and article-representation projection
        self._sent_linear = nn.Linear(lstm_out_dim, 1)
        self._art_linear = nn.Linear(lstm_out_dim, lstm_out_dim)

    def forward(self, article_sents, sent_nums):
        """Unnormalized extraction logits for all real (non-padding)
        sentences, concatenated across the batch."""
        enc_sent, enc_art = self._encode(article_sents, sent_nums)
        saliency = torch.matmul(enc_sent, enc_art.unsqueeze(2))
        saliency = torch.cat(
            [s[:n] for s, n in zip(saliency, sent_nums)], dim=0)
        content = self._sent_linear(
            torch.cat([s[:n] for s, n in zip(enc_sent, sent_nums)], dim=0)
        )
        logit = (content + saliency).squeeze(1)
        return logit

    def extract(self, article_sents, sent_nums=None, k=4):
        """ extract top-k scored sentences from article (eval only)"""
        enc_sent, enc_art = self._encode(article_sents, sent_nums)
        saliency = torch.matmul(enc_sent, enc_art.unsqueeze(2))
        content = self._sent_linear(enc_sent)
        logit = (content + saliency).squeeze(2)
        if sent_nums is None:  # test-time extract only
            assert len(article_sents) == 1
            n_sent = logit.size(1)
            extracted = logit[0].topk(
                k if k < n_sent else n_sent, sorted=False  # original order
            )[1].tolist()
        else:
            extracted = [l[:n].topk(k if k < n else n)[1].tolist()
                         for n, l in zip(sent_nums, logit)]
        return extracted

    def _encode(self, article_sents, sent_nums):
        """Encode sentences, zero-pad articles to a common sentence count,
        and build the tanh-projected mean article representation."""
        if sent_nums is None:  # test-time encode only
            enc_sent = self._sent_enc(article_sents[0]).unsqueeze(0)
        else:
            max_n = max(sent_nums)
            enc_sents = [self._sent_enc(art_sent)
                         for art_sent in article_sents]
            def zero(n, device):
                z = torch.zeros(n, self._art_enc.input_size).to(device)
                return z
            enc_sent = torch.stack(
                [torch.cat([s, zero(max_n-n, s.device)],
                           dim=0) if n != max_n
                 else s
                 for s, n in zip(enc_sents, sent_nums)],
                dim=0
            )
        lstm_out = self._art_enc(enc_sent, sent_nums)
        # torch.tanh: torch.nn.functional.tanh is deprecated
        enc_art = torch.tanh(
            self._art_linear(sequence_mean(lstm_out, sent_nums, dim=1)))
        return lstm_out, enc_art

    def set_embedding(self, embedding):
        """Delegate pretrained-embedding loading to the sentence encoder."""
        self._sent_enc.set_embedding(embedding)
class LSTMPointerNet(nn.Module):
    """Pointer network as in Vinyals et al.

    An LSTM consumes the representations of previously selected sentences;
    its hidden state is refined by `n_hop` glimpse-attention hops over the
    memory before producing unnormalized pointer scores over the sentences.
    """
    def __init__(self, input_dim, n_hidden, n_layer,
                 dropout, n_hop):
        super().__init__()
        # learned initial LSTM states and initial decoder input
        self._init_h = nn.Parameter(torch.Tensor(n_layer, n_hidden))
        self._init_c = nn.Parameter(torch.Tensor(n_layer, n_hidden))
        self._init_i = nn.Parameter(torch.Tensor(input_dim))
        init.uniform_(self._init_h, -INI, INI)
        init.uniform_(self._init_c, -INI, INI)
        init.uniform_(self._init_i, -0.1, 0.1)
        self._lstm = nn.LSTM(
            input_dim, n_hidden, n_layer,
            bidirectional=False, dropout=dropout
        )
        # cell form of the same LSTM, built lazily for step-wise decoding
        self._lstm_cell = None
        # attention parameters
        self._attn_wm = nn.Parameter(torch.Tensor(input_dim, n_hidden))
        self._attn_wq = nn.Parameter(torch.Tensor(n_hidden, n_hidden))
        self._attn_v = nn.Parameter(torch.Tensor(n_hidden))
        init.xavier_normal_(self._attn_wm)
        init.xavier_normal_(self._attn_wq)
        init.uniform_(self._attn_v, -INI, INI)
        # hop parameters
        self._hop_wm = nn.Parameter(torch.Tensor(input_dim, n_hidden))
        self._hop_wq = nn.Parameter(torch.Tensor(n_hidden, n_hidden))
        self._hop_v = nn.Parameter(torch.Tensor(n_hidden))
        init.xavier_normal_(self._hop_wm)
        init.xavier_normal_(self._hop_wq)
        init.uniform_(self._hop_v, -INI, INI)
        self._n_hop = n_hop

    def forward(self, attn_mem, mem_sizes, lstm_in):
        """atten_mem: Tensor of size [batch_size, max_sent_num, input_dim]

        `lstm_in` holds the gold pointer inputs (teacher forcing); its first
        step is overwritten with the learned initial input.  Returns
        unnormalized pointer scores [batch, steps, max_sent_num].
        """
        attn_feat, hop_feat, lstm_states, init_i = self._prepare(attn_mem)
        # lstm_in = torch.cat([init_i, lstm_in], dim=1).transpose(0, 1)
        lstm_in[:,0,:] = init_i.squeeze(1)
        lstm_in = lstm_in.transpose(0, 1)
        query, final_states = self._lstm(lstm_in, lstm_states)
        query = query.transpose(0, 1)
        for _ in range(self._n_hop):
            query = LSTMPointerNet.attention(
                hop_feat, query, self._hop_v, self._hop_wq, mem_sizes)
        output = LSTMPointerNet.attention_score(
            attn_feat, query, self._attn_v, self._attn_wq)
        return output  # unormalized extraction logit

    def extract(self, attn_mem, mem_sizes, k):
        """extract k sentences, decode only, batch_size==1"""
        attn_feat, hop_feat, lstm_states, lstm_in = self._prepare(attn_mem)
        lstm_in = lstm_in.squeeze(1)
        if self._lstm_cell is None:
            self._lstm_cell = MultiLayerLSTMCells.convert(
                self._lstm).to(attn_mem.device)
        extracts = []
        for _ in range(k):
            h, c = self._lstm_cell(lstm_in, lstm_states)
            query = h[-1]
            for _ in range(self._n_hop):
                query = LSTMPointerNet.attention(
                    hop_feat, query, self._hop_v, self._hop_wq, mem_sizes)
            score = LSTMPointerNet.attention_score(
                attn_feat, query, self._attn_v, self._attn_wq)
            score = score.squeeze()
            # forbid re-selecting an already extracted sentence
            for e in extracts:
                score[e] = -1e6
            ext = score.max(dim=0)[1].item()
            extracts.append(ext)
            lstm_states = (h, c)
            lstm_in = attn_mem[:, ext, :]
        return extracts

    def _prepare(self, attn_mem):
        """Project the memory for attention/hop and broadcast initial states."""
        attn_feat = torch.matmul(attn_mem, self._attn_wm.unsqueeze(0))
        hop_feat = torch.matmul(attn_mem, self._hop_wm.unsqueeze(0))
        bs = attn_mem.size(0)
        n_l, d = self._init_h.size()
        size = (n_l, bs, d)
        lstm_states = (self._init_h.unsqueeze(1).expand(*size).contiguous(),
                       self._init_c.unsqueeze(1).expand(*size).contiguous())
        d = self._init_i.size(0)
        init_i = self._init_i.unsqueeze(0).unsqueeze(1).expand(bs, 1, d)
        return attn_feat, hop_feat, lstm_states, init_i

    @staticmethod
    def attention_score(attention, query, v, w):
        """ unnormalized attention score"""
        sum_ = attention.unsqueeze(1) + torch.matmul(
            query, w.unsqueeze(0)
        ).unsqueeze(2)  # [B, Nq, Ns, D]
        # torch.tanh: torch.nn.functional.tanh is deprecated
        score = torch.matmul(
            torch.tanh(sum_), v.unsqueeze(0).unsqueeze(1).unsqueeze(3)
        ).squeeze(3)  # [B, Nq, Ns]
        return score

    @staticmethod
    def attention(attention, query, v, w, mem_sizes):
        """ attention context vector"""
        score = LSTMPointerNet.attention_score(attention, query, v, w)
        if mem_sizes is None:
            norm_score = F.softmax(score, dim=-1)
        else:
            # mask out padding sentences before normalizing
            mask = len_mask(mem_sizes, score.device).unsqueeze(-2)
            norm_score = prob_normalize(score, mask)
        output = torch.matmul(norm_score, attention)
        return output
class PtrExtractSumm(nn.Module):
    """ rnn-ext: pointer-network sentence extractor.

    Sentences are embedded by a convolutional encoder, contextualized by an
    article-level LSTM, and then selected one at a time by an LSTM pointer
    network.
    """
    def __init__(self, emb_dim, vocab_size, conv_hidden,
                 lstm_hidden, lstm_layer, bidirectional,
                 n_hop=1, dropout=0.0, dropoute=0.1, wdrop=0.5, dropouth=0.3):
        super().__init__()
        self._sent_enc = ConvSentEncoder(
            vocab_size, emb_dim, conv_hidden, dropout, dropoute)
        self._art_enc = LSTMEncoder(
            3 * conv_hidden, lstm_hidden, lstm_layer,
            dropout=dropout, wdrop=wdrop, dropouth=dropouth,
            bidirectional=bidirectional
        )
        # the pointer consumes the (possibly bidirectional) LSTM outputs
        ptr_in_dim = lstm_hidden * (2 if bidirectional else 1)
        self._extractor = LSTMPointerNet(
            ptr_in_dim, lstm_hidden, lstm_layer, dropout, n_hop)

    def forward(self, article_sents, sent_nums, target):
        """Teacher-forced pointer scores for the gold extraction `target`."""
        encoded = self._encode(article_sents, sent_nums)
        batch, n_targets = target.size()
        feat_dim = encoded.size(2)
        # gather the encoder states of the gold sentences as pointer inputs
        gather_idx = target.unsqueeze(2).expand(batch, n_targets, feat_dim)
        pointer_input = torch.gather(encoded, dim=1, index=gather_idx)
        return self._extractor(encoded, sent_nums, pointer_input)

    def extract(self, article_sents, sent_nums=None, k=4):
        """Greedily extract `k` sentence indices (inference)."""
        encoded = self._encode(article_sents, sent_nums)
        return self._extractor.extract(encoded, sent_nums, k)

    def _encode(self, article_sents, sent_nums):
        """Encode sentences and zero-pad articles to a common sentence count."""
        if sent_nums is None:  # single-article decoding
            article_rep = self._sent_enc(article_sents[0]).unsqueeze(0)
        else:
            max_n = max(sent_nums)
            pad_dim = self._art_enc.input_size
            padded = []
            for art_sent, n in zip(article_sents, sent_nums):
                sents = self._sent_enc(art_sent)
                if n != max_n:
                    filler = torch.zeros(max_n - n, pad_dim).to(sents.device)
                    sents = torch.cat([sents, filler], dim=0)
                padded.append(sents)
            article_rep = torch.stack(padded, dim=0)
        return self._art_enc(article_rep, sent_nums)

    def set_embedding(self, embedding):
        """Load a pretrained word-embedding matrix into the sentence encoder."""
        self._sent_enc.set_embedding(embedding)
| 40.422741 | 114 | 0.605337 | import torch
import numpy as np
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from .rnn import MultiLayerLSTMCells
from .rnn import lstm_encoder
from .util import sequence_mean, len_mask
from .attention import prob_normalize
from .embed_regularize import embedded_dropout
from .rnn import MultiLayerLSTMCells_abs_enc
from .dropout import LockedDropout
INI = 1e-2
class ConvSentEncoder(nn.Module):
def __init__(self, vocab_size, emb_dim, n_hidden, dropout, dropoute):
super().__init__()
self._embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
self._convs = nn.ModuleList([nn.Conv1d(emb_dim, n_hidden, i)
for i in range(3, 6)])
self._dropout = dropout
self._dropoute = dropoute
self._grad_handle = None
def forward(self, input_):
emb_input = embedded_dropout(self._embedding, input_, dropout=self._dropoute if self.training else 0)
conv_in = F.dropout(emb_input.transpose(1, 2), self._dropout, training=self.training)
if conv_in.size(2) < 6:
print("conv: ", conv_in.size())
if conv_in.is_cuda:
conv_in = torch.cat((conv_in,
torch.autograd.Variable(
torch.cuda.FloatTensor(np.zeros([conv_in.size(0), conv_in.size(1), 6 - conv_in.size(2)])))
),2)
else:
conv_in = torch.cat((conv_in,
torch.autograd.Variable(torch.zeros(conv_in.size(0), conv_in.size(1), 6 - conv_in.size(2)))
), 2)
print("af-conv: ", conv_in.size())
output = torch.cat([F.relu(conv(conv_in)).max(dim=2)[0]
for conv in self._convs], dim=1)
return output
def set_embedding(self, embedding):
assert self._embedding.weight.size() == embedding.size()
self._embedding.weight.data.copy_(embedding)
class LSTMEncoder(nn.Module):
def __init__(self, input_dim, n_hidden, n_layer, dropout, wdrop, dropouth, bidirectional):
super().__init__()
self._init_h = nn.Parameter(
torch.Tensor(n_layer*(2 if bidirectional else 1), n_hidden))
self._init_c = nn.Parameter(
torch.Tensor(n_layer*(2 if bidirectional else 1), n_hidden))
init.uniform_(self._init_h, -INI, INI)
init.uniform_(self._init_c, -INI, INI)
self.lockdrop = LockedDropout()
self._lstm = MultiLayerLSTMCells_abs_enc(
input_dim, n_hidden, n_layer,
dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional,
lockdrop=self.lockdrop
)
def forward(self, input_, in_lens=None):
size = (self._init_h.size(0), input_.size(0), self._init_h.size(1))
init_states = (self._init_h.unsqueeze(1).expand(*size),
self._init_c.unsqueeze(1).expand(*size))
lstm_out, _ = lstm_encoder(
input_, self._lstm, in_lens, init_states)
return lstm_out.transpose(0, 1)
@property
def input_size(self):
return self._lstm.input_size
@property
def hidden_size(self):
return self._lstm.hidden_size
@property
def num_layers(self):
return self._lstm.num_layers
@property
def bidirectional(self):
return self._lstm.bidirectional
class ExtractSumm(nn.Module):
def __init__(self, vocab_size, emb_dim,
conv_hidden, lstm_hidden, lstm_layer,
bidirectional, dropout=0.0, dropoute=0.1, wdrop=0.5, dropouth=0.3):
super().__init__()
self._sent_enc = ConvSentEncoder(
vocab_size, emb_dim, conv_hidden, dropout, dropoute)
self._art_enc = LSTMEncoder(
3*conv_hidden, lstm_hidden, lstm_layer,
dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional
)
lstm_out_dim = lstm_hidden * (2 if bidirectional else 1)
self._sent_linear = nn.Linear(lstm_out_dim, 1)
self._art_linear = nn.Linear(lstm_out_dim, lstm_out_dim)
def forward(self, article_sents, sent_nums):
enc_sent, enc_art = self._encode(article_sents, sent_nums)
saliency = torch.matmul(enc_sent, enc_art.unsqueeze(2))
saliency = torch.cat(
[s[:n] for s, n in zip(saliency, sent_nums)], dim=0)
content = self._sent_linear(
torch.cat([s[:n] for s, n in zip(enc_sent, sent_nums)], dim=0)
)
logit = (content + saliency).squeeze(1)
return logit
def extract(self, article_sents, sent_nums=None, k=4):
enc_sent, enc_art = self._encode(article_sents, sent_nums)
saliency = torch.matmul(enc_sent, enc_art.unsqueeze(2))
content = self._sent_linear(enc_sent)
logit = (content + saliency).squeeze(2)
if sent_nums is None:
assert len(article_sents) == 1
n_sent = logit.size(1)
extracted = logit[0].topk(
k if k < n_sent else n_sent, sorted=False
)[1].tolist()
else:
extracted = [l[:n].topk(k if k < n else n)[1].tolist()
for n, l in zip(sent_nums, logit)]
return extracted
def _encode(self, article_sents, sent_nums):
if sent_nums is None:
enc_sent = self._sent_enc(article_sents[0]).unsqueeze(0)
else:
max_n = max(sent_nums)
enc_sents = [self._sent_enc(art_sent)
for art_sent in article_sents]
def zero(n, device):
z = torch.zeros(n, self._art_enc.input_size).to(device)
return z
enc_sent = torch.stack(
[torch.cat([s, zero(max_n-n, s.device)],
dim=0) if n != max_n
else s
for s, n in zip(enc_sents, sent_nums)],
dim=0
)
lstm_out = self._art_enc(enc_sent, sent_nums)
enc_art = F.tanh(
self._art_linear(sequence_mean(lstm_out, sent_nums, dim=1)))
return lstm_out, enc_art
def set_embedding(self, embedding):
self._sent_enc.set_embedding(embedding)
class LSTMPointerNet(nn.Module):
def __init__(self, input_dim, n_hidden, n_layer,
dropout, n_hop):
super().__init__()
self._init_h = nn.Parameter(torch.Tensor(n_layer, n_hidden))
self._init_c = nn.Parameter(torch.Tensor(n_layer, n_hidden))
self._init_i = nn.Parameter(torch.Tensor(input_dim))
init.uniform_(self._init_h, -INI, INI)
init.uniform_(self._init_c, -INI, INI)
init.uniform_(self._init_i, -0.1, 0.1)
self._lstm = nn.LSTM(
input_dim, n_hidden, n_layer,
bidirectional=False, dropout=dropout
)
self._lstm_cell = None
self._attn_wm = nn.Parameter(torch.Tensor(input_dim, n_hidden))
self._attn_wq = nn.Parameter(torch.Tensor(n_hidden, n_hidden))
self._attn_v = nn.Parameter(torch.Tensor(n_hidden))
init.xavier_normal_(self._attn_wm)
init.xavier_normal_(self._attn_wq)
init.uniform_(self._attn_v, -INI, INI)
self._hop_wm = nn.Parameter(torch.Tensor(input_dim, n_hidden))
self._hop_wq = nn.Parameter(torch.Tensor(n_hidden, n_hidden))
self._hop_v = nn.Parameter(torch.Tensor(n_hidden))
init.xavier_normal_(self._hop_wm)
init.xavier_normal_(self._hop_wq)
init.uniform_(self._hop_v, -INI, INI)
self._n_hop = n_hop
def forward(self, attn_mem, mem_sizes, lstm_in):
attn_feat, hop_feat, lstm_states, init_i = self._prepare(attn_mem)
lstm_in[:,0,:] = init_i.squeeze(1)
lstm_in = lstm_in.transpose(0, 1)
query, final_states = self._lstm(lstm_in, lstm_states)
query = query.transpose(0, 1)
for _ in range(self._n_hop):
query = LSTMPointerNet.attention(
hop_feat, query, self._hop_v, self._hop_wq, mem_sizes)
output = LSTMPointerNet.attention_score(
attn_feat, query, self._attn_v, self._attn_wq)
return output
def extract(self, attn_mem, mem_sizes, k):
attn_feat, hop_feat, lstm_states, lstm_in = self._prepare(attn_mem)
lstm_in = lstm_in.squeeze(1)
if self._lstm_cell is None:
self._lstm_cell = MultiLayerLSTMCells.convert(
self._lstm).to(attn_mem.device)
extracts = []
for _ in range(k):
h, c = self._lstm_cell(lstm_in, lstm_states)
query = h[-1]
for _ in range(self._n_hop):
query = LSTMPointerNet.attention(
hop_feat, query, self._hop_v, self._hop_wq, mem_sizes)
score = LSTMPointerNet.attention_score(
attn_feat, query, self._attn_v, self._attn_wq)
score = score.squeeze()
for e in extracts:
score[e] = -1e6
ext = score.max(dim=0)[1].item()
extracts.append(ext)
lstm_states = (h, c)
lstm_in = attn_mem[:, ext, :]
return extracts
def _prepare(self, attn_mem):
attn_feat = torch.matmul(attn_mem, self._attn_wm.unsqueeze(0))
hop_feat = torch.matmul(attn_mem, self._hop_wm.unsqueeze(0))
bs = attn_mem.size(0)
n_l, d = self._init_h.size()
size = (n_l, bs, d)
lstm_states = (self._init_h.unsqueeze(1).expand(*size).contiguous(),
self._init_c.unsqueeze(1).expand(*size).contiguous())
d = self._init_i.size(0)
init_i = self._init_i.unsqueeze(0).unsqueeze(1).expand(bs, 1, d)
return attn_feat, hop_feat, lstm_states, init_i
@staticmethod
def attention_score(attention, query, v, w):
sum_ = attention.unsqueeze(1) + torch.matmul(
query, w.unsqueeze(0)
).unsqueeze(2)
score = torch.matmul(
F.tanh(sum_), v.unsqueeze(0).unsqueeze(1).unsqueeze(3)
).squeeze(3)
return score
@staticmethod
def attention(attention, query, v, w, mem_sizes):
score = LSTMPointerNet.attention_score(attention, query, v, w)
if mem_sizes is None:
norm_score = F.softmax(score, dim=-1)
else:
mask = len_mask(mem_sizes, score.device).unsqueeze(-2)
norm_score = prob_normalize(score, mask)
output = torch.matmul(norm_score, attention)
return output
class PtrExtractSumm(nn.Module):
def __init__(self, emb_dim, vocab_size, conv_hidden,
lstm_hidden, lstm_layer, bidirectional,
n_hop=1, dropout=0.0, dropoute=0.1, wdrop=0.5, dropouth=0.3):
super().__init__()
self._sent_enc = ConvSentEncoder(
vocab_size, emb_dim, conv_hidden, dropout, dropoute)
self._art_enc = LSTMEncoder(
3*conv_hidden, lstm_hidden, lstm_layer,
dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional
)
enc_out_dim = lstm_hidden * (2 if bidirectional else 1)
self._extractor = LSTMPointerNet(
enc_out_dim, lstm_hidden, lstm_layer,
dropout, n_hop
)
def forward(self, article_sents, sent_nums, target):
enc_out = self._encode(article_sents, sent_nums)
bs, nt = target.size()
d = enc_out.size(2)
ptr_in = torch.gather(
enc_out, dim=1, index=target.unsqueeze(2).expand(bs, nt, d)
)
output = self._extractor(enc_out, sent_nums, ptr_in)
return output
def extract(self, article_sents, sent_nums=None, k=4):
enc_out = self._encode(article_sents, sent_nums)
output = self._extractor.extract(enc_out, sent_nums, k)
return output
def _encode(self, article_sents, sent_nums):
if sent_nums is None:
enc_sent = self._sent_enc(article_sents[0]).unsqueeze(0)
else:
max_n = max(sent_nums)
enc_sents = [self._sent_enc(art_sent)
for art_sent in article_sents]
def zero(n, device):
z = torch.zeros(n, self._art_enc.input_size).to(device)
return z
enc_sent = torch.stack(
[torch.cat([s, zero(max_n-n, s.device)], dim=0)
if n != max_n
else s
for s, n in zip(enc_sents, sent_nums)],
dim=0
)
lstm_out = self._art_enc(enc_sent, sent_nums)
return lstm_out
def set_embedding(self, embedding):
self._sent_enc.set_embedding(embedding)
| true | true |
1c2b85511b0a7346be53799fb87eca7fbb9ef91b | 365 | py | Python | be_test/users/urls.py | greg-argulla/be_test | b745f26c5c3d63ef1bfcdbd7a71a222c6c332fd4 | [
"MIT"
] | null | null | null | be_test/users/urls.py | greg-argulla/be_test | b745f26c5c3d63ef1bfcdbd7a71a222c6c332fd4 | [
"MIT"
] | null | null | null | be_test/users/urls.py | greg-argulla/be_test | b745f26c5c3d63ef1bfcdbd7a71a222c6c332fd4 | [
"MIT"
] | null | null | null | from django.urls import path
from be_test.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
| 24.333333 | 66 | 0.70137 | from django.urls import path
from be_test.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
| true | true |
1c2b85b738b3b070506fd47fd2ffff0694c1fe00 | 1,481 | py | Python | tohtml.py | tcaenen/IkeaTrainTrack | 51b19250494292dd1c6da5a5d8808498c31f9b1e | [
"MIT"
] | 2 | 2017-05-10T06:13:35.000Z | 2019-06-23T09:03:12.000Z | tohtml.py | tcaenen/IkeaTrainTrack | 51b19250494292dd1c6da5a5d8808498c31f9b1e | [
"MIT"
] | 21 | 2017-05-10T12:53:25.000Z | 2017-09-24T19:22:05.000Z | tohtml.py | tcaenen/IkeaTrainTrack | 51b19250494292dd1c6da5a5d8808498c31f9b1e | [
"MIT"
] | 1 | 2020-01-28T11:06:21.000Z | 2020-01-28T11:06:21.000Z | #!/usr/local/bin/python3
"""
Render all tracks from the input into html page.
"""
import argparse
import sys
import os
import track
def write_report(tracks):
    """Render *tracks* as an HTML table in report/index.html.

    Each row shows the track's path string, its piece counts
    (S: straights, T: turns (R+L), U: ups, D: downs, P: pillars) and a
    preview image, which is drawn into the same report/ directory.
    """
    os.makedirs('report', exist_ok=True)
    with open('report/index.html', 'w') as page:
        page.write('<!doctype html>\n')
        page.write('<body>\n')
        page.write('<table>\n')
        page.write('''<tr>
    <th>descr<th>S<th>T<th>U<th>D<th>P<th>image
</tr>\n''')
        for index, item in enumerate(tracks, start=1):
            counts = {
                'S': item.path.count('S'),
                'T': item.path.count('R') + item.path.count('L'),
                'U': item.path.count('U'),
                'D': item.path.count('D'),
                'P': item.count_pillars(),
            }
            page.write('<tr><td>%s</td>' % item.path)
            page.write('<td>{S}</td><td>{T}</td><td>{U}</td>'
                       '<td>{D}</td><td>{P}</td>'.format(**counts))
            page.write('<td><img src="preview%02d.png"></td>' % index)
            page.write('</tr>\n')
            item.draw('report/preview%002d.png' % index)
        page.write('</table></body>\n')
DESCRIPTION = """\
Take tracks from standard input and print them out to html page.
Each track should be provided on a new line.
"""
def main():
    """Read one track description per line from stdin and emit the report."""
    # argparse is used only so --help prints DESCRIPTION.
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    args = parser.parse_args()

    tracks = []
    for line in sys.stdin:
        path = line.strip()
        tracks.append(track.Track(path))
    write_report(tracks)
if __name__ == '__main__':
main()
| 26.927273 | 95 | 0.554355 |
import argparse
import sys
import os
import track
def write_report(tracks):
os.makedirs('report', exist_ok=True)
with open('report/index.html', 'w') as report:
report.write('<!doctype html>\n')
report.write('<body>\n')
report.write('<table>\n')
report.write('''<tr>
<th>descr<th>S<th>T<th>U<th>D<th>P<th>image
</tr>\n''')
for i, t in enumerate(tracks, start=1):
report.write('<tr><td>%s</td>' % t.path)
report.write('<td>{S}</td><td>{T}</td><td>{U}</td><td>{D}</td><td>{P}</td>'.format(
S=t.path.count('S'),
T=t.path.count('R') + t.path.count('L'),
U=t.path.count('U'),
D=t.path.count('D'),
P=t.count_pillars(),
))
report.write('<td><img src="preview%02d.png"></td>' % i)
report.write('</tr>\n')
t.draw('report/preview%002d.png' % i)
report.write('</table></body>\n')
DESCRIPTION = """\
Take tracks from standard input and print them out to html page.
Each track should be provided on a new line.
"""
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
args = parser.parse_args()
tracks = []
for line in sys.stdin:
path = line.strip()
tracks.append(track.Track(path))
write_report(tracks)
if __name__ == '__main__':
main()
| true | true |
1c2b881e80ccc36f41d3949450868f43bcd38f83 | 2,601 | py | Python | generate/generate_simple.py | guoguo12/haskell-ptable | 93857351c8db915cb59e773a30c0ec77eab7ac4c | [
"Apache-2.0"
] | 1 | 2015-11-08T08:51:05.000Z | 2015-11-08T08:51:05.000Z | generate/generate_simple.py | guoguo12/haskell-ptable | 93857351c8db915cb59e773a30c0ec77eab7ac4c | [
"Apache-2.0"
] | null | null | null | generate/generate_simple.py | guoguo12/haskell-ptable | 93857351c8db915cb59e773a30c0ec77eab7ac4c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""generate_simple.py: Generates simple Haskell chemical elements data file."""
__author__ = 'Allen Guo'
__license__ = 'Apache License'
__version__ = '2.0'
import collections
import os
import re
FIELDS = ['anum', 'symb', 'name', 'gnum', 'pnum', 'awei']
DATA_PATH = 'ptable_simple.csv'
OUTPUT_NAME = 'ptable.hs'
OUTPUT_PATH = os.path.join('..', OUTPUT_NAME)
OUTPUT_HEADER = '--%s: Simple Haskell chemical elements data file.' % OUTPUT_NAME
Element = collections.namedtuple('Element', FIELDS)
quote = lambda s: '"' + s + '"'
def get_data():
    """Read DATA_PATH (CSV) and return a list of Element namedtuples.

    The first row is assumed to be a header and is skipped; every other
    row must supply exactly the fields of Element, in column order.
    """
    elements = []
    # 'with' guarantees the file handle is closed even if parsing raises
    # (the original open()/close() pair leaked the handle on error).
    with open(DATA_PATH) as csv_file:
        lines = csv_file.readlines()[1:]  # Omit header row
    for line in lines:
        fields = line.split(',')
        elements.append(Element(*fields))
    return elements
def write_haskell_elements_list(output, elements):
    """Append the Haskell `elements` symbol-list declaration to *output*."""
    output.append('')  # Empty separator line
    output.append('elements :: [[Char]]')
    quoted = [quote(element.symb) for element in elements]
    output.append('elements = [%s]' % (', '.join(quoted)))
def write_haskell_function(output, field, output_type, elements):
    """Append a Haskell lookup function for *field* to *output*.

    One pattern-match equation is emitted per element, keyed on its
    symbol, plus a catch-all that raises a Haskell error.  *output_type*
    controls how the raw CSV value is cleaned up before emission.
    """
    output.append('')  # Empty line to separate functions
    output.append('%s :: String -> %s' % (field, output_type))
    for element in elements:
        value = getattr(element, field).strip()
        if output_type == 'String':
            value = quote(value)
        elif output_type == 'Double':
            value = value.split('(')[0]  # Omit parenthesized uncertainty value, if present
            # NOTE(review): value[0] raises IndexError if a Double field is
            # empty — presumably the data never has empty weights; confirm.
            if value[0] == '[' and value[-1] == ']':
                value = value[1:-1]  # Omit surrounding brackets, if present
        elif output_type == 'Int':
            if not value:
                value = '-1'  # Replace empty values with -1
        output.append('%s "%s" = %s' % (field, element.symb, value))
    output.append('%s _ = error "Invalid chemical element symbol"' % field)
def write_output(output):
    """Write the list of output lines to OUTPUT_PATH, one per line."""
    output_file = open(OUTPUT_PATH, 'w')
    output = map((lambda s: s + '\n'), output)  # Add line breaks
    output_file.writelines(output)
    output_file.close()
def main():
    """Generate the Haskell data file from the CSV element table."""
    output = [OUTPUT_HEADER]
    elements = get_data()
    write_haskell_elements_list(output, elements)
    # One lookup function per CSV column (see FIELDS).
    write_haskell_function(output, 'anum', 'Int', elements)
    write_haskell_function(output, 'name', 'String', elements)
    write_haskell_function(output, 'gnum', 'Int', elements)
    write_haskell_function(output, 'pnum', 'Int', elements)
    write_haskell_function(output, 'awei', 'Double', elements)
    write_output(output)
main()
| 34.68 | 90 | 0.639754 |
__author__ = 'Allen Guo'
__license__ = 'Apache License'
__version__ = '2.0'
import collections
import os
import re
FIELDS = ['anum', 'symb', 'name', 'gnum', 'pnum', 'awei']
DATA_PATH = 'ptable_simple.csv'
OUTPUT_NAME = 'ptable.hs'
OUTPUT_PATH = os.path.join('..', OUTPUT_NAME)
OUTPUT_HEADER = '--%s: Simple Haskell chemical elements data file.' % OUTPUT_NAME
Element = collections.namedtuple('Element', FIELDS)
quote = lambda s: '"' + s + '"'
def get_data():
elements = []
f = open(DATA_PATH)
lines = f.readlines()[1:]
f.close()
for line in lines:
fields = line.split(',')
elements.append(Element(*fields))
return elements
def write_haskell_elements_list(output, elements):
output.append('')
output.append('elements :: [[Char]]')
symbols = map(quote, [element.symb for element in elements])
output.append('elements = [%s]' % (', '.join(symbols)))
def write_haskell_function(output, field, output_type, elements):
output.append('')
output.append('%s :: String -> %s' % (field, output_type))
for element in elements:
value = getattr(element, field).strip()
if output_type == 'String':
value = quote(value)
elif output_type == 'Double':
value = value.split('(')[0]
if value[0] == '[' and value[-1] == ']':
value = value[1:-1]
elif output_type == 'Int':
if not value:
value = '-1'
output.append('%s "%s" = %s' % (field, element.symb, value))
output.append('%s _ = error "Invalid chemical element symbol"' % field)
def write_output(output):
output_file = open(OUTPUT_PATH, 'w')
output = map((lambda s: s + '\n'), output)
output_file.writelines(output)
output_file.close()
def main():
output = [OUTPUT_HEADER]
elements = get_data()
write_haskell_elements_list(output, elements)
write_haskell_function(output, 'anum', 'Int', elements)
write_haskell_function(output, 'name', 'String', elements)
write_haskell_function(output, 'gnum', 'Int', elements)
write_haskell_function(output, 'pnum', 'Int', elements)
write_haskell_function(output, 'awei', 'Double', elements)
write_output(output)
if __name__ == '__main__':
main()
| true | true |
1c2b882f66594a45254e24f2e75a12183c29d43a | 31,715 | py | Python | src/bootstrap/bootstrap.py | TheSirC/rust | 823a75d9ba34860b9a6712c7a9d35e86e0a88436 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 518 | 2015-08-13T08:50:23.000Z | 2020-07-23T19:52:51.000Z | src/bootstrap/bootstrap.py | TheSirC/rust | 823a75d9ba34860b9a6712c7a9d35e86e0a88436 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 157 | 2015-08-09T12:52:55.000Z | 2020-07-19T20:02:52.000Z | src/bootstrap/bootstrap.py | TheSirC/rust | 823a75d9ba34860b9a6712c7a9d35e86e0a88436 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29 | 2015-09-06T00:03:53.000Z | 2020-04-05T10:05:38.000Z | from __future__ import absolute_import, division, print_function
import argparse
import contextlib
import datetime
import hashlib
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
from time import time
def get(url, path, verbose=False):
    """Download *url* into *path*, checked against *url* + '.sha256'.

    An existing *path* that already matches the published checksum is
    reused; a stale copy is deleted and re-downloaded.  The temporary
    files are always cleaned up on the way out.
    """
    suffix = '.sha256'
    sha_url = url + suffix
    # The two with-blocks only reserve unique on-disk names; the files are
    # closed immediately and written later by download().
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_path = temp_file.name
    with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as sha_file:
        sha_path = sha_file.name

    try:
        download(sha_path, sha_url, False, verbose)
        if os.path.exists(path):
            if verify(path, sha_path, False):
                if verbose:
                    print("using already-download file", path)
                return
            else:
                if verbose:
                    print("ignoring already-download file",
                          path, "due to failed verification")
                os.unlink(path)
        # Download to the temp name first so a partial fetch never
        # clobbers *path*; move into place only after verification.
        download(temp_path, url, True, verbose)
        if not verify(temp_path, sha_path, verbose):
            raise RuntimeError("failed verification")
        if verbose:
            print("moving {} to {}".format(temp_path, path))
        shutil.move(temp_path, path)
    finally:
        delete_if_present(sha_path, verbose)
        delete_if_present(temp_path, verbose)
delete_if_present(temp_path, verbose)
def delete_if_present(path, verbose):
    """Delete *path* if it exists as a regular file; otherwise do nothing."""
    if not os.path.isfile(path):
        return
    if verbose:
        print("removing", path)
    os.unlink(path)
def download(path, url, probably_big, verbose):
    """Download *url* to *path*, retrying transient failures.

    Up to four attempts are made in exception mode (a failure raises
    RuntimeError and is retried); the fifth and final attempt runs with
    exception=False, so a failure there exits the whole process.
    """
    for _ in range(0, 4):
        try:
            _download(path, url, probably_big, verbose, True)
            return
        except RuntimeError:
            print("\nspurious failure, trying again")
    _download(path, url, probably_big, verbose, False)
def _download(path, url, probably_big, verbose, exception):
    """Perform one download attempt via PowerShell (win32) or curl.

    *probably_big* switches curl to a visible progress bar; *exception*
    is forwarded to run() and decides between raising RuntimeError and
    exiting the process on failure.
    """
    if probably_big or verbose:
        print("downloading {}".format(url))
    # see http://serverfault.com/questions/301128/how-to-download
    if sys.platform == 'win32':
        # Force TLS 1.2: the static.rust-lang.org CDN rejects older protocols.
        run(["PowerShell.exe", "/nologo", "-Command",
             "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;",
             "(New-Object System.Net.WebClient).DownloadFile('{}', '{}')".format(url, path)],
            verbose=verbose,
            exception=exception)
    else:
        if probably_big or verbose:
            option = "-#"  # progress bar
        else:
            option = "-s"  # silent
        run(["curl", option,
             "-y", "30", "-Y", "10",    # timeout if speed is < 10 bytes/sec for > 30 seconds
             "--connect-timeout", "30",  # timeout if cannot connect within 30 seconds
             "--retry", "3", "-Sf", "-o", path, url],
            verbose=verbose,
            exception=exception)
def verify(path, sha_path, verbose):
    """Return True when the SHA-256 digest of *path* matches *sha_path*.

    *sha_path* holds a ``sha256sum``-style line whose first
    whitespace-separated token is the expected hex digest.  A mismatch
    is always reported on stdout.
    """
    if verbose:
        print("verifying", path)
    with open(path, "rb") as source:
        actual = hashlib.sha256(source.read()).hexdigest()
    with open(sha_path, "r") as sha256sum:
        wanted = sha256sum.readline().split()[0]
    if actual != wanted:
        print("invalid checksum:\n"
              "    found: {}\n"
              "    expected: {}".format(actual, wanted))
        return False
    return True
def unpack(tarball, dst, verbose=False, match=None):
    """Unpack *tarball* into *dst*, stripping the top-level directory.

    The archive is assumed to contain a single root directory named after
    the tarball (without '.tar.gz').  When *match* is given, only members
    whose path (relative to that root) starts with *match* are extracted,
    and that prefix is stripped as well.  The leftover root directory is
    removed afterwards.

    Bug fix: the prefix strip previously ran unconditionally, so calling
    with the default match=None raised TypeError (len(None)); it is now
    applied only when *match* is set.
    """
    print("extracting", tarball)
    fname = os.path.basename(tarball).replace(".tar.gz", "")
    with contextlib.closing(tarfile.open(tarball)) as tar:
        for member in tar.getnames():
            if "/" not in member:
                continue
            name = member.replace(fname + "/", "", 1)
            if match is not None:
                if not name.startswith(match):
                    continue
                name = name[len(match) + 1:]
            dst_path = os.path.join(dst, name)
            if verbose:
                print("  extracting", member)
            tar.extract(member, dst)
            src_path = os.path.join(dst, member)
            # Directories that already exist at the destination are kept;
            # everything else is moved out of the stripped root.
            if os.path.isdir(src_path) and os.path.exists(dst_path):
                continue
            shutil.move(src_path, dst_path)
    shutil.rmtree(os.path.join(dst, fname))
def run(args, verbose=False, exception=False, **kwargs):
    """Spawn *args* as a child process and wait for it to finish.

    On a non-zero exit status, raise RuntimeError when *verbose* or
    *exception* is set; otherwise terminate via sys.exit with the same
    message.
    """
    if verbose:
        print("running: " + ' '.join(args))
    sys.stdout.flush()
    # Use Popen here instead of call() as it apparently allows powershell on
    # Windows to not lock up waiting for input presumably.
    child = subprocess.Popen(args, **kwargs)
    status = child.wait()
    if status == 0:
        return
    err = "failed to run: " + ' '.join(args)
    if verbose or exception:
        raise RuntimeError(err)
    sys.exit(err)
def stage0_data(rust_root):
    """Parse src/stage0.txt under *rust_root* into a {key: value} dict.

    Entries are "key: value" lines; blank lines and '#' comments are
    ignored.
    """
    stage0_path = os.path.join(rust_root, "src/stage0.txt")
    data = {}
    with open(stage0_path, 'r') as stage0_file:
        for raw_line in stage0_file:
            line = raw_line.rstrip()
            if not line or line.startswith("#"):
                continue
            key, value = line.split(": ", 1)
            data[key] = value
    return data
def format_build_time(duration):
    """Render a duration in seconds as H:MM:SS.

    >>> format_build_time('300')
    '0:05:00'
    """
    seconds = int(duration)
    return str(datetime.timedelta(seconds=seconds))
def default_build_triple():
    """Build triple as in LLVM.

    Probes the host with ``uname`` (plus platform-specific helpers) and
    maps the reported OS/CPU names onto the cpu-vendor-os triples rustc
    uses.  Exits the process for unrecognized platforms; on Windows
    without a usable ``uname`` it assumes x86_64-pc-windows-msvc.
    """
    default_encoding = sys.getdefaultencoding()
    try:
        ostype = subprocess.check_output(
            ['uname', '-s']).strip().decode(default_encoding)
        cputype = subprocess.check_output(
            ['uname', '-m']).strip().decode(default_encoding)
    except (subprocess.CalledProcessError, OSError):
        if sys.platform == 'win32':
            return 'x86_64-pc-windows-msvc'
        err = "uname not found"
        sys.exit(err)

    # The goal here is to come up with the same triple as LLVM would,
    # at least for the subset of platforms we're willing to target.
    ostype_mapper = {
        'Darwin': 'apple-darwin',
        'DragonFly': 'unknown-dragonfly',
        'FreeBSD': 'unknown-freebsd',
        'Haiku': 'unknown-haiku',
        'NetBSD': 'unknown-netbsd',
        'OpenBSD': 'unknown-openbsd'
    }

    # Consider the direct transformation first and then the special cases
    if ostype in ostype_mapper:
        ostype = ostype_mapper[ostype]
    elif ostype == 'Linux':
        # `uname -o` distinguishes Android from regular Linux.
        os_from_sp = subprocess.check_output(
            ['uname', '-o']).strip().decode(default_encoding)
        if os_from_sp == 'Android':
            ostype = 'linux-android'
        else:
            ostype = 'unknown-linux-gnu'
    elif ostype == 'SunOS':
        ostype = 'sun-solaris'
        # On Solaris, uname -m will return a machine classification instead
        # of a cpu type, so uname -p is recommended instead.  However, the
        # output from that option is too generic for our purposes (it will
        # always emit 'i386' on x86/amd64 systems).  As such, isainfo -k
        # must be used instead.
        try:
            cputype = subprocess.check_output(
                ['isainfo', '-k']).strip().decode(default_encoding)
        except (subprocess.CalledProcessError, OSError):
            err = "isainfo not found"
            sys.exit(err)
    elif ostype.startswith('MINGW'):
        # msys' `uname` does not print gcc configuration, but prints msys
        # configuration. so we cannot believe `uname -m`:
        # msys1 is always i686 and msys2 is always x86_64.
        # instead, msys defines $MSYSTEM which is MINGW32 on i686 and
        # MINGW64 on x86_64.
        ostype = 'pc-windows-gnu'
        cputype = 'i686'
        if os.environ.get('MSYSTEM') == 'MINGW64':
            cputype = 'x86_64'
    elif ostype.startswith('MSYS'):
        ostype = 'pc-windows-gnu'
    elif ostype.startswith('CYGWIN_NT'):
        cputype = 'i686'
        if ostype.endswith('WOW64'):
            cputype = 'x86_64'
        ostype = 'pc-windows-gnu'
    else:
        err = "unknown OS type: {}".format(ostype)
        sys.exit(err)

    # FreeBSD reports a machine classification for powerpc; use `uname -p`
    # to get the actual cpu type there.
    if cputype == 'powerpc' and ostype == 'unknown-freebsd':
        cputype = subprocess.check_output(
            ['uname', '-p']).strip().decode(default_encoding)
    cputype_mapper = {
        'BePC': 'i686',
        'aarch64': 'aarch64',
        'amd64': 'x86_64',
        'arm64': 'aarch64',
        'i386': 'i686',
        'i486': 'i686',
        'i686': 'i686',
        'i786': 'i686',
        'powerpc': 'powerpc',
        'powerpc64': 'powerpc64',
        'powerpc64le': 'powerpc64le',
        'ppc': 'powerpc',
        'ppc64': 'powerpc64',
        'ppc64le': 'powerpc64le',
        's390x': 's390x',
        'x64': 'x86_64',
        'x86': 'i686',
        'x86-64': 'x86_64',
        'x86_64': 'x86_64'
    }

    # Consider the direct transformation first and then the special cases
    if cputype in cputype_mapper:
        cputype = cputype_mapper[cputype]
    elif cputype in {'xscale', 'arm'}:
        cputype = 'arm'
        if ostype == 'linux-android':
            ostype = 'linux-androideabi'
        elif ostype == 'unknown-freebsd':
            cputype = subprocess.check_output(
                ['uname', '-p']).strip().decode(default_encoding)
            ostype = 'unknown-freebsd'
    elif cputype == 'armv6l':
        cputype = 'arm'
        if ostype == 'linux-android':
            ostype = 'linux-androideabi'
        else:
            ostype += 'eabihf'
    elif cputype in {'armv7l', 'armv8l'}:
        cputype = 'armv7'
        if ostype == 'linux-android':
            ostype = 'linux-androideabi'
        else:
            ostype += 'eabihf'
    elif cputype == 'mips':
        # mips/mips64 triples encode endianness explicitly.
        if sys.byteorder == 'big':
            cputype = 'mips'
        elif sys.byteorder == 'little':
            cputype = 'mipsel'
        else:
            raise ValueError("unknown byteorder: {}".format(sys.byteorder))
    elif cputype == 'mips64':
        if sys.byteorder == 'big':
            cputype = 'mips64'
        elif sys.byteorder == 'little':
            cputype = 'mips64el'
        else:
            raise ValueError('unknown byteorder: {}'.format(sys.byteorder))
        # only the n64 ABI is supported, indicate it
        ostype += 'abi64'
    elif cputype == 'sparc' or cputype == 'sparcv9' or cputype == 'sparc64':
        pass
    else:
        err = "unknown cpu type: {}".format(cputype)
        sys.exit(err)

    return "{}-{}".format(cputype, ostype)
@contextlib.contextmanager
def output(filepath):
    """Context manager writing *filepath* through a '.tmp' sibling.

    The caller receives a text-mode file object for ``filepath + '.tmp'``;
    on exit the temporary is swapped into place.  When remove+rename is
    not possible (e.g. on Win32 while the target is open) the contents
    are copied over instead and the temporary deleted.
    """
    tmp_name = filepath + '.tmp'
    with open(tmp_name, 'w') as tmp_file:
        yield tmp_file
    try:
        # PermissionError/OSError on Win32 if the destination is in use.
        os.remove(filepath)
        os.rename(tmp_name, filepath)
    except OSError:
        shutil.copy2(tmp_name, filepath)
        os.remove(tmp_name)
class RustBuild(object):
"""Provide all the methods required to build Rust"""
    def __init__(self):
        """Initialize every configuration field to an empty/default value.

        The real values are filled in by later setup code (not shown in
        this chunk) before download_stage0() and friends are used.
        """
        self.cargo_channel = ''    # release channel of the stage0 cargo tarball
        self.date = ''             # snapshot date; also keys the download cache dir
        self._download_url = 'https://static.rust-lang.org'
        self.rustc_channel = ''    # release channel of the stage0 rustc tarball
        self.build = ''            # host target triple
        self.build_dir = os.path.join(os.getcwd(), "build")
        self.clean = False         # when True, stamps are always considered stale
        self.config_toml = ''      # raw text of config.toml, parsed by get_toml()
        self.rust_root = ''        # root of the Rust source checkout
        self.use_locked_deps = ''
        self.use_vendored_sources = ''
        self.verbose = False
    def download_stage0(self):
        """Fetch the build system for Rust, written in Rust

        This method will build a cache directory, then it will fetch the
        tarball which has the stage0 compiler used to then bootstrap the Rust
        compiler itself.

        Each downloaded tarball is extracted, after that, the script
        will move all the content to the right place.
        """
        rustc_channel = self.rustc_channel
        cargo_channel = self.cargo_channel

        # Re-fetch rustc only when we manage the copy under bin_root() and
        # it is missing or its date stamp is stale.  (self.rustc() /
        # self.cargo() are defined elsewhere in this file.)
        if self.rustc().startswith(self.bin_root()) and \
                (not os.path.exists(self.rustc()) or
                 self.program_out_of_date(self.rustc_stamp())):
            if os.path.exists(self.bin_root()):
                shutil.rmtree(self.bin_root())
            filename = "rust-std-{}-{}.tar.gz".format(
                rustc_channel, self.build)
            pattern = "rust-std-{}".format(self.build)
            self._download_stage0_helper(filename, pattern)

            filename = "rustc-{}-{}.tar.gz".format(rustc_channel, self.build)
            self._download_stage0_helper(filename, "rustc")
            self.fix_executable("{}/bin/rustc".format(self.bin_root()))
            self.fix_executable("{}/bin/rustdoc".format(self.bin_root()))
            # Record the snapshot date so program_out_of_date() can tell
            # when this toolchain needs refreshing.
            with output(self.rustc_stamp()) as rust_stamp:
                rust_stamp.write(self.date)

            # This is required so that we don't mix incompatible MinGW
            # libraries/binaries that are included in rust-std with
            # the system MinGW ones.
            if "pc-windows-gnu" in self.build:
                filename = "rust-mingw-{}-{}.tar.gz".format(
                    rustc_channel, self.build)
                self._download_stage0_helper(filename, "rust-mingw")

        # Same dance for cargo, which can come from a different channel.
        if self.cargo().startswith(self.bin_root()) and \
                (not os.path.exists(self.cargo()) or
                 self.program_out_of_date(self.cargo_stamp())):
            filename = "cargo-{}-{}.tar.gz".format(cargo_channel, self.build)
            self._download_stage0_helper(filename, "cargo")
            self.fix_executable("{}/bin/cargo".format(self.bin_root()))
            with output(self.cargo_stamp()) as cargo_stamp:
                cargo_stamp.write(self.date)
    def _download_stage0_helper(self, filename, pattern):
        """Download *filename* into the date-keyed cache and unpack it.

        The tarball is fetched from ``<download_url>/dist/<date>/`` only
        if not already cached, then extracted into bin_root() keeping
        just the members under the *pattern* component.
        """
        cache_dst = os.path.join(self.build_dir, "cache")
        rustc_cache = os.path.join(cache_dst, self.date)
        if not os.path.exists(rustc_cache):
            os.makedirs(rustc_cache)

        url = "{}/dist/{}".format(self._download_url, self.date)
        tarball = os.path.join(rustc_cache, filename)
        if not os.path.exists(tarball):
            get("{}/{}".format(url, filename), tarball, verbose=self.verbose)
        unpack(tarball, self.bin_root(), match=pattern, verbose=self.verbose)
    @staticmethod
    def fix_executable(fname):
        """Modifies the interpreter section of 'fname' to fix the dynamic linker

        This method is only required on NixOS and uses the PatchELF utility to
        change the dynamic linker of ELF executables.

        Please see https://nixos.org/patchelf.html for more information
        """
        default_encoding = sys.getdefaultencoding()
        try:
            ostype = subprocess.check_output(
                ['uname', '-s']).strip().decode(default_encoding)
        except subprocess.CalledProcessError:
            return
        except OSError as reason:
            # WinError => uname simply doesn't exist on Windows; anything
            # else is unexpected and re-raised.
            if getattr(reason, 'winerror', None) is not None:
                return
            raise reason

        if ostype != "Linux":
            return

        # NixOS is detected by its marker file plus the absence of the
        # FHS /lib directory that normal distros provide.
        if not os.path.exists("/etc/NIXOS"):
            return
        if os.path.exists("/lib"):
            return

        # At this point we're pretty sure the user is running NixOS
        nix_os_msg = "info: you seem to be running NixOS. Attempting to patch"
        print(nix_os_msg, fname)

        try:
            interpreter = subprocess.check_output(
                ["patchelf", "--print-interpreter", fname])
            interpreter = interpreter.strip().decode(default_encoding)
        except subprocess.CalledProcessError as reason:
            print("warning: failed to call patchelf:", reason)
            return

        # Basename of the loader, e.g. 'ld-linux-x86-64.so.2'.
        loader = interpreter.split("/")[-1]

        # Find where the system shell's copy of that loader lives; its
        # directory is the correct interpreter prefix on this NixOS.
        try:
            ldd_output = subprocess.check_output(
                ['ldd', '/run/current-system/sw/bin/sh'])
            ldd_output = ldd_output.strip().decode(default_encoding)
        except subprocess.CalledProcessError as reason:
            print("warning: unable to call ldd:", reason)
            return

        for line in ldd_output.splitlines():
            libname = line.split()[0]
            if libname.endswith(loader):
                loader_path = libname[:len(libname) - len(loader)]
                break
        else:
            print("warning: unable to find the path to the dynamic linker")
            return

        correct_interpreter = loader_path + loader

        try:
            subprocess.check_output(
                ["patchelf", "--set-interpreter", correct_interpreter, fname])
        except subprocess.CalledProcessError as reason:
            print("warning: failed to call patchelf:", reason)
            return
def rustc_stamp(self):
"""Return the path for .rustc-stamp
>>> rb = RustBuild()
>>> rb.build_dir = "build"
>>> rb.rustc_stamp() == os.path.join("build", "stage0", ".rustc-stamp")
True
"""
return os.path.join(self.bin_root(), '.rustc-stamp')
def cargo_stamp(self):
"""Return the path for .cargo-stamp
>>> rb = RustBuild()
>>> rb.build_dir = "build"
>>> rb.cargo_stamp() == os.path.join("build", "stage0", ".cargo-stamp")
True
"""
return os.path.join(self.bin_root(), '.cargo-stamp')
def program_out_of_date(self, stamp_path):
"""Check if the given program stamp is out of date"""
if not os.path.exists(stamp_path) or self.clean:
return True
with open(stamp_path, 'r') as stamp:
return self.date != stamp.read()
def bin_root(self):
"""Return the binary root directory
>>> rb = RustBuild()
>>> rb.build_dir = "build"
>>> rb.bin_root() == os.path.join("build", "stage0")
True
When the 'build' property is given should be a nested directory:
>>> rb.build = "devel"
>>> rb.bin_root() == os.path.join("build", "devel", "stage0")
True
"""
return os.path.join(self.build_dir, self.build, "stage0")
def get_toml(self, key, section=None):
"""Returns the value of the given key in config.toml, otherwise returns None
>>> rb = RustBuild()
>>> rb.config_toml = 'key1 = "value1"\\nkey2 = "value2"'
>>> rb.get_toml("key2")
'value2'
If the key does not exists, the result is None:
>>> rb.get_toml("key3") is None
True
Optionally also matches the section the key appears in
>>> rb.config_toml = '[a]\\nkey = "value1"\\n[b]\\nkey = "value2"'
>>> rb.get_toml('key', 'a')
'value1'
>>> rb.get_toml('key', 'b')
'value2'
>>> rb.get_toml('key', 'c') is None
True
"""
cur_section = None
for line in self.config_toml.splitlines():
section_match = re.match(r'^\s*\[(.*)\]\s*$', line)
if section_match is not None:
cur_section = section_match.group(1)
match = re.match(r'^{}\s*=(.*)$'.format(key), line)
if match is not None:
value = match.group(1)
if section is None or section == cur_section:
return self.get_string(value) or value.strip()
return None
    def cargo(self):
        """Return the path of the cargo binary used for bootstrapping.

        Either the `cargo` value from config.toml or the downloaded stage0
        cargo under bin_root() (see program_config).
        """
        return self.program_config('cargo')
    def rustc(self):
        """Return the path of the rustc binary used for bootstrapping.

        Either the `rustc` value from config.toml or the downloaded stage0
        rustc under bin_root() (see program_config).
        """
        return self.program_config('rustc')
def program_config(self, program):
"""Return config path for the given program
>>> rb = RustBuild()
>>> rb.config_toml = 'rustc = "rustc"\\n'
>>> rb.program_config('rustc')
'rustc'
>>> rb.config_toml = ''
>>> cargo_path = rb.program_config('cargo')
>>> cargo_path.rstrip(".exe") == os.path.join(rb.bin_root(),
... "bin", "cargo")
True
"""
config = self.get_toml(program)
if config:
return os.path.expanduser(config)
return os.path.join(self.bin_root(), "bin", "{}{}".format(
program, self.exe_suffix()))
@staticmethod
def get_string(line):
"""Return the value between double quotes
>>> RustBuild.get_string(' "devel" ')
'devel'
"""
start = line.find('"')
if start != -1:
end = start + 1 + line[start + 1:].find('"')
return line[start + 1:end]
start = line.find('\'')
if start != -1:
end = start + 1 + line[start + 1:].find('\'')
return line[start + 1:end]
return None
@staticmethod
def exe_suffix():
"""Return a suffix for executables"""
if sys.platform == 'win32':
return '.exe'
return ''
def bootstrap_binary(self):
"""Return the path of the bootstrap binary
>>> rb = RustBuild()
>>> rb.build_dir = "build"
>>> rb.bootstrap_binary() == os.path.join("build", "bootstrap",
... "debug", "bootstrap")
True
"""
return os.path.join(self.build_dir, "bootstrap", "debug", "bootstrap")
def build_bootstrap(self):
"""Build bootstrap"""
build_dir = os.path.join(self.build_dir, "bootstrap")
if self.clean and os.path.exists(build_dir):
shutil.rmtree(build_dir)
env = os.environ.copy()
env["RUSTC_BOOTSTRAP"] = '1'
env["CARGO_TARGET_DIR"] = build_dir
env["RUSTC"] = self.rustc()
env["LD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \
(os.pathsep + env["LD_LIBRARY_PATH"]) \
if "LD_LIBRARY_PATH" in env else ""
env["DYLD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \
(os.pathsep + env["DYLD_LIBRARY_PATH"]) \
if "DYLD_LIBRARY_PATH" in env else ""
env["LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \
(os.pathsep + env["LIBRARY_PATH"]) \
if "LIBRARY_PATH" in env else ""
env["RUSTFLAGS"] = "-Cdebuginfo=2 "
build_section = "target.{}".format(self.build_triple())
target_features = []
if self.get_toml("crt-static", build_section) == "true":
target_features += ["+crt-static"]
elif self.get_toml("crt-static", build_section) == "false":
target_features += ["-crt-static"]
if target_features:
env["RUSTFLAGS"] += "-C target-feature=" + (",".join(target_features)) + " "
target_linker = self.get_toml("linker", build_section)
if target_linker is not None:
env["RUSTFLAGS"] += "-C linker=" + target_linker + " "
env["PATH"] = os.path.join(self.bin_root(), "bin") + \
os.pathsep + env["PATH"]
if not os.path.isfile(self.cargo()):
raise Exception("no cargo executable found at `{}`".format(
self.cargo()))
args = [self.cargo(), "build", "--manifest-path",
os.path.join(self.rust_root, "src/bootstrap/Cargo.toml")]
for _ in range(1, self.verbose):
args.append("--verbose")
if self.use_locked_deps:
args.append("--locked")
if self.use_vendored_sources:
args.append("--frozen")
run(args, env=env, verbose=self.verbose)
def build_triple(self):
"""Build triple as in LLVM"""
config = self.get_toml('build')
if config:
return config
return default_build_triple()
def check_submodule(self, module, slow_submodules):
if not slow_submodules:
checked_out = subprocess.Popen(["git", "rev-parse", "HEAD"],
cwd=os.path.join(self.rust_root, module),
stdout=subprocess.PIPE)
return checked_out
else:
return None
def update_submodule(self, module, checked_out, recorded_submodules):
module_path = os.path.join(self.rust_root, module)
if checked_out != None:
default_encoding = sys.getdefaultencoding()
checked_out = checked_out.communicate()[0].decode(default_encoding).strip()
if recorded_submodules[module] == checked_out:
return
print("Updating submodule", module)
run(["git", "submodule", "-q", "sync", module],
cwd=self.rust_root, verbose=self.verbose)
try:
run(["git", "submodule", "update",
"--init", "--recursive", "--progress", module],
cwd=self.rust_root, verbose=self.verbose, exception=True)
except RuntimeError:
# Some versions of git don't support --progress.
run(["git", "submodule", "update",
"--init", "--recursive", module],
cwd=self.rust_root, verbose=self.verbose)
run(["git", "reset", "-q", "--hard"],
cwd=module_path, verbose=self.verbose)
run(["git", "clean", "-qdfx"],
cwd=module_path, verbose=self.verbose)
def update_submodules(self):
"""Update submodules"""
if (not os.path.exists(os.path.join(self.rust_root, ".git"))) or \
self.get_toml('submodules') == "false":
return
slow_submodules = self.get_toml('fast-submodules') == "false"
start_time = time()
if slow_submodules:
print('Unconditionally updating all submodules')
else:
print('Updating only changed submodules')
default_encoding = sys.getdefaultencoding()
submodules = [s.split(' ', 1)[1] for s in subprocess.check_output(
["git", "config", "--file",
os.path.join(self.rust_root, ".gitmodules"),
"--get-regexp", "path"]
).decode(default_encoding).splitlines()]
filtered_submodules = []
submodules_names = []
for module in submodules:
if module.endswith("llvm-project"):
if self.get_toml('llvm-config') and self.get_toml('lld') != 'true':
continue
if module.endswith("llvm-emscripten"):
backends = self.get_toml('codegen-backends')
if backends is None or not 'emscripten' in backends:
continue
check = self.check_submodule(module, slow_submodules)
filtered_submodules.append((module, check))
submodules_names.append(module)
recorded = subprocess.Popen(["git", "ls-tree", "HEAD"] + submodules_names,
cwd=self.rust_root, stdout=subprocess.PIPE)
recorded = recorded.communicate()[0].decode(default_encoding).strip().splitlines()
recorded_submodules = {}
for data in recorded:
data = data.split()
recorded_submodules[data[3]] = data[2]
for module in filtered_submodules:
self.update_submodule(module[0], module[1], recorded_submodules)
print("Submodules updated in %.2f seconds" % (time() - start_time))
    def set_dev_environment(self):
        """Switch snapshot downloads to the dev-static server.

        Called by bootstrap() when stage0.txt contains a `dev` entry, i.e.
        the pinned stage0 toolchain is only published on dev-static.
        """
        self._download_url = 'https://dev-static.rust-lang.org'
def bootstrap(help_triggered):
    """Configure, fetch, build and run the initial bootstrap"""
    # If the user is asking for help, let them know that the whole download-and-build
    # process has to happen before anything is printed out.
    if help_triggered:
        print("info: Downloading and building bootstrap before processing --help")
        print("      command. See src/bootstrap/README.md for help with common")
        print("      commands.")

    parser = argparse.ArgumentParser(description='Build rust')
    parser.add_argument('--config')
    parser.add_argument('--build')
    parser.add_argument('--src')
    parser.add_argument('--clean', action='store_true')
    parser.add_argument('-v', '--verbose', action='count', default=0)

    # Strip -h/--help before parsing so argparse doesn't print its own help
    # and exit; the full set of flags (including -h) is forwarded to the
    # bootstrap binary at the bottom of this function.
    args = [a for a in sys.argv if a != '-h' and a != '--help']
    args, _ = parser.parse_known_args(args)

    # Configure initial bootstrap
    build = RustBuild()
    build.rust_root = args.src or os.path.abspath(os.path.join(__file__, '../../..'))
    build.verbose = args.verbose
    build.clean = args.clean

    # config.toml is optional; if it's missing all defaults are used.
    try:
        with open(args.config or 'config.toml') as config:
            build.config_toml = config.read()
    except (OSError, IOError):
        pass

    # Verbosity is the max of the command line and the config file.
    match = re.search(r'\nverbose = (\d+)', build.config_toml)
    if match is not None:
        build.verbose = max(build.verbose, int(match.group(1)))

    build.use_vendored_sources = '\nvendor = true' in build.config_toml
    build.use_locked_deps = '\nlocked-deps = true' in build.config_toml

    # Under `sudo`, force vendored sources so the build doesn't try to
    # touch the invoking user's cargo registry in $HOME.
    if 'SUDO_USER' in os.environ and not build.use_vendored_sources:
        if os.environ.get('USER') != os.environ['SUDO_USER']:
            build.use_vendored_sources = True
            print('info: looks like you are running this command under `sudo`')
            print('      and so in order to preserve your $HOME this will now')
            print('      use vendored sources by default. Note that if this')
            print('      does not work you should run a normal build first')
            print('      before running a command like `sudo ./x.py install`')

    # Point cargo at the vendored sources, or remove a stale .cargo dir.
    if build.use_vendored_sources:
        if not os.path.exists('.cargo'):
            os.makedirs('.cargo')
        with output('.cargo/config') as cargo_config:
            cargo_config.write("""
                [source.crates-io]
                replace-with = 'vendored-sources'
                registry = 'https://example.com'

                [source.vendored-sources]
                directory = '{}/vendor'
            """.format(build.rust_root))
    else:
        if os.path.exists('.cargo'):
            shutil.rmtree('.cargo')

    # Pinned stage0 toolchain versions from src/stage0.txt.
    data = stage0_data(build.rust_root)
    build.date = data['date']
    build.rustc_channel = data['rustc']
    build.cargo_channel = data['cargo']

    if 'dev' in data:
        build.set_dev_environment()

    build.update_submodules()

    # Fetch/build the bootstrap
    build.build = args.build or build.build_triple()
    build.download_stage0()
    sys.stdout.flush()
    build.build_bootstrap()
    sys.stdout.flush()

    # Run the bootstrap
    args = [build.bootstrap_binary()]
    args.extend(sys.argv[1:])
    env = os.environ.copy()
    env["BUILD"] = build.build
    env["SRC"] = build.rust_root
    env["BOOTSTRAP_PARENT_ID"] = str(os.getpid())
    env["BOOTSTRAP_PYTHON"] = sys.executable
    env["BUILD_DIR"] = build.build_dir
    env["RUSTC_BOOTSTRAP"] = '1'
    env["CARGO"] = build.cargo()
    env["RUSTC"] = build.rustc()
    run(args, env=env, verbose=build.verbose)
def main():
    """Entry point: run the bootstrap and report total wall-clock time."""
    started = time()

    # x.py help <cmd> ... is rewritten into the -h form for <cmd>.
    if len(sys.argv) > 1 and sys.argv[1] == 'help':
        sys.argv = [sys.argv[0], '-h'] + sys.argv[2:]

    help_triggered = (
        '-h' in sys.argv) or ('--help' in sys.argv) or (len(sys.argv) == 1)
    try:
        bootstrap(help_triggered)
        if not help_triggered:
            print("Build completed successfully in {}".format(
                format_build_time(time() - started)))
    except (SystemExit, KeyboardInterrupt) as error:
        code = getattr(error, 'code', None)
        exit_code = code if isinstance(code, int) else 1
        print(error)
        if not help_triggered:
            print("Build completed unsuccessfully in {}".format(
                format_build_time(time() - started)))
        sys.exit(exit_code)
# Script entry point: defer all work to main() so importing this module
# (e.g. for doctests) has no side effects.
if __name__ == '__main__':
    main()
| 36.66474 | 96 | 0.57632 | from __future__ import absolute_import, division, print_function
import argparse
import contextlib
import datetime
import hashlib
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
from time import time
def get(url, path, verbose=False):
suffix = '.sha256'
sha_url = url + suffix
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
temp_path = temp_file.name
with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as sha_file:
sha_path = sha_file.name
try:
download(sha_path, sha_url, False, verbose)
if os.path.exists(path):
if verify(path, sha_path, False):
if verbose:
print("using already-download file", path)
return
else:
if verbose:
print("ignoring already-download file",
path, "due to failed verification")
os.unlink(path)
download(temp_path, url, True, verbose)
if not verify(temp_path, sha_path, verbose):
raise RuntimeError("failed verification")
if verbose:
print("moving {} to {}".format(temp_path, path))
shutil.move(temp_path, path)
finally:
delete_if_present(sha_path, verbose)
delete_if_present(temp_path, verbose)
def delete_if_present(path, verbose):
if os.path.isfile(path):
if verbose:
print("removing", path)
os.unlink(path)
def download(path, url, probably_big, verbose):
for _ in range(0, 4):
try:
_download(path, url, probably_big, verbose, True)
return
except RuntimeError:
print("\nspurious failure, trying again")
_download(path, url, probably_big, verbose, False)
def _download(path, url, probably_big, verbose, exception):
if probably_big or verbose:
print("downloading {}".format(url))
if sys.platform == 'win32':
run(["PowerShell.exe", "/nologo", "-Command",
"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;",
"(New-Object System.Net.WebClient).DownloadFile('{}', '{}')".format(url, path)],
verbose=verbose,
exception=exception)
else:
if probably_big or verbose:
option = "-#"
else:
option = "-s"
run(["curl", option,
"-y", "30", "-Y", "10",
"--connect-timeout", "30",
"--retry", "3", "-Sf", "-o", path, url],
verbose=verbose,
exception=exception)
def verify(path, sha_path, verbose):
if verbose:
print("verifying", path)
with open(path, "rb") as source:
found = hashlib.sha256(source.read()).hexdigest()
with open(sha_path, "r") as sha256sum:
expected = sha256sum.readline().split()[0]
verified = found == expected
if not verified:
print("invalid checksum:\n"
" found: {}\n"
" expected: {}".format(found, expected))
return verified
def unpack(tarball, dst, verbose=False, match=None):
print("extracting", tarball)
fname = os.path.basename(tarball).replace(".tar.gz", "")
with contextlib.closing(tarfile.open(tarball)) as tar:
for member in tar.getnames():
if "/" not in member:
continue
name = member.replace(fname + "/", "", 1)
if match is not None and not name.startswith(match):
continue
name = name[len(match) + 1:]
dst_path = os.path.join(dst, name)
if verbose:
print(" extracting", member)
tar.extract(member, dst)
src_path = os.path.join(dst, member)
if os.path.isdir(src_path) and os.path.exists(dst_path):
continue
shutil.move(src_path, dst_path)
shutil.rmtree(os.path.join(dst, fname))
def run(args, verbose=False, exception=False, **kwargs):
if verbose:
print("running: " + ' '.join(args))
sys.stdout.flush()
ret = subprocess.Popen(args, **kwargs)
code = ret.wait()
if code != 0:
err = "failed to run: " + ' '.join(args)
if verbose or exception:
raise RuntimeError(err)
sys.exit(err)
def stage0_data(rust_root):
nightlies = os.path.join(rust_root, "src/stage0.txt")
with open(nightlies, 'r') as nightlies:
lines = [line.rstrip() for line in nightlies
if not line.startswith("#")]
return dict([line.split(": ", 1) for line in lines if line])
def format_build_time(duration):
return str(datetime.timedelta(seconds=int(duration)))
def default_build_triple():
default_encoding = sys.getdefaultencoding()
try:
ostype = subprocess.check_output(
['uname', '-s']).strip().decode(default_encoding)
cputype = subprocess.check_output(
['uname', '-m']).strip().decode(default_encoding)
except (subprocess.CalledProcessError, OSError):
if sys.platform == 'win32':
return 'x86_64-pc-windows-msvc'
err = "uname not found"
sys.exit(err)
ostype_mapper = {
'Darwin': 'apple-darwin',
'DragonFly': 'unknown-dragonfly',
'FreeBSD': 'unknown-freebsd',
'Haiku': 'unknown-haiku',
'NetBSD': 'unknown-netbsd',
'OpenBSD': 'unknown-openbsd'
}
# Consider the direct transformation first and then the special cases
if ostype in ostype_mapper:
ostype = ostype_mapper[ostype]
elif ostype == 'Linux':
os_from_sp = subprocess.check_output(
['uname', '-o']).strip().decode(default_encoding)
if os_from_sp == 'Android':
ostype = 'linux-android'
else:
ostype = 'unknown-linux-gnu'
elif ostype == 'SunOS':
ostype = 'sun-solaris'
# On Solaris, uname -m will return a machine classification instead
# of a cpu type, so uname -p is recommended instead. However, the
# output from that option is too generic for our purposes (it will
# always emit 'i386' on x86/amd64 systems). As such, isainfo -k
# must be used instead.
try:
cputype = subprocess.check_output(
['isainfo', '-k']).strip().decode(default_encoding)
except (subprocess.CalledProcessError, OSError):
err = "isainfo not found"
sys.exit(err)
elif ostype.startswith('MINGW'):
# msys' `uname` does not print gcc configuration, but prints msys
ostype = 'pc-windows-gnu'
cputype = 'i686'
if os.environ.get('MSYSTEM') == 'MINGW64':
cputype = 'x86_64'
elif ostype.startswith('MSYS'):
ostype = 'pc-windows-gnu'
elif ostype.startswith('CYGWIN_NT'):
cputype = 'i686'
if ostype.endswith('WOW64'):
cputype = 'x86_64'
ostype = 'pc-windows-gnu'
else:
err = "unknown OS type: {}".format(ostype)
sys.exit(err)
if cputype == 'powerpc' and ostype == 'unknown-freebsd':
cputype = subprocess.check_output(
['uname', '-p']).strip().decode(default_encoding)
cputype_mapper = {
'BePC': 'i686',
'aarch64': 'aarch64',
'amd64': 'x86_64',
'arm64': 'aarch64',
'i386': 'i686',
'i486': 'i686',
'i686': 'i686',
'i786': 'i686',
'powerpc': 'powerpc',
'powerpc64': 'powerpc64',
'powerpc64le': 'powerpc64le',
'ppc': 'powerpc',
'ppc64': 'powerpc64',
'ppc64le': 'powerpc64le',
's390x': 's390x',
'x64': 'x86_64',
'x86': 'i686',
'x86-64': 'x86_64',
'x86_64': 'x86_64'
}
if cputype in cputype_mapper:
cputype = cputype_mapper[cputype]
elif cputype in {'xscale', 'arm'}:
cputype = 'arm'
if ostype == 'linux-android':
ostype = 'linux-androideabi'
elif ostype == 'unknown-freebsd':
cputype = subprocess.check_output(
['uname', '-p']).strip().decode(default_encoding)
ostype = 'unknown-freebsd'
elif cputype == 'armv6l':
cputype = 'arm'
if ostype == 'linux-android':
ostype = 'linux-androideabi'
else:
ostype += 'eabihf'
elif cputype in {'armv7l', 'armv8l'}:
cputype = 'armv7'
if ostype == 'linux-android':
ostype = 'linux-androideabi'
else:
ostype += 'eabihf'
elif cputype == 'mips':
if sys.byteorder == 'big':
cputype = 'mips'
elif sys.byteorder == 'little':
cputype = 'mipsel'
else:
raise ValueError("unknown byteorder: {}".format(sys.byteorder))
elif cputype == 'mips64':
if sys.byteorder == 'big':
cputype = 'mips64'
elif sys.byteorder == 'little':
cputype = 'mips64el'
else:
raise ValueError('unknown byteorder: {}'.format(sys.byteorder))
ostype += 'abi64'
elif cputype == 'sparc' or cputype == 'sparcv9' or cputype == 'sparc64':
pass
else:
err = "unknown cpu type: {}".format(cputype)
sys.exit(err)
return "{}-{}".format(cputype, ostype)
@contextlib.contextmanager
def output(filepath):
tmp = filepath + '.tmp'
with open(tmp, 'w') as f:
yield f
try:
os.remove(filepath)
os.rename(tmp, filepath)
except OSError:
shutil.copy2(tmp, filepath)
os.remove(tmp)
class RustBuild(object):
def __init__(self):
self.cargo_channel = ''
self.date = ''
self._download_url = 'https://static.rust-lang.org'
self.rustc_channel = ''
self.build = ''
self.build_dir = os.path.join(os.getcwd(), "build")
self.clean = False
self.config_toml = ''
self.rust_root = ''
self.use_locked_deps = ''
self.use_vendored_sources = ''
self.verbose = False
def download_stage0(self):
rustc_channel = self.rustc_channel
cargo_channel = self.cargo_channel
if self.rustc().startswith(self.bin_root()) and \
(not os.path.exists(self.rustc()) or
self.program_out_of_date(self.rustc_stamp())):
if os.path.exists(self.bin_root()):
shutil.rmtree(self.bin_root())
filename = "rust-std-{}-{}.tar.gz".format(
rustc_channel, self.build)
pattern = "rust-std-{}".format(self.build)
self._download_stage0_helper(filename, pattern)
filename = "rustc-{}-{}.tar.gz".format(rustc_channel, self.build)
self._download_stage0_helper(filename, "rustc")
self.fix_executable("{}/bin/rustc".format(self.bin_root()))
self.fix_executable("{}/bin/rustdoc".format(self.bin_root()))
with output(self.rustc_stamp()) as rust_stamp:
rust_stamp.write(self.date)
# libraries/binaries that are included in rust-std with
# the system MinGW ones.
if "pc-windows-gnu" in self.build:
filename = "rust-mingw-{}-{}.tar.gz".format(
rustc_channel, self.build)
self._download_stage0_helper(filename, "rust-mingw")
if self.cargo().startswith(self.bin_root()) and \
(not os.path.exists(self.cargo()) or
self.program_out_of_date(self.cargo_stamp())):
filename = "cargo-{}-{}.tar.gz".format(cargo_channel, self.build)
self._download_stage0_helper(filename, "cargo")
self.fix_executable("{}/bin/cargo".format(self.bin_root()))
with output(self.cargo_stamp()) as cargo_stamp:
cargo_stamp.write(self.date)
def _download_stage0_helper(self, filename, pattern):
cache_dst = os.path.join(self.build_dir, "cache")
rustc_cache = os.path.join(cache_dst, self.date)
if not os.path.exists(rustc_cache):
os.makedirs(rustc_cache)
url = "{}/dist/{}".format(self._download_url, self.date)
tarball = os.path.join(rustc_cache, filename)
if not os.path.exists(tarball):
get("{}/{}".format(url, filename), tarball, verbose=self.verbose)
unpack(tarball, self.bin_root(), match=pattern, verbose=self.verbose)
@staticmethod
def fix_executable(fname):
default_encoding = sys.getdefaultencoding()
try:
ostype = subprocess.check_output(
['uname', '-s']).strip().decode(default_encoding)
except subprocess.CalledProcessError:
return
except OSError as reason:
if getattr(reason, 'winerror', None) is not None:
return
raise reason
if ostype != "Linux":
return
if not os.path.exists("/etc/NIXOS"):
return
if os.path.exists("/lib"):
return
# At this point we're pretty sure the user is running NixOS
nix_os_msg = "info: you seem to be running NixOS. Attempting to patch"
print(nix_os_msg, fname)
try:
interpreter = subprocess.check_output(
["patchelf", "--print-interpreter", fname])
interpreter = interpreter.strip().decode(default_encoding)
except subprocess.CalledProcessError as reason:
print("warning: failed to call patchelf:", reason)
return
loader = interpreter.split("/")[-1]
try:
ldd_output = subprocess.check_output(
['ldd', '/run/current-system/sw/bin/sh'])
ldd_output = ldd_output.strip().decode(default_encoding)
except subprocess.CalledProcessError as reason:
print("warning: unable to call ldd:", reason)
return
for line in ldd_output.splitlines():
libname = line.split()[0]
if libname.endswith(loader):
loader_path = libname[:len(libname) - len(loader)]
break
else:
print("warning: unable to find the path to the dynamic linker")
return
correct_interpreter = loader_path + loader
try:
subprocess.check_output(
["patchelf", "--set-interpreter", correct_interpreter, fname])
except subprocess.CalledProcessError as reason:
print("warning: failed to call patchelf:", reason)
return
def rustc_stamp(self):
return os.path.join(self.bin_root(), '.rustc-stamp')
def cargo_stamp(self):
return os.path.join(self.bin_root(), '.cargo-stamp')
def program_out_of_date(self, stamp_path):
if not os.path.exists(stamp_path) or self.clean:
return True
with open(stamp_path, 'r') as stamp:
return self.date != stamp.read()
def bin_root(self):
return os.path.join(self.build_dir, self.build, "stage0")
def get_toml(self, key, section=None):
cur_section = None
for line in self.config_toml.splitlines():
section_match = re.match(r'^\s*\[(.*)\]\s*$', line)
if section_match is not None:
cur_section = section_match.group(1)
match = re.match(r'^{}\s*=(.*)$'.format(key), line)
if match is not None:
value = match.group(1)
if section is None or section == cur_section:
return self.get_string(value) or value.strip()
return None
def cargo(self):
return self.program_config('cargo')
def rustc(self):
return self.program_config('rustc')
def program_config(self, program):
config = self.get_toml(program)
if config:
return os.path.expanduser(config)
return os.path.join(self.bin_root(), "bin", "{}{}".format(
program, self.exe_suffix()))
@staticmethod
def get_string(line):
start = line.find('"')
if start != -1:
end = start + 1 + line[start + 1:].find('"')
return line[start + 1:end]
start = line.find('\'')
if start != -1:
end = start + 1 + line[start + 1:].find('\'')
return line[start + 1:end]
return None
@staticmethod
def exe_suffix():
if sys.platform == 'win32':
return '.exe'
return ''
def bootstrap_binary(self):
return os.path.join(self.build_dir, "bootstrap", "debug", "bootstrap")
def build_bootstrap(self):
build_dir = os.path.join(self.build_dir, "bootstrap")
if self.clean and os.path.exists(build_dir):
shutil.rmtree(build_dir)
env = os.environ.copy()
env["RUSTC_BOOTSTRAP"] = '1'
env["CARGO_TARGET_DIR"] = build_dir
env["RUSTC"] = self.rustc()
env["LD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \
(os.pathsep + env["LD_LIBRARY_PATH"]) \
if "LD_LIBRARY_PATH" in env else ""
env["DYLD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \
(os.pathsep + env["DYLD_LIBRARY_PATH"]) \
if "DYLD_LIBRARY_PATH" in env else ""
env["LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \
(os.pathsep + env["LIBRARY_PATH"]) \
if "LIBRARY_PATH" in env else ""
env["RUSTFLAGS"] = "-Cdebuginfo=2 "
build_section = "target.{}".format(self.build_triple())
target_features = []
if self.get_toml("crt-static", build_section) == "true":
target_features += ["+crt-static"]
elif self.get_toml("crt-static", build_section) == "false":
target_features += ["-crt-static"]
if target_features:
env["RUSTFLAGS"] += "-C target-feature=" + (",".join(target_features)) + " "
target_linker = self.get_toml("linker", build_section)
if target_linker is not None:
env["RUSTFLAGS"] += "-C linker=" + target_linker + " "
env["PATH"] = os.path.join(self.bin_root(), "bin") + \
os.pathsep + env["PATH"]
if not os.path.isfile(self.cargo()):
raise Exception("no cargo executable found at `{}`".format(
self.cargo()))
args = [self.cargo(), "build", "--manifest-path",
os.path.join(self.rust_root, "src/bootstrap/Cargo.toml")]
for _ in range(1, self.verbose):
args.append("--verbose")
if self.use_locked_deps:
args.append("--locked")
if self.use_vendored_sources:
args.append("--frozen")
run(args, env=env, verbose=self.verbose)
def build_triple(self):
config = self.get_toml('build')
if config:
return config
return default_build_triple()
def check_submodule(self, module, slow_submodules):
if not slow_submodules:
checked_out = subprocess.Popen(["git", "rev-parse", "HEAD"],
cwd=os.path.join(self.rust_root, module),
stdout=subprocess.PIPE)
return checked_out
else:
return None
def update_submodule(self, module, checked_out, recorded_submodules):
module_path = os.path.join(self.rust_root, module)
if checked_out != None:
default_encoding = sys.getdefaultencoding()
checked_out = checked_out.communicate()[0].decode(default_encoding).strip()
if recorded_submodules[module] == checked_out:
return
print("Updating submodule", module)
run(["git", "submodule", "-q", "sync", module],
cwd=self.rust_root, verbose=self.verbose)
try:
run(["git", "submodule", "update",
"--init", "--recursive", "--progress", module],
cwd=self.rust_root, verbose=self.verbose, exception=True)
except RuntimeError:
run(["git", "submodule", "update",
"--init", "--recursive", module],
cwd=self.rust_root, verbose=self.verbose)
run(["git", "reset", "-q", "--hard"],
cwd=module_path, verbose=self.verbose)
run(["git", "clean", "-qdfx"],
cwd=module_path, verbose=self.verbose)
def update_submodules(self):
if (not os.path.exists(os.path.join(self.rust_root, ".git"))) or \
self.get_toml('submodules') == "false":
return
slow_submodules = self.get_toml('fast-submodules') == "false"
start_time = time()
if slow_submodules:
print('Unconditionally updating all submodules')
else:
print('Updating only changed submodules')
default_encoding = sys.getdefaultencoding()
submodules = [s.split(' ', 1)[1] for s in subprocess.check_output(
["git", "config", "--file",
os.path.join(self.rust_root, ".gitmodules"),
"--get-regexp", "path"]
).decode(default_encoding).splitlines()]
filtered_submodules = []
submodules_names = []
for module in submodules:
if module.endswith("llvm-project"):
if self.get_toml('llvm-config') and self.get_toml('lld') != 'true':
continue
if module.endswith("llvm-emscripten"):
backends = self.get_toml('codegen-backends')
if backends is None or not 'emscripten' in backends:
continue
check = self.check_submodule(module, slow_submodules)
filtered_submodules.append((module, check))
submodules_names.append(module)
recorded = subprocess.Popen(["git", "ls-tree", "HEAD"] + submodules_names,
cwd=self.rust_root, stdout=subprocess.PIPE)
recorded = recorded.communicate()[0].decode(default_encoding).strip().splitlines()
recorded_submodules = {}
for data in recorded:
data = data.split()
recorded_submodules[data[3]] = data[2]
for module in filtered_submodules:
self.update_submodule(module[0], module[1], recorded_submodules)
print("Submodules updated in %.2f seconds" % (time() - start_time))
def set_dev_environment(self):
self._download_url = 'https://dev-static.rust-lang.org'
def bootstrap(help_triggered):
# If the user is asking for help, let them know that the whole download-and-build
# process has to happen before anything is printed out.
if help_triggered:
print("info: Downloading and building bootstrap before processing --help")
print(" command. See src/bootstrap/README.md for help with common")
print(" commands.")
parser = argparse.ArgumentParser(description='Build rust')
parser.add_argument('--config')
parser.add_argument('--build')
parser.add_argument('--src')
parser.add_argument('--clean', action='store_true')
parser.add_argument('-v', '--verbose', action='count', default=0)
args = [a for a in sys.argv if a != '-h' and a != '--help']
args, _ = parser.parse_known_args(args)
# Configure initial bootstrap
build = RustBuild()
build.rust_root = args.src or os.path.abspath(os.path.join(__file__, '../../..'))
build.verbose = args.verbose
build.clean = args.clean
try:
with open(args.config or 'config.toml') as config:
build.config_toml = config.read()
except (OSError, IOError):
pass
match = re.search(r'\nverbose = (\d+)', build.config_toml)
if match is not None:
build.verbose = max(build.verbose, int(match.group(1)))
build.use_vendored_sources = '\nvendor = true' in build.config_toml
build.use_locked_deps = '\nlocked-deps = true' in build.config_toml
if 'SUDO_USER' in os.environ and not build.use_vendored_sources:
if os.environ.get('USER') != os.environ['SUDO_USER']:
build.use_vendored_sources = True
print('info: looks like you are running this command under `sudo`')
print(' and so in order to preserve your $HOME this will now')
print(' use vendored sources by default. Note that if this')
print(' does not work you should run a normal build first')
print(' before running a command like `sudo ./x.py install`')
if build.use_vendored_sources:
if not os.path.exists('.cargo'):
os.makedirs('.cargo')
with output('.cargo/config') as cargo_config:
cargo_config.write("""
[source.crates-io]
replace-with = 'vendored-sources'
registry = 'https://example.com'
[source.vendored-sources]
directory = '{}/vendor'
""".format(build.rust_root))
else:
if os.path.exists('.cargo'):
shutil.rmtree('.cargo')
data = stage0_data(build.rust_root)
build.date = data['date']
build.rustc_channel = data['rustc']
build.cargo_channel = data['cargo']
if 'dev' in data:
build.set_dev_environment()
build.update_submodules()
# Fetch/build the bootstrap
build.build = args.build or build.build_triple()
build.download_stage0()
sys.stdout.flush()
build.build_bootstrap()
sys.stdout.flush()
# Run the bootstrap
args = [build.bootstrap_binary()]
args.extend(sys.argv[1:])
env = os.environ.copy()
env["BUILD"] = build.build
env["SRC"] = build.rust_root
env["BOOTSTRAP_PARENT_ID"] = str(os.getpid())
env["BOOTSTRAP_PYTHON"] = sys.executable
env["BUILD_DIR"] = build.build_dir
env["RUSTC_BOOTSTRAP"] = '1'
env["CARGO"] = build.cargo()
env["RUSTC"] = build.rustc()
run(args, env=env, verbose=build.verbose)
def main():
    """Entry point for x.py: run the bootstrap and report the elapsed build time."""
    started_at = time()

    # Rewrite `x.py help <cmd> ...` into `x.py -h <cmd> ...` so the argument
    # parser treats it like any other help request.
    if len(sys.argv) > 1 and sys.argv[1] == 'help':
        sys.argv = [sys.argv[0], '-h'] + sys.argv[2:]

    # Help output should not be followed by a "Build completed ..." banner.
    help_triggered = (
        len(sys.argv) == 1) or ('-h' in sys.argv) or ('--help' in sys.argv)

    try:
        bootstrap(help_triggered)
        if not help_triggered:
            print("Build completed successfully in {}".format(
                format_build_time(time() - started_at)))
    except (SystemExit, KeyboardInterrupt) as error:
        # Preserve an explicit integer exit status when one was given;
        # anything else (including Ctrl-C) maps to a generic failure code.
        if hasattr(error, 'code') and isinstance(error.code, int):
            status = error.code
        else:
            status = 1
        print(error)
        if not help_triggered:
            print("Build completed unsuccessfully in {}".format(
                format_build_time(time() - started_at)))
        sys.exit(status)


if __name__ == '__main__':
    main()
| true | true |
1c2b88ed5ec3568339f2f644baf1031a78b8c89f | 12,105 | py | Python | sdks/python/client/argo_workflows/model/azure_file_volume_source.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | 1 | 2022-02-24T01:45:03.000Z | 2022-02-24T01:45:03.000Z | sdks/python/client/argo_workflows/model/azure_file_volume_source.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | 18 | 2022-02-01T23:09:58.000Z | 2022-03-31T23:28:41.000Z | sdks/python/client/argo_workflows/model/azure_file_volume_source.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | null | null | null | """
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
class AzureFileVolumeSource(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model declares no enum-constrained attributes ...
    allowed_values = {
    }

    # ... and no length/range/regex validations.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # JSON null is not an accepted value for this model.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'secret_name': (str,),  # noqa: E501
            'share_name': (str,),  # noqa: E501
            'read_only': (bool,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Pythonic attribute name -> JSON key used on the wire.
    attribute_map = {
        'secret_name': 'secretName',  # noqa: E501
        'share_name': 'shareName',  # noqa: E501
        'read_only': 'readOnly',  # noqa: E501
    }

    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, secret_name, share_name, *args, **kwargs):  # noqa: E501
        """AzureFileVolumeSource - a model defined in OpenAPI

        Args:
            secret_name (str): the name of secret that contains Azure Storage Account Name and Key
            share_name (str): Share Name

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so deserialization can populate read-only fields.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.secret_name = secret_name
        self.share_name = share_name
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, secret_name, share_name, *args, **kwargs):  # noqa: E501
        """AzureFileVolumeSource - a model defined in OpenAPI

        Args:
            secret_name (str): the name of secret that contains Azure Storage Account Name and Key
            share_name (str): Share Name

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            read_only (bool): Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.secret_name = secret_name
        self.share_name = share_name
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, the public constructor rejects
            # attempts to set read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 44.503676 | 206 | 0.580008 |
import re
import sys
from argo_workflows.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
class AzureFileVolumeSource(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
return {
'secret_name': (str,),
'share_name': (str,),
'read_only': (bool,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'secret_name': 'secretName',
'share_name': 'shareName',
'read_only': 'readOnly',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, secret_name, share_name, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.secret_name = secret_name
self.share_name = share_name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, secret_name, share_name, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.secret_name = secret_name
self.share_name = share_name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true | true |
1c2b88fb227d84629ce8a214ac3e7fa19aff31d5 | 2,527 | py | Python | src/datamodules/num_datamodule.py | kevin3314/gcn_ppi | 39b0e618bbb592f9cb8d37edf28deeb7c0987dad | [
"MIT"
] | null | null | null | src/datamodules/num_datamodule.py | kevin3314/gcn_ppi | 39b0e618bbb592f9cb8d37edf28deeb7c0987dad | [
"MIT"
] | 1 | 2021-12-08T02:47:10.000Z | 2021-12-08T02:47:10.000Z | src/datamodules/num_datamodule.py | kevin3314/gcn_ppi | 39b0e618bbb592f9cb8d37edf28deeb7c0987dad | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Optional, Union
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.datamodules.datasets.num_dataset import NumDataset
class NumDatasetModule(LightningDataModule):
    """LightningDataModule serving train/valid/test ``NumDataset`` splits.

    Args:
        train_csv_path (Union[str, Path]): CSV file for the training split.
        valid_csv_path (Union[str, Path]): CSV file for the validation split.
        test_csv_path (Union[str, Path]): CSV file for the test split.
        feature_tsv_path (Union[str, Path]): TSV file of numeric features,
            shared by all three splits.
        batch_size (int, optional): Batch size for every dataloader. Defaults to 32.
        num_workers (int, optional): The number of dataloader workers. Defaults to 0.
        pin_memory (bool, optional): Pin host memory in the dataloaders
            (useful when training on CUDA). Defaults to False.
    """

    def __init__(
        self,
        train_csv_path: Union[str, Path],
        valid_csv_path: Union[str, Path],
        test_csv_path: Union[str, Path],
        feature_tsv_path: Union[str, Path],
        batch_size: int = 32,
        num_workers: int = 0,
        pin_memory: bool = False,
        **kwargs,
    ):
        super().__init__()
        self.train_csv_path = Path(train_csv_path)
        self.valid_csv_path = Path(valid_csv_path)
        self.test_csv_path = Path(test_csv_path)
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.feature_tsv_path = Path(feature_tsv_path)

        # Populated by setup(); None until then.
        self.train_ds: Optional[Dataset] = None
        self.valid_ds: Optional[Dataset] = None
        self.test_ds: Optional[Dataset] = None

    def setup(self, stage: Optional[str] = None):
        """Instantiate the three dataset splits."""
        self.train_ds = NumDataset(self.train_csv_path, self.feature_tsv_path)
        self.valid_ds = NumDataset(self.valid_csv_path, self.feature_tsv_path)
        self.test_ds = NumDataset(self.test_csv_path, self.feature_tsv_path)

    def _make_dataloader(self, dataset: Dataset, shuffle: bool) -> DataLoader:
        # Single place for the shared loader settings so the three public
        # dataloader methods cannot drift apart.
        return DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            shuffle=shuffle,
        )

    def train_dataloader(self):
        # Only the training split is shuffled.
        return self._make_dataloader(self.train_ds, shuffle=True)

    def val_dataloader(self):
        return self._make_dataloader(self.valid_ds, shuffle=False)

    def test_dataloader(self):
        return self._make_dataloader(self.test_ds, shuffle=False)
| 33.25 | 78 | 0.629205 | from pathlib import Path
from typing import Optional, Union
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.datamodules.datasets.num_dataset import NumDataset
class NumDatasetModule(LightningDataModule):
def __init__(
self,
train_csv_path: Union[str, Path],
valid_csv_path: Union[str, Path],
test_csv_path: Union[str, Path],
feature_tsv_path: Union[str, Path],
batch_size: int = 32,
num_workers: int = 0,
pin_memory: bool = False,
**kwargs,
):
super().__init__()
self.train_csv_path = Path(train_csv_path)
self.valid_csv_path = Path(valid_csv_path)
self.test_csv_path = Path(test_csv_path)
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.feature_tsv_path = Path(feature_tsv_path)
self.train_ds: Optional[Dataset] = None
self.valid_ds: Optional[Dataset] = None
self.test_ds: Optional[Dataset] = None
def setup(self, stage: Optional[str] = None):
self.train_ds = NumDataset(self.train_csv_path, self.feature_tsv_path)
self.valid_ds = NumDataset(self.valid_csv_path, self.feature_tsv_path)
self.test_ds = NumDataset(self.test_csv_path, self.feature_tsv_path)
def train_dataloader(self):
return DataLoader(
dataset=self.train_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=True,
)
def val_dataloader(self):
return DataLoader(
dataset=self.valid_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
def test_dataloader(self):
return DataLoader(
dataset=self.test_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
| true | true |
1c2b897085353aa185789b82ad3aa2c503b3a00a | 3,928 | py | Python | discordbot/stocks/technical_analysis/kc.py | CameronBeebe/GamestonkTerminal | e235f09290fbc188566643e5a7be46298d33ac35 | [
"MIT"
] | 1 | 2021-12-01T02:54:28.000Z | 2021-12-01T02:54:28.000Z | discordbot/stocks/technical_analysis/kc.py | CameronBeebe/GamestonkTerminal | e235f09290fbc188566643e5a7be46298d33ac35 | [
"MIT"
] | null | null | null | discordbot/stocks/technical_analysis/kc.py | CameronBeebe/GamestonkTerminal | e235f09290fbc188566643e5a7be46298d33ac35 | [
"MIT"
] | null | null | null | import discord
import config_discordbot as cfg
from discordbot import gst_imgur
from datetime import datetime, timedelta
from matplotlib import pyplot as plt
import os
import helpers
from gamestonk_terminal.helper_funcs import plot_autoscale
from gamestonk_terminal.common.technical_analysis import volatility_model
from gamestonk_terminal.config_plot import PLOT_DPI
async def kc_command(
    ctx, ticker="", length="20", scalar="2", mamode="sma", offset="0", start="", end=""
):
    """Displays chart with keltner channel [Yahoo Finance]

    All parameters arrive as strings from the Discord command parser and are
    validated/converted below.  On success the rendered chart is uploaded to
    Imgur and sent to the channel as an embed; on any failure an error embed
    is sent instead.
    """
    try:
        # Debug
        if cfg.DEBUG:
            print(
                f"!stocks.ta.kc {ticker} {length} {scalar} {mamode} {offset} {start} {end}"
            )

        # Check for argument
        possible_ma = ["sma", "ema", "wma", "hma", "zlma"]

        if ticker == "":
            raise Exception("Stock ticker is required")

        # Default window: one year back from today.
        if start == "":
            start = datetime.now() - timedelta(days=365)
        else:
            start = datetime.strptime(start, cfg.DATE_FORMAT)
        if end == "":
            end = datetime.now()
        else:
            end = datetime.strptime(end, cfg.DATE_FORMAT)

        # lstrip("-") lets negative values pass the isnumeric() check.
        # NOTE(review): the error message says "integer" but the value is
        # converted with float() below — confirm which is intended.
        if not length.lstrip("-").isnumeric():
            raise Exception("Number has to be an integer")
        length = float(length)
        if not scalar.lstrip("-").isnumeric():
            raise Exception("Number has to be an integer")
        scalar = float(scalar)
        if not offset.lstrip("-").isnumeric():
            raise Exception("Number has to be an integer")
        offset = float(offset)
        if mamode not in possible_ma:
            raise Exception("Invalid ma entered")

        ticker = ticker.upper()
        df_stock = helpers.load(ticker, start)
        if df_stock.empty:
            raise Exception("Stock ticker is invalid")

        # Retrieve Data
        df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
        # Columns 0/1/2 of df_ta are plotted as upper / middle / lower bands
        # below; "1440min" selects the daily interval.
        df_ta = volatility_model.kc("1440min", df_stock, length, scalar, mamode, offset)

        # Output Data
        fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax.plot(df_stock.index, df_stock["Adj Close"].values, color="fuchsia")
        ax.plot(df_ta.index, df_ta.iloc[:, 0].values, "b", lw=1.5, label="upper")
        ax.plot(df_ta.index, df_ta.iloc[:, 1].values, "b", lw=1.5, ls="--")
        ax.plot(df_ta.index, df_ta.iloc[:, 2].values, "b", lw=1.5, label="lower")
        ax.set_title(f"{ticker} Keltner Channels")
        ax.set_xlim(df_stock.index[0], df_stock.index[-1])
        ax.set_xlabel("Time")
        ax.set_ylabel("Price")
        ax.legend([ticker, df_ta.columns[0], df_ta.columns[1], df_ta.columns[2]])
        # Shade the region between the upper and lower bands.
        ax.fill_between(
            df_ta.index,
            df_ta.iloc[:, 0].values,
            df_ta.iloc[:, 2].values,
            alpha=0.1,
            color="b",
        )
        ax.grid(b=True, which="major", color="#666666", linestyle="-")
        plt.gcf().autofmt_xdate()
        fig.tight_layout(pad=1)
        plt.legend()

        # The chart is written to the working directory, uploaded to Imgur,
        # then the local file is deleted after the embed is built.
        plt.savefig("ta_kc.png")
        uploaded_image = gst_imgur.upload_image("ta_kc.png", title="something")
        image_link = uploaded_image.link
        if cfg.DEBUG:
            print(f"Image URL: {image_link}")
        title = "Stocks: Keltner-Channel " + ticker
        embed = discord.Embed(title=title, colour=cfg.COLOR)
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        embed.set_image(url=image_link)
        os.remove("ta_kc.png")

        await ctx.send(embed=embed)
    except Exception as e:
        # Broad catch by design: any failure (bad input, data fetch, plotting,
        # upload) is reported back to the Discord channel as an error embed.
        embed = discord.Embed(
            title="ERROR Stocks: Keltner-Channel",
            colour=cfg.COLOR,
            description=e,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        await ctx.send(embed=embed)
| 32.733333 | 91 | 0.588595 | import discord
import config_discordbot as cfg
from discordbot import gst_imgur
from datetime import datetime, timedelta
from matplotlib import pyplot as plt
import os
import helpers
from gamestonk_terminal.helper_funcs import plot_autoscale
from gamestonk_terminal.common.technical_analysis import volatility_model
from gamestonk_terminal.config_plot import PLOT_DPI
async def kc_command(
ctx, ticker="", length="20", scalar="2", mamode="sma", offset="0", start="", end=""
):
try:
if cfg.DEBUG:
print(
f"!stocks.ta.kc {ticker} {length} {scalar} {mamode} {offset} {start} {end}"
)
possible_ma = ["sma", "ema", "wma", "hma", "zlma"]
if ticker == "":
raise Exception("Stock ticker is required")
if start == "":
start = datetime.now() - timedelta(days=365)
else:
start = datetime.strptime(start, cfg.DATE_FORMAT)
if end == "":
end = datetime.now()
else:
end = datetime.strptime(end, cfg.DATE_FORMAT)
if not length.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
length = float(length)
if not scalar.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
scalar = float(scalar)
if not offset.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
offset = float(offset)
if mamode not in possible_ma:
raise Exception("Invalid ma entered")
ticker = ticker.upper()
df_stock = helpers.load(ticker, start)
if df_stock.empty:
raise Exception("Stock ticker is invalid")
df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
df_ta = volatility_model.kc("1440min", df_stock, length, scalar, mamode, offset)
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
ax.plot(df_stock.index, df_stock["Adj Close"].values, color="fuchsia")
ax.plot(df_ta.index, df_ta.iloc[:, 0].values, "b", lw=1.5, label="upper")
ax.plot(df_ta.index, df_ta.iloc[:, 1].values, "b", lw=1.5, ls="--")
ax.plot(df_ta.index, df_ta.iloc[:, 2].values, "b", lw=1.5, label="lower")
ax.set_title(f"{ticker} Keltner Channels")
ax.set_xlim(df_stock.index[0], df_stock.index[-1])
ax.set_xlabel("Time")
ax.set_ylabel("Price")
ax.legend([ticker, df_ta.columns[0], df_ta.columns[1], df_ta.columns[2]])
ax.fill_between(
df_ta.index,
df_ta.iloc[:, 0].values,
df_ta.iloc[:, 2].values,
alpha=0.1,
color="b",
)
ax.grid(b=True, which="major", color="#666666", linestyle="-")
plt.gcf().autofmt_xdate()
fig.tight_layout(pad=1)
plt.legend()
plt.savefig("ta_kc.png")
uploaded_image = gst_imgur.upload_image("ta_kc.png", title="something")
image_link = uploaded_image.link
if cfg.DEBUG:
print(f"Image URL: {image_link}")
title = "Stocks: Keltner-Channel " + ticker
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_image(url=image_link)
os.remove("ta_kc.png")
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(
title="ERROR Stocks: Keltner-Channel",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
| true | true |
1c2b8a06abf17dae9e8a8c3ce166e9aebc12b8e5 | 3,198 | py | Python | examples/secret.py | ironman9356/discord.py | 65084a52df071dd2cabb806321a748a1b7e2af24 | [
"MIT"
] | 1 | 2021-08-28T04:50:31.000Z | 2021-08-28T04:50:31.000Z | examples/secret.py | ironman9356/discord.py | 65084a52df071dd2cabb806321a748a1b7e2af24 | [
"MIT"
] | null | null | null | examples/secret.py | ironman9356/discord.py | 65084a52df071dd2cabb806321a748a1b7e2af24 | [
"MIT"
] | null | null | null | import typing
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=commands.when_mentioned, description="Nothing to see here!")
# the `hidden` keyword argument hides it from the help command.
@bot.group(hidden=True)
async def secret(ctx: commands.Context):
    """What is this "secret" you speak of?"""
    if ctx.invoked_subcommand is None:
        # Invoked without a subcommand: reply and auto-delete after 5 seconds.
        await ctx.send("Shh!", delete_after=5)
def create_overwrites(ctx, *objects):
    """Build the permission overwrites that make a channel "secret".

    Every entry in *objects* (a `discord.Role` or `discord.Member`) is granted
    the `view_channel` permission, the guild's default role (@everyone) is
    denied it unless an overwrite for it was already supplied, and the bot
    itself is always allowed to see the channel.
    """
    permissions = {}
    # Grant each requested role/member its own overwrite object.
    for target in objects:
        permissions[target] = discord.PermissionOverwrite(view_channel=True)

    # Hide the channel from @everyone, unless the caller explicitly
    # included the default role above.
    permissions.setdefault(
        ctx.guild.default_role, discord.PermissionOverwrite(view_channel=False)
    )

    # The client must always be able to view the channel it manages.
    permissions[ctx.guild.me] = discord.PermissionOverwrite(view_channel=True)
    return permissions
# since these commands rely on guild related features,
# it is best to lock it to be guild-only.
@secret.command()
@commands.guild_only()
async def text(
    ctx: commands.GuildContext, name: str, *objects: typing.Union[discord.Role, discord.Member]
):
    """This makes a text channel with a specified name
    that is only visible to roles or members that are specified.
    """

    overwrites = create_overwrites(ctx, *objects)

    # `reason` is recorded in the guild's audit log.
    await ctx.guild.create_text_channel(
        name,
        overwrites=overwrites,
        topic="Top secret text channel. Any leakage of this channel may result in serious trouble.",
        reason="Very secret business.",
    )
@secret.command()
@commands.guild_only()
async def voice(
    ctx: commands.GuildContext, name: str, *objects: typing.Union[discord.Role, discord.Member]
):
    """This does the same thing as the `text` subcommand
    but instead creates a voice channel.
    """

    # Same visibility rules as the text variant; only the channel type differs.
    overwrites = create_overwrites(ctx, *objects)

    await ctx.guild.create_voice_channel(
        name, overwrites=overwrites, reason="Very secret business."
    )
@secret.command()
@commands.guild_only()
async def emoji(ctx: commands.GuildContext, emoji: discord.PartialEmoji, *roles: discord.Role):
    """This clones a specified emoji that only specified roles
    are allowed to use.
    """

    # fetch the emoji asset and read it as bytes.
    emoji_bytes = await emoji.read()

    # the key parameter here is `roles`, which controls
    # what roles are able to use the emoji.
    # NOTE(review): with no roles supplied the emoji is presumably
    # unrestricted — confirm against the discord.py API docs.
    await ctx.guild.create_custom_emoji(
        name=emoji.name, image=emoji_bytes, roles=roles, reason="Very secret business."
    )
bot.run("token")
| 32.969072 | 100 | 0.719199 | import typing
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=commands.when_mentioned, description="Nothing to see here!")
@bot.group(hidden=True)
async def secret(ctx: commands.Context):
if ctx.invoked_subcommand is None:
await ctx.send("Shh!", delete_after=5)
def create_overwrites(ctx, *objects):
overwrites = {obj: discord.PermissionOverwrite(view_channel=True) for obj in objects}
overwrites.setdefault(ctx.guild.default_role, discord.PermissionOverwrite(view_channel=False))
# makes sure the client is always allowed to view the channel.
overwrites[ctx.guild.me] = discord.PermissionOverwrite(view_channel=True)
return overwrites
# since these commands rely on guild related features,
# it is best to lock it to be guild-only.
@secret.command()
@commands.guild_only()
async def text(
ctx: commands.GuildContext, name: str, *objects: typing.Union[discord.Role, discord.Member]
):
overwrites = create_overwrites(ctx, *objects)
await ctx.guild.create_text_channel(
name,
overwrites=overwrites,
topic="Top secret text channel. Any leakage of this channel may result in serious trouble.",
reason="Very secret business.",
)
@secret.command()
@commands.guild_only()
async def voice(
ctx: commands.GuildContext, name: str, *objects: typing.Union[discord.Role, discord.Member]
):
overwrites = create_overwrites(ctx, *objects)
await ctx.guild.create_voice_channel(
name, overwrites=overwrites, reason="Very secret business."
)
@secret.command()
@commands.guild_only()
async def emoji(ctx: commands.GuildContext, emoji: discord.PartialEmoji, *roles: discord.Role):
# fetch the emoji asset and read it as bytes.
emoji_bytes = await emoji.read()
# the key parameter here is `roles`, which controls
# what roles are able to use the emoji.
await ctx.guild.create_custom_emoji(
name=emoji.name, image=emoji_bytes, roles=roles, reason="Very secret business."
)
bot.run("token")
| true | true |
1c2b8c273175f9b60343e43eb7cc07fa5f1d8bc0 | 1,106 | py | Python | examples/impls.py | ashwinvin/Tanjun | e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab | [
"BSD-3-Clause"
] | null | null | null | examples/impls.py | ashwinvin/Tanjun | e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab | [
"BSD-3-Clause"
] | null | null | null | examples/impls.py | ashwinvin/Tanjun | e16e28a3be7b809762e2cdc583ae9fe9edf8a0ab | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# cython: language_level=3
"""Placeholder for `proto`'s standard implementations including logic for injecting them."""
import typing
import examples.config
import tanjun
from examples import protos
async def connect_to_database(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
    """Stand-in for the real async database connection factory."""
    raise NotImplementedError  # this is a stand in for the real implementation which would be imported
class DatabaseImpl:
    """Example database adapter whose construction is dependency-injected.

    NOTE(review): this appears intended to satisfy a database protocol from
    `examples.protos` (it returns `protos.GuildConfig` / `protos.UserInfo`)
    — confirm against the actual protocol definition.
    """

    def __init__(self, connection: typing.Any) -> None:
        # Raw connection handle produced by `connect_to_database`.
        self._conn = connection

    @classmethod
    async def connect(cls, config: examples.config.ExampleConfig = tanjun.injected(type=examples.config.ExampleConfig)):
        """Alternate constructor: open a connection using the injected config."""
        return cls(await connect_to_database(password=config.database_password, url=config.database_url))

    async def get_guild_info(self, guild_id: int) -> typing.Optional[protos.GuildConfig]:
        """Fetch a guild's stored configuration, or None if absent (stub)."""
        raise NotImplementedError

    async def get_user_info(self, user_id: int) -> typing.Optional[protos.UserInfo]:
        """Fetch a user's stored info, or None if absent (stub)."""
        raise NotImplementedError

    async def remove_user(self, user_id: int) -> None:
        """Delete a user's stored data (stub)."""
        raise NotImplementedError
| 35.677419 | 120 | 0.745027 |
import typing
import examples.config
import tanjun
from examples import protos
async def connect_to_database(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
raise NotImplementedError
class DatabaseImpl:
def __init__(self, connection: typing.Any) -> None:
self._conn = connection
@classmethod
async def connect(cls, config: examples.config.ExampleConfig = tanjun.injected(type=examples.config.ExampleConfig)):
return cls(await connect_to_database(password=config.database_password, url=config.database_url))
async def get_guild_info(self, guild_id: int) -> typing.Optional[protos.GuildConfig]:
raise NotImplementedError
async def get_user_info(self, user_id: int) -> typing.Optional[protos.UserInfo]:
raise NotImplementedError
async def remove_user(self, user_id: int) -> None:
raise NotImplementedError
| true | true |
1c2b8e04a61867b25fa43d687483bf205d9c6ce0 | 4,308 | py | Python | graphene_neo4j/settings.py | Usama0121/graphene-neo4j | 8d8c5a106b3d41851516eb7334d4f9beb8bb301c | [
"MIT"
] | null | null | null | graphene_neo4j/settings.py | Usama0121/graphene-neo4j | 8d8c5a106b3d41851516eb7334d4f9beb8bb301c | [
"MIT"
] | null | null | null | graphene_neo4j/settings.py | Usama0121/graphene-neo4j | 8d8c5a106b3d41851516eb7334d4f9beb8bb301c | [
"MIT"
] | null | null | null | """
Settings for Graphene are all namespaced in the GRAPHENE setting.
For example your project's `settings.py` file might look like this:
GRAPHENE = {
'SCHEMA': 'my_app.schema.schema'
'MIDDLEWARE': (
'graphene_neo4j.debug.DjangoDebugMiddleware',
)
}
This module provides the `graphene_settings` object, that is used to access
Graphene settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.test.signals import setting_changed
from django.utils import six
try:
import importlib # Available in Python 3.1+
except ImportError:
from django.utils import importlib # Will be removed in Django 1.9
# Copied shamelessly from Django REST Framework
DEFAULTS = {
"SCHEMA": None,
"SCHEMA_OUTPUT": "schema.json",
"SCHEMA_INDENT": 2,
"MIDDLEWARE": (),
# Set to True if the connection fields must have
# either the first or last argument
"RELAY_CONNECTION_ENFORCE_FIRST_OR_LAST": False,
# Max items returned in ConnectionFields / FilterConnectionFields
"RELAY_CONNECTION_MAX_LIMIT": 100,
}
def init_midleware():
    """Append the Django debug middleware to the defaults when DEBUG is on."""
    if not settings.DEBUG:
        return
    DEFAULTS["MIDDLEWARE"] = DEFAULTS["MIDDLEWARE"] + (
        "graphene_neo4j.debug.DjangoDebugMiddleware",
    )
# try:
# init_midleware()
# except Exception:
# if not settings.configured:
# settings.configure()
# List of settings that may be in string import notation.
IMPORT_STRINGS = ("MIDDLEWARE", "SCHEMA")
def perform_import(val, setting_name):
    """
    Resolve a setting value that may be expressed in string import notation.

    ``None`` passes through untouched, a string is imported, and a
    list/tuple has each element imported; anything else is returned as-is.
    """
    if val is None:
        return None
    if isinstance(val, (list, tuple)):
        return [import_from_string(item, setting_name) for item in val]
    if isinstance(val, six.string_types):
        return import_from_string(val, setting_name)
    return val
def import_from_string(val, setting_name):
    """
    Resolve a dotted-path string to the attribute it names.

    ``val`` is split into ``module.path`` + ``attribute``; the module is
    imported and the attribute returned.  Import or attribute failures are
    re-raised as ``ImportError`` mentioning the offending setting.
    """
    module_path, _, class_name = val.rpartition(".")
    try:
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError) as e:
        msg = "Could not import '%s' for Graphene setting '%s'. %s: %s." % (
            val,
            setting_name,
            e.__class__.__name__,
            e,
        )
        raise ImportError(msg)
class GrapheneSettings(object):
    """
    Attribute-style access to Graphene configuration.

    Each setting is looked up first in the user-supplied settings (or the
    ``GRAPHENE`` dict from Django settings), then in the defaults.  Settings
    listed in ``import_strings`` are resolved from dotted import paths to
    the objects they name.  Resolved values are cached on the instance, so
    subsequent accesses skip ``__getattr__`` entirely.
    """

    def __init__(self, user_settings=None, defaults=None, import_strings=None):
        if user_settings:
            self._user_settings = user_settings
        self.defaults = defaults or DEFAULTS
        self.import_strings = import_strings or IMPORT_STRINGS

    @property
    def user_settings(self):
        # Lazily pull the GRAPHENE dict from Django settings on first use.
        if not hasattr(self, "_user_settings"):
            self._user_settings = getattr(settings, "GRAPHENE", {})
        return self._user_settings

    def __getattr__(self, attr):
        if attr not in self.defaults:
            raise AttributeError("Invalid Graphene setting: '%s'" % attr)
        # User settings win; otherwise fall back to the defaults.
        if attr in self.user_settings:
            value = self.user_settings[attr]
        else:
            value = self.defaults[attr]
        # Coerce import-notation strings into the objects they name.
        if attr in self.import_strings:
            value = perform_import(value, attr)
        # Cache on the instance so __getattr__ is not hit again.
        setattr(self, attr, value)
        return value
# Module-level settings singleton; rebuilt by reload_graphene_settings below.
graphene_settings = GrapheneSettings(None, DEFAULTS, IMPORT_STRINGS)
def reload_graphene_settings(*args, **kwargs):
    """Rebuild the module-level settings object when GRAPHENE changes."""
    setting_name, new_value = kwargs["setting"], kwargs["value"]
    if setting_name == "GRAPHENE":
        global graphene_settings
        graphene_settings = GrapheneSettings(new_value, DEFAULTS, IMPORT_STRINGS)
# Keep the singleton in sync when Django's test signal reports a settings change.
setting_changed.connect(reload_graphene_settings)
| 29.710345 | 81 | 0.674327 | from __future__ import unicode_literals
from django.conf import settings
from django.test.signals import setting_changed
from django.utils import six
try:
import importlib
except ImportError:
from django.utils import importlib
DEFAULTS = {
"SCHEMA": None,
"SCHEMA_OUTPUT": "schema.json",
"SCHEMA_INDENT": 2,
"MIDDLEWARE": (),
"RELAY_CONNECTION_ENFORCE_FIRST_OR_LAST": False,
"RELAY_CONNECTION_MAX_LIMIT": 100,
}
def init_midleware():
if settings.DEBUG:
DEFAULTS["MIDDLEWARE"] += ("graphene_neo4j.debug.DjangoDebugMiddleware",)
IMPORT_STRINGS = ("MIDDLEWARE", "SCHEMA")
def perform_import(val, setting_name):
if val is None:
return None
elif isinstance(val, six.string_types):
return import_from_string(val, setting_name)
elif isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
return val
def import_from_string(val, setting_name):
try:
parts = val.split(".")
module_path, class_name = ".".join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except (ImportError, AttributeError) as e:
msg = "Could not import '%s' for Graphene setting '%s'. %s: %s." % (
val,
setting_name,
e.__class__.__name__,
e,
)
raise ImportError(msg)
class GrapheneSettings(object):
def __init__(self, user_settings=None, defaults=None, import_strings=None):
if user_settings:
self._user_settings = user_settings
self.defaults = defaults or DEFAULTS
self.import_strings = import_strings or IMPORT_STRINGS
@property
def user_settings(self):
if not hasattr(self, "_user_settings"):
self._user_settings = getattr(settings, "GRAPHENE", {})
return self._user_settings
def __getattr__(self, attr):
if attr not in self.defaults:
raise AttributeError("Invalid Graphene setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if attr in self.import_strings:
val = perform_import(val, attr)
# Cache the result
setattr(self, attr, val)
return val
graphene_settings = GrapheneSettings(None, DEFAULTS, IMPORT_STRINGS)
def reload_graphene_settings(*args, **kwargs):
global graphene_settings
setting, value = kwargs["setting"], kwargs["value"]
if setting == "GRAPHENE":
graphene_settings = GrapheneSettings(value, DEFAULTS, IMPORT_STRINGS)
setting_changed.connect(reload_graphene_settings)
| true | true |
1c2b8fae919acb7bbfb4b3891c86c5b4efb0cec8 | 315 | py | Python | python/petitBloc/ui/const.py | sol-ansano-kim/unitBlock | ba95a5e5625359d4bbab97cbf18df5ba259e1aee | [
"MIT"
] | 24 | 2018-01-17T02:58:10.000Z | 2021-08-20T20:34:08.000Z | python/petitBloc/ui/const.py | sol-ansano-kim/unitBlock | ba95a5e5625359d4bbab97cbf18df5ba259e1aee | [
"MIT"
] | 2 | 2018-12-05T08:02:49.000Z | 2021-05-21T06:57:02.000Z | python/petitBloc/ui/const.py | sol-ansano-kim/unitBlock | ba95a5e5625359d4bbab97cbf18df5ba259e1aee | [
"MIT"
] | 5 | 2018-02-06T05:40:17.000Z | 2022-03-19T06:30:20.000Z | ObjectName = "petitBloc"
# UI sizing constants -- presumably pixel dimensions; confirm against usage.
ParamEditorBlockNameMaximumWidth = 300
ParamLabelMinimumWidth = 50
ParamLabelMaximumWidth = 200
LogMaximumHeight = 400
# Re-export core package constants so UI modules only import from this module.
from .. import const as petitBlocConst
RootBoxName = petitBlocConst.RootBoxName
InProxyBlock = petitBlocConst.InProxyBlock
OutProxyBlock = petitBlocConst.OutProxyBlock
| 24.230769 | 44 | 0.847619 | ObjectName = "petitBloc"
ParamEditorBlockNameMaximumWidth = 300
ParamLabelMinimumWidth = 50
ParamLabelMaximumWidth = 200
LogMaximumHeight = 400
from .. import const as petitBlocConst
RootBoxName = petitBlocConst.RootBoxName
InProxyBlock = petitBlocConst.InProxyBlock
OutProxyBlock = petitBlocConst.OutProxyBlock
| true | true |
1c2b902fa54767ec48c56cb5fd6be4d410cd6e74 | 1,136 | py | Python | rest_registration/utils/verification.py | pragex/django-rest-registration | 2750b3e6d33cde15ba46d5c5b4cb683973f7b914 | [
"MIT"
] | null | null | null | rest_registration/utils/verification.py | pragex/django-rest-registration | 2750b3e6d33cde15ba46d5c5b4cb683973f7b914 | [
"MIT"
] | 4 | 2021-04-08T21:52:33.000Z | 2021-06-10T20:25:03.000Z | rest_registration/utils/verification.py | pragex/django-rest-registration | 2750b3e6d33cde15ba46d5c5b4cb683973f7b914 | [
"MIT"
] | null | null | null | from urllib.parse import urlencode
from django.core.signing import BadSignature, SignatureExpired
from django.utils.translation import gettext as _
from rest_registration.exceptions import BadRequest
def verify_signer_or_bad_request(signer):
    """Run ``signer.verify()``, translating signature failures into HTTP 400.

    :param signer: object exposing ``verify()`` which raises
        ``SignatureExpired`` / ``BadSignature`` on invalid input.
    :raises BadRequest: when the signature is expired or invalid.
    """
    try:
        signer.verify()
    except SignatureExpired:
        # NOTE(review): SignatureExpired subclasses BadSignature in Django,
        # so this handler must stay before the BadSignature one -- confirm.
        raise BadRequest(_("Signature expired"))
    except BadSignature:
        raise BadRequest(_("Invalid signature"))
def build_default_verification_url(signer):
    """Build the verification URL from the signer's base URL and signed data.

    When the signer carries a request, the URL is made absolute against it;
    otherwise the relative ``base?querystring`` form is returned.
    """
    base_url = signer.get_base_url()
    query = urlencode(signer.get_signed_data())
    url = "?".join([base_url, query])
    if signer.request:
        return signer.request.build_absolute_uri(url)
    return url
def build_default_template_context(
        user, user_address, data,
        notification_type=None, notification_method=None):
    """Assemble the context dict for a notification template.

    Starts from ``user`` / ``email``, adds ``verification_url`` when the
    data carries a ``params_signer``, and merges the remaining data in.
    The input ``data`` mapping is never mutated.
    """
    payload = dict(data)
    signer = payload.pop('params_signer', None)
    context = {
        'user': user,
        'email': user_address,
    }
    if signer:
        context['verification_url'] = signer.get_url()
    context.update(payload)
    return context
| 28.4 | 72 | 0.712148 | from urllib.parse import urlencode
from django.core.signing import BadSignature, SignatureExpired
from django.utils.translation import gettext as _
from rest_registration.exceptions import BadRequest
def verify_signer_or_bad_request(signer):
try:
signer.verify()
except SignatureExpired:
raise BadRequest(_("Signature expired"))
except BadSignature:
raise BadRequest(_("Invalid signature"))
def build_default_verification_url(signer):
base_url = signer.get_base_url()
params = urlencode(signer.get_signed_data())
url = '{base_url}?{params}'.format(base_url=base_url, params=params)
if signer.request:
url = signer.request.build_absolute_uri(url)
return url
def build_default_template_context(
user, user_address, data,
notification_type=None, notification_method=None):
context = {
'user': user,
'email': user_address,
}
data = data.copy()
params_signer = data.pop('params_signer', None)
if params_signer:
context['verification_url'] = params_signer.get_url()
context.update(data)
return context
| true | true |
1c2b9435d8c802973218dc178d9ea3d7468dc3f8 | 10,031 | py | Python | webapp/api.py | nairsshreya/cs257 | 4703e21bb70a313647b8cbfd0b5b7e4a5e9a28b0 | [
"MIT"
] | null | null | null | webapp/api.py | nairsshreya/cs257 | 4703e21bb70a313647b8cbfd0b5b7e4a5e9a28b0 | [
"MIT"
] | null | null | null | webapp/api.py | nairsshreya/cs257 | 4703e21bb70a313647b8cbfd0b5b7e4a5e9a28b0 | [
"MIT"
] | null | null | null | '''
api.py
Shreya Nair and Elliot Hanson, 5th November 2021
Updated 8th - 24th November, 2021
Flask API to support a national parks web application that connects to a database and uses user input to
format queries and display results.
'''
import flask
import json
import psycopg2
import config
import sys
# All routes in this module are registered on this blueprint.
api = flask.Blueprint('api', __name__)
def get_connection():
    ''' Returns a connection to the database described in the
    config module. May raise an exception as described in the
    documentation for psycopg2.connect. The caller is responsible
    for closing the returned connection. '''
    return psycopg2.connect(database=config.database,
                            user=config.user,
                            password=config.password)
def get_state():
    ''' Queries the database for the id and name of every state, ordered
    by id, for the state drop-down selector.  Returns a list of
    {'id': ..., 'name': ...} dicts; on any database error the error is
    logged and whatever was collected so far is returned. '''
    query = '''SELECT id, name
    FROM states ORDER BY id'''
    states = []
    try:
        connection = get_connection()
        # Nested try/finally so the cursor and connection are closed even
        # when execute() or iteration raises (the old code leaked them).
        try:
            cursor = connection.cursor()
            try:
                cursor.execute(query, tuple())
                for row in cursor:
                    states.append({'id': row[0], 'name': row[1]})
            finally:
                cursor.close()
        finally:
            connection.close()
    except Exception as e:
        # Best-effort: log and fall through to return what was collected.
        print(e, file=sys.stderr)
    return states
def get_park_info():
    ''' Queries the database for the code, name and state code of every
    National Park, ordered by name, for the park drop-down selector.
    Returns a list of dicts; on any database error the error is logged
    and whatever was collected so far is returned. '''
    query = '''SELECT park_code, park_name, state_code
    FROM parks ORDER BY park_name'''
    park_names = []
    try:
        connection = get_connection()
        # Nested try/finally so the cursor and connection are closed even
        # when execute() or iteration raises (the old code leaked them).
        try:
            cursor = connection.cursor()
            try:
                cursor.execute(query, tuple())
                for row in cursor:
                    park_names.append({'park_code': row[0],
                                       'park_name': row[1],
                                       'state_code': row[2]})
            finally:
                cursor.close()
        finally:
            connection.close()
    except Exception as e:
        # Best-effort: log and fall through to return what was collected.
        print(e, file=sys.stderr)
    return park_names
def get_category():
    ''' Queries the database for the species category names, ordered
    alphabetically, for the category drop-down selector.  Returns a list
    of {'name': ...} dicts; on any database error the error is logged and
    whatever was collected so far is returned. '''
    query = '''SELECT category
    FROM categories ORDER BY category'''
    categories = []
    try:
        connection = get_connection()
        # Nested try/finally so the cursor and connection are closed even
        # when execute() or iteration raises (the old code leaked them).
        try:
            cursor = connection.cursor()
            try:
                cursor.execute(query, tuple())
                for row in cursor:
                    categories.append({'name': row[0]})
            finally:
                cursor.close()
        finally:
            connection.close()
    except Exception as e:
        # Best-effort: log and fall through to return what was collected.
        print(e, file=sys.stderr)
    return categories
@api.route('/park_search/parks', strict_slashes=False)
def load_parks():
    ''' Serve the park selector data (code, name, state code) as JSON. '''
    park_rows = get_park_info()
    return json.dumps(park_rows)
@api.route('/park_search/states', strict_slashes=False)
def load_states():
    ''' Serve the state selector data (id, name) as JSON. '''
    state_rows = get_state()
    return json.dumps(state_rows)
@api.route('/park_search/', strict_slashes=False)
def get_park():
    ''' Return JSON for parks matching the optional park/state selectors.

    Query parameters:
      park_name -- park code to filter on; the placeholder value
                   'selectParkName' (or absence) matches every park.
      state     -- state id to filter on; the placeholder value
                   'selectState' (or absence) matches every state.
    '''
    name = flask.request.args.get('park_name')
    state = flask.request.args.get('state')
    # The selectors submit placeholder values when nothing was chosen;
    # normalise those to '' so CONCAT('%','','%') matches all rows.
    if name == 'selectParkName' or name is None:
        name = ''
    if state == 'selectState' or state is None:
        state = ''
    query = '''SELECT DISTINCT park_code, park_name, state_code, acreage, longitude, latitude
            FROM parks, states
            WHERE parks.park_code iLIKE CONCAT('%%',%s,'%%')
            AND parks.state_code iLIKE CONCAT('%%',%s,'%%')
            ORDER BY parks.park_name'''
    park_results = []
    try:
        connection = get_connection()
        # Nested try/finally so the cursor and connection are closed even
        # when execute() or iteration raises (the old code leaked them).
        try:
            cursor = connection.cursor()
            try:
                cursor.execute(query, (name, state))
                for row in cursor:
                    park_results.append({
                        'park_code': row[0], 'park_name': row[1],
                        'state_code': row[2], 'acreage': row[3],
                        'longitude': row[4], 'latitude': row[5]})
            finally:
                cursor.close()
        finally:
            connection.close()
    except Exception as e:
        print(e, file=sys.stderr)
    return json.dumps(park_results)
# Code for Species Page
def _like_pattern(value, placeholder):
    ''' Normalise a selector value into a SQL iLIKE pattern.

    ``None`` and the selector's placeholder value (e.g. 'selectState')
    mean "no filter" and collapse to '%%', which matches every row. '''
    if value is None or value == placeholder:
        value = ''
    return '%' + value + '%'


def _merge_species_row(results, row):
    ''' Fold one (species, park) result row into the per-species dict.

    Row layout: (common_names, scientific_name, category, order_name,
    family, nativeness, park_code, state_id, park_name).  Entries are keyed
    by scientific name; repeated rows for the same species accumulate the
    parks and states it occurs in, bucketed by nativeness. '''
    key = row[1]
    park_label = ' ' + row[6]
    nativeness = row[5]
    if key in results:
        entry = results[key]
        entry['park_names'].append(row[8])
        if row[7] not in entry['state']:
            entry['state'].append(row[7])
        if nativeness == 'Native' and park_label not in entry['nativeTo']:
            entry['nativeTo'].append(park_label)
        elif nativeness == 'Not Native' and park_label not in entry['notNative']:
            entry['notNative'].append(park_label)
        elif nativeness in ('Unknown', 'Present', 'Not Confirmed'):
            if park_label not in entry['unknown']:
                entry['unknown'].append(park_label)
    else:
        entry = {'common_name': row[0], 'scientific_name': row[1],
                 'category': row[2], 'order': row[3], 'family': row[4],
                 'nativeTo': [], 'notNative': [], 'unknown': [],
                 'state': [row[7]], 'park_names': [row[8]]}
        if nativeness == 'Native':
            entry['nativeTo'].append(park_label)
        elif nativeness == 'Not Native':
            entry['notNative'].append(park_label)
        else:
            entry['unknown'].append(park_label)
        results[key] = entry


@api.route('/species_search/', strict_slashes=False)
def get_species():
    ''' Query species filtered by any combination of name, category, order,
    family, park and state, and return a JSON object keyed by scientific
    name.  Placeholder selector values (and absent parameters) match all
    rows for that field. '''
    species_name = _like_pattern(flask.request.args.get('name'), 'species_name')
    category = _like_pattern(flask.request.args.get('category'), 'selectCategory')
    order = _like_pattern(flask.request.args.get('order'), 'order')
    family = _like_pattern(flask.request.args.get('family'), 'family')
    park_code = _like_pattern(flask.request.args.get('park_code'), 'selectParkName')
    state = _like_pattern(flask.request.args.get('state'), 'selectState')
    query = '''SELECT species.common_names, species.scientific_name, categories.category, orders.order_name,
        families.family, species.nativeness, parks.park_code, states.id, parks.park_name
        FROM species, categories, orders, families, states, parks
        WHERE (species.common_names iLIKE %s OR species.scientific_name iLIKE %s)
        AND species.category_id = categories.id
        AND species.order_id = orders.id
        AND orders.order_name iLIKE %s
        AND categories.category iLIKE %s
        AND species.family_id = families.id
        AND families.family iLIKE %s
        AND species.park_code iLIKE %s
        AND parks.state_code iLIKE %s
        AND parks.state_code iLIKE concat('%%', states.id, '%%')
        AND species.park_code = parks.park_code
        ORDER BY species.scientific_name'''
    # Initialised before the try: the old code defined `results` inside the
    # try block, so a connection failure raised NameError at the return.
    results = {}
    try:
        connection = get_connection()
        # Close the cursor and connection even when iteration raises.
        try:
            cursor = connection.cursor()
            try:
                cursor.execute(query, (species_name, species_name, order, category,
                                       family, park_code, state))
                for row in cursor:
                    _merge_species_row(results, row)
            finally:
                cursor.close()
        finally:
            connection.close()
    except Exception as e:
        print(e, file=sys.stderr)
    return json.dumps(results)
@api.route('/species_search/categories', strict_slashes=False)
def load_categories():
    ''' Serve the species-category selector data as JSON. '''
    category_rows = get_category()
    return json.dumps(category_rows)
@api.route('/species_search/states', strict_slashes=False)
def load_states_species():
    ''' Serve the state selector data for the species page as JSON. '''
    state_rows = get_state()
    return json.dumps(state_rows)
@api.route('/species_search/parks', strict_slashes=False)
def load_parks_species():
    ''' Serve the park selector data for the species page as JSON. '''
    park_rows = get_park_info()
    return json.dumps(park_rows)
@api.route('/help/')
def help():
    ''' Serve the plain-text help page describing the available requests.

    NOTE: the function name shadows the builtin `help`; kept unchanged so
    the registered endpoint name is preserved. '''
    # Use a context manager so the file handle is closed promptly
    # (the original open(...).read() leaked the handle).
    with open('templates/help.txt') as help_file:
        help_text = help_file.read()
    return flask.Response(help_text, mimetype='text/plain')
| 37.85283 | 154 | 0.574021 | import flask
import json
import psycopg2
import config
import sys
api = flask.Blueprint('api', __name__)
def get_connection():
return psycopg2.connect(database=config.database,
user=config.user,
password=config.password)
def get_state():
query = '''SELECT id, name
FROM states ORDER BY id'''
states = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, tuple())
for row in cursor:
state = {'id': row[0], 'name': row[1]}
states.append(state)
cursor.close()
connection.close()
except Exception as e:
print(e, file=sys.stderr)
return states
def get_park_info():
query = '''SELECT park_code, park_name, state_code
FROM parks ORDER BY park_name'''
park_names = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, tuple())
for row in cursor:
park_info = {'park_code': row[0], 'park_name': row[1], 'state_code': row[2],}
park_names.append(park_info)
cursor.close()
connection.close()
except Exception as e:
print(e, file=sys.stderr)
return park_names
def get_category():
query = '''SELECT category
FROM categories ORDER BY category'''
categories = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, tuple())
for row in cursor:
category = {'name': row[0]}
categories.append(category)
cursor.close()
connection.close()
except Exception as e:
print(e, file=sys.stderr)
return categories
@api.route('/park_search/parks', strict_slashes=False)
def load_parks():
return json.dumps(get_park_info())
@api.route('/park_search/states', strict_slashes=False)
def load_states():
return json.dumps(get_state())
@api.route('/park_search/', strict_slashes=False)
def get_park():
name = flask.request.args.get('park_name')
state = flask.request.args.get('state')
if name == 'selectParkName' or name is None :
name = ''
if state == 'selectState' or state is None:
state = ''
query = '''SELECT DISTINCT park_code, park_name, state_code, acreage, longitude, latitude
FROM parks, states
WHERE parks.park_code iLIKE CONCAT('%%',%s,'%%')
AND parks.state_code iLIKE CONCAT('%%',%s,'%%')
ORDER BY parks.park_name'''
park_results = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, (name, state))
for row in cursor:
park = {'park_code': row[0], 'park_name': row[1], 'state_code': row[2],
'acreage': row[3], 'longitude': row[4], 'latitude': row[5]}
park_results.append(park)
cursor.close()
connection.close()
except Exception as e:
print(e, file=sys.stderr)
return json.dumps(park_results)
@api.route('/species_search/', strict_slashes=False)
def get_species():
species_name = flask.request.args.get('name')
if species_name == 'species_name' or species_name is None:
species_name = ''
species_name = '%' + species_name + '%'
category = flask.request.args.get('category')
if category == 'selectCategory' or category is None:
category = ''
category = '%' + category + '%'
order = flask.request.args.get('order')
if order == 'order' or order is None:
order = ''
order = '%' + order + '%'
family = flask.request.args.get('family')
if family == 'family' or family is None:
family = ''
family = '%' + family + '%'
park_code = flask.request.args.get('park_code')
if park_code == 'selectParkName' or park_code is None :
park_code = ''
park_code = '%' + park_code + '%'
state = flask.request.args.get('state')
if state == 'selectState' or state is None:
state = ''
state = '%' + state + '%'
query = '''SELECT species.common_names, species.scientific_name, categories.category, orders.order_name,
families.family, species.nativeness, parks.park_code, states.id, parks.park_name
FROM species, categories, orders, families, states, parks
WHERE (species.common_names iLIKE %s OR species.scientific_name iLIKE %s)
AND species.category_id = categories.id
AND species.order_id = orders.id
AND orders.order_name iLIKE %s
AND categories.category iLIKE %s
AND species.family_id = families.id
AND families.family iLIKE %s
AND species.park_code iLIKE %s
AND parks.state_code iLIKE %s
AND parks.state_code iLIKE concat('%%', states.id, '%%')
AND species.park_code = parks.park_code
ORDER BY species.scientific_name'''
species_results = []
try:
connection = get_connection()
cursor = connection.cursor()
cursor.execute(query, (species_name, species_name, order, category, family, park_code, state))
results = {}
for row in cursor:
if row[1] in results:
temp = results[row[1]]
temp['park_names'].append(row[8])
if row[7] not in temp['state']:
temp['state'].append(row[7])
if row[5] == 'Native' and (' ' + row[6]) not in temp['nativeTo']:
temp['nativeTo'].append(' ' + row[6])
elif row[5] == 'Not Native' and (' ' + row[6]) not in temp['notNative']:
temp['notNative'].append(' ' + row[6])
elif row[5] == 'Unknown' or row[5] == 'Present' or row[5] == 'Not Confirmed':
if (' ' + row[6]) not in temp['unknown']:
temp['unknown'].append(' ' + row[6])
else:
if row[5] == 'Native':
results[row[1]] = {'common_name': row[0], 'scientific_name': row[1], 'category': row[2],
'order': row[3], 'family': row[4], 'nativeTo': [' ' + row[6]], 'notNative': [], 'unknown':[], 'state':[row[7]], 'park_names':[row[8]]}
elif row[5] == 'Not Native':
results[row[1]] = {'common_name': row[0], 'scientific_name': row[1], 'category': row[2],
'order': row[3], 'family': row[4], 'nativeTo': [], 'notNative': [' ' + row[6]],
'unknown': [], 'state': [row[7]], 'park_names':[row[8]]}
else:
results[row[1]] = {'common_name': row[0], 'scientific_name': row[1], 'category': row[2],
'order': row[3], 'family': row[4], 'nativeTo': [], 'notNative': [],
'unknown': [' ' + row[6]], 'state': [row[7]], 'park_names':[row[8]]}
cursor.close()
connection.close()
except Exception as e:
print(e, file=sys.stderr)
return json.dumps(results)
@api.route('/species_search/categories', strict_slashes=False)
def load_categories():
return json.dumps(get_category())
@api.route('/species_search/states', strict_slashes=False)
def load_states_species():
return json.dumps(get_state())
@api.route('/species_search/parks', strict_slashes=False)
def load_parks_species():
return json.dumps(get_park_info())
@api.route('/help/')
def help():
help_text = open('templates/help.txt').read()
return flask.Response(help_text, mimetype='text/plain')
| true | true |
1c2b94b1aac569b3095b406511f6fdc947e04413 | 122 | py | Python | Discord_Games/__init__.py | v1s1t0r999/Discord-Games | 275bbb52fdcdb87d2116d1248619ea98e8b7d721 | [
"MIT"
] | 24 | 2021-04-03T21:18:15.000Z | 2022-03-26T09:37:53.000Z | Discord_Games/__init__.py | v1s1t0r999/Discord-Games | 275bbb52fdcdb87d2116d1248619ea98e8b7d721 | [
"MIT"
] | 7 | 2021-05-12T11:34:33.000Z | 2022-03-31T21:53:27.000Z | Discord_Games/__init__.py | v1s1t0r999/Discord-Games | 275bbb52fdcdb87d2116d1248619ea98e8b7d721 | [
"MIT"
] | 12 | 2021-05-15T13:50:10.000Z | 2022-01-17T03:42:38.000Z | __version__ = "1.6.9"
__author__ = "Tom-the-Bomb"
__license__ = "MIT"
__copyright__ = "Copyright 2021 Tom-the-Bomb" | 30.5 | 45 | 0.688525 | __version__ = "1.6.9"
__author__ = "Tom-the-Bomb"
__license__ = "MIT"
__copyright__ = "Copyright 2021 Tom-the-Bomb" | true | true |
1c2b94f5bfd726db80963fcd865ef59f20582ca1 | 8,082 | py | Python | riam_api_client/models/inline_response20033_message_tabla_desarrollo.py | RiskAmerica/api-client-python | 468c554a0440bef5086828631e25d99d41e28571 | [
"MIT"
] | null | null | null | riam_api_client/models/inline_response20033_message_tabla_desarrollo.py | RiskAmerica/api-client-python | 468c554a0440bef5086828631e25d99d41e28571 | [
"MIT"
] | null | null | null | riam_api_client/models/inline_response20033_message_tabla_desarrollo.py | RiskAmerica/api-client-python | 468c554a0440bef5086828631e25d99d41e28571 | [
"MIT"
] | 1 | 2021-04-14T15:52:03.000Z | 2021-04-14T15:52:03.000Z | # coding: utf-8
"""
APIs RISKAMERICA
A continuación les presentamos la documentación las **APIs** **de** **RiskAmerica**, el cual es un servicio pagado ofrecido por RiskAmerica que se contrata por separado a nuestras otras ofertas de software. Algunas consideraciones que debe tener al momento de usar las APIs: - El APIKEY o Token lo puede conseguir solicitándolo al equipo comercial de RiskAmerica - El request necesita ser enviado con el header **Accept:** **application/json** para que responda en formato **JSON** (de no ser enviado con esto se responderá en formato **XML**) - Todos los Servicios son **REST** y sus parametros pueden ser enviados tanto en **POST** como **GET** - El uso de las APIs puede llevar un cobro asociado según se pacte en el acuerdo comercial, por lo que le recomendamos ser cuidadosos en el uso de éstas para evitar sobre-cargos innecesarios. - RiskAmerica funciona con un mecanismo de **WhiteList** **de** **IPs** para las consultas de las API. Para habilitar o modificar la lista de IPs permitidas debe contactarse al mail **contacto@riskamerica.com**. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# Importing related models
class InlineResponse20033MessageTablaDesarrollo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fecha_cupon': 'date',
'interes': 'float',
'amortizacion': 'float',
'capital_insoluto': 'float',
'flujo': 'float'
}
attribute_map = {
'fecha_cupon': 'fechaCupon',
'interes': 'interes',
'amortizacion': 'amortizacion',
'capital_insoluto': 'capitalInsoluto',
'flujo': 'flujo'
}
def __init__(self, fecha_cupon=None, interes=None, amortizacion=None, capital_insoluto=None, flujo=None): # noqa: E501
"""InlineResponse20033MessageTablaDesarrollo - a model defined in Swagger""" # noqa: E501
self._fecha_cupon = None
self._interes = None
self._amortizacion = None
self._capital_insoluto = None
self._flujo = None
self.discriminator = None
if fecha_cupon is not None:
self.fecha_cupon = fecha_cupon
if interes is not None:
self.interes = interes
if amortizacion is not None:
self.amortizacion = amortizacion
if capital_insoluto is not None:
self.capital_insoluto = capital_insoluto
if flujo is not None:
self.flujo = flujo
@property
def fecha_cupon(self):
"""Gets the fecha_cupon of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
Fecha del cupon # noqa: E501
:return: The fecha_cupon of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
:rtype: date
"""
return self._fecha_cupon
@fecha_cupon.setter
def fecha_cupon(self, fecha_cupon):
"""Sets the fecha_cupon of this InlineResponse20033MessageTablaDesarrollo.
Fecha del cupon # noqa: E501
:param fecha_cupon: The fecha_cupon of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
:type: date
"""
self._fecha_cupon = fecha_cupon
@property
def interes(self):
"""Gets the interes of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
Interes en base 100 # noqa: E501
:return: The interes of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
:rtype: float
"""
return self._interes
@interes.setter
def interes(self, interes):
"""Sets the interes of this InlineResponse20033MessageTablaDesarrollo.
Interes en base 100 # noqa: E501
:param interes: The interes of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
:type: float
"""
self._interes = interes
@property
def amortizacion(self):
"""Gets the amortizacion of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
Amortización en base 100 # noqa: E501
:return: The amortizacion of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
:rtype: float
"""
return self._amortizacion
@amortizacion.setter
def amortizacion(self, amortizacion):
"""Sets the amortizacion of this InlineResponse20033MessageTablaDesarrollo.
Amortización en base 100 # noqa: E501
:param amortizacion: The amortizacion of this InlineResponse20033MessageTablaDesarrollo. # noqa: E501
:type: float
"""
self._amortizacion = amortizacion
@property
def capital_insoluto(self):
    """Gets the capital_insoluto of this InlineResponse20033MessageTablaDesarrollo.

    Outstanding principal in base 100 ("Capital Insoluto en base 100").

    :return: The capital_insoluto of this InlineResponse20033MessageTablaDesarrollo.
    :rtype: float
    """
    return self._capital_insoluto
@capital_insoluto.setter
def capital_insoluto(self, capital_insoluto):
    """Sets the capital_insoluto of this InlineResponse20033MessageTablaDesarrollo.

    Outstanding principal in base 100 ("Capital Insoluto en base 100").

    :param capital_insoluto: The capital_insoluto of this InlineResponse20033MessageTablaDesarrollo.
    :type: float
    """
    self._capital_insoluto = capital_insoluto
@property
def flujo(self):
    """Gets the flujo of this InlineResponse20033MessageTablaDesarrollo.

    Cash flow in base 100 ("Flujo en base 100").

    :return: The flujo of this InlineResponse20033MessageTablaDesarrollo.
    :rtype: float
    """
    return self._flujo
@flujo.setter
def flujo(self, flujo):
    """Sets the flujo of this InlineResponse20033MessageTablaDesarrollo.

    Cash flow in base 100 ("Flujo en base 100").

    :param flujo: The flujo of this InlineResponse20033MessageTablaDesarrollo.
    :type: float
    """
    self._flujo = flujo
def to_dict(self):
    """Returns the model properties as a dict.

    Recurses into nested swagger models: any value (or list/dict member)
    exposing a ``to_dict`` method is serialized through it.
    """
    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # serialize each list element that is itself a model
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # serialize dict values that are themselves models
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    # generated boilerplate: merge dict items if the model subclasses dict
    if issubclass(InlineResponse20033MessageTablaDesarrollo, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Returns the pretty-printed string representation of the model."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint` -- delegates to :meth:`to_str`."""
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are of the same model type with equal attributes."""
    if not isinstance(other, InlineResponse20033MessageTablaDesarrollo):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Returns true if both objects are not equal (inverse of ``__eq__``)."""
    return not self == other
| 36.080357 | 1,070 | 0.641673 |
import pprint
import re
import six
class InlineResponse20033MessageTablaDesarrollo(object):
    """Swagger-generated model: one row of an instrument's development
    ("tabla de desarrollo") table.

    Per the sibling documented copy in this corpus, all monetary fields
    (interes, amortizacion, capital_insoluto, flujo) are expressed in
    base 100 and ``fecha_cupon`` is the coupon date.
    """
    # attribute name -> declared swagger type
    swagger_types = {
        'fecha_cupon': 'date',
        'interes': 'float',
        'amortizacion': 'float',
        'capital_insoluto': 'float',
        'flujo': 'float'
    }
    # attribute name -> JSON key used on the wire
    attribute_map = {
        'fecha_cupon': 'fechaCupon',
        'interes': 'interes',
        'amortizacion': 'amortizacion',
        'capital_insoluto': 'capitalInsoluto',
        'flujo': 'flujo'
    }
    def __init__(self, fecha_cupon=None, interes=None, amortizacion=None, capital_insoluto=None, flujo=None):
        """All fields are optional; fields left unset remain ``None``."""
        self._fecha_cupon = None
        self._interes = None
        self._amortizacion = None
        self._capital_insoluto = None
        self._flujo = None
        self.discriminator = None
        if fecha_cupon is not None:
            self.fecha_cupon = fecha_cupon
        if interes is not None:
            self.interes = interes
        if amortizacion is not None:
            self.amortizacion = amortizacion
        if capital_insoluto is not None:
            self.capital_insoluto = capital_insoluto
        if flujo is not None:
            self.flujo = flujo
    @property
    def fecha_cupon(self):
        """Coupon date (:class:`date`)."""
        return self._fecha_cupon
    @fecha_cupon.setter
    def fecha_cupon(self, fecha_cupon):
        self._fecha_cupon = fecha_cupon
    @property
    def interes(self):
        """Interest in base 100 (float)."""
        return self._interes
    @interes.setter
    def interes(self, interes):
        self._interes = interes
    @property
    def amortizacion(self):
        """Amortization in base 100 (float)."""
        return self._amortizacion
    @amortizacion.setter
    def amortizacion(self, amortizacion):
        self._amortizacion = amortizacion
    @property
    def capital_insoluto(self):
        """Outstanding principal in base 100 (float)."""
        return self._capital_insoluto
    @capital_insoluto.setter
    def capital_insoluto(self, capital_insoluto):
        self._capital_insoluto = capital_insoluto
    @property
    def flujo(self):
        """Cash flow in base 100 (float)."""
        return self._flujo
    @flujo.setter
    def flujo(self, flujo):
        self._flujo = flujo
    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # generated boilerplate: merge dict items if the model subclasses dict
        if issubclass(InlineResponse20033MessageTablaDesarrollo, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are the same model type with equal attributes."""
        if not isinstance(other, InlineResponse20033MessageTablaDesarrollo):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true | true |
1c2b9585be8b8a990ab91b217805fb2d1ec67fb8 | 2,517 | py | Python | tasks/task_11_CAD_cell_tally_heat/example_CAD_simulation.py | py1sl/openmc_workshop | 4468b0d9e9e57c6c7cb491d365ef2c3a019e3ecd | [
"MIT"
] | 1 | 2021-08-23T22:49:31.000Z | 2021-08-23T22:49:31.000Z | tasks/task_11_CAD_cell_tally_heat/example_CAD_simulation.py | pshriwise/neutronics-workshop | d2b80b2f73c50b94a56b98f0bb180c03ecb0a906 | [
"MIT"
] | null | null | null | tasks/task_11_CAD_cell_tally_heat/example_CAD_simulation.py | pshriwise/neutronics-workshop | d2b80b2f73c50b94a56b98f0bb180c03ecb0a906 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""example_CAD_simulation.py: uses a dagmc.h5m file for the geometry."""
__author__ = "Jonathan Shimwell"
import openmc
import json
import os
from neutronics_material_maker import Material
from parametric_plasma_source import Plasma
# MATERIALS using the neutronics material maker
breeder_material = Material(material_name='Li4SiO4', enrichment=90).openmc_material
copper = Material(material_name="copper").openmc_material
eurofer = Material(material_name='eurofer').openmc_material
mats = openmc.Materials([breeder_material, eurofer, copper])
# GEOMETRY using dagmc doesn't contain any CSG geometry
universe = openmc.Universe()
geom = openmc.Geometry(universe)
# SIMULATION SETTINGS
# Instantiate a Settings object
sett = openmc.Settings()
batches = 10
sett.batches = batches
sett.inactive = 0
sett.particles = 1000
sett.run_mode = 'fixed source'
sett.dagmc = True # this is the openmc command enables use of the dagmc.h5m file as the geometry
# creates a source object
source = openmc.Source()
# this creates a neutron distribution with the shape of a tokamak plasma
my_plasma = Plasma(elongation=2.9,
minor_radius=1.118,
major_radius=1.9,
triangularity = 0.55)
# there are other parameters that can be set for the plasma, but we can use the defaults for now
my_plasma.export_plasma_source('my_custom_plasma_source.so')
# sets the source poition, direction and energy with predefined plasma parameters (see source_sampling.cpp)
source.library = './my_custom_plasma_source.so'
sett.source = source
tallies = openmc.Tallies()
tbr_tally = openmc.Tally(name='TBR')
tbr_tally.scores = ['(n,Xt)'] # MT 205 is the (n,Xt) reaction where X is a wildcard, if MT 105 or (n,t) then some tritium production will be missed, for example (n,nt) which happens in Li7 would be missed
tallies.append(tbr_tally)
# Run OpenMC!
model = openmc.model.Model(geom, mats, sett, tallies)
sp_filename = model.run()
# open the results file
sp = openmc.StatePoint(sp_filename)
# access the tally
tbr_tally = sp.get_tally(name='TBR')
df = tbr_tally.get_pandas_dataframe()
tbr_tally_result = df['mean'].sum()
# print result
print('The tritium breeding ratio was found, TBR = ', tbr_tally_result)
# output result in json file
json_output = {'TBR': tbr_tally_result}
with open('cad_simulation_results.json', 'w') as file_object:
json.dump(json_output, file_object, indent=2)
os.system('cp cad_simulation_results.json /my_openmc_workshop')
| 30.325301 | 205 | 0.756456 |
__author__ = "Jonathan Shimwell"
import openmc
import json
import os
from neutronics_material_maker import Material
from parametric_plasma_source import Plasma
# MATERIALS made with the neutronics material maker
breeder_material = Material(material_name='Li4SiO4', enrichment=90).openmc_material
copper = Material(material_name="copper").openmc_material
eurofer = Material(material_name='eurofer').openmc_material
mats = openmc.Materials([breeder_material, eurofer, copper])
# GEOMETRY: empty universe -- the DAGMC .h5m file supplies the CAD cells
universe = openmc.Universe()
geom = openmc.Geometry(universe)
# SIMULATION SETTINGS
# Instantiate a Settings object
sett = openmc.Settings()
batches = 10
sett.batches = batches
sett.inactive = 0
sett.particles = 1000
sett.run_mode = 'fixed source'
sett.dagmc = True # this is the openmc command enables use of the dagmc.h5m file as the geometry
# creates a source object
source = openmc.Source()
# this creates a neutron distribution with the shape of a tokamak plasma
my_plasma = Plasma(elongation=2.9,
                   minor_radius=1.118,
                   major_radius=1.9,
                   triangularity = 0.55)
# there are other parameters that can be set for the plasma, but we can use the defaults for now
my_plasma.export_plasma_source('my_custom_plasma_source.so')
# sets the source position, direction and energy with predefined plasma parameters (see source_sampling.cpp)
source.library = './my_custom_plasma_source.so'
sett.source = source
tallies = openmc.Tallies()
tbr_tally = openmc.Tally(name='TBR')
tbr_tally.scores = ['(n,Xt)'] # MT 205 is the (n,Xt) reaction where X is a wildcard, if MT 105 or (n,t) then some tritium production will be missed, for example (n,nt) which happens in Li7 would be missed
tallies.append(tbr_tally)
# Run OpenMC!
model = openmc.model.Model(geom, mats, sett, tallies)
sp_filename = model.run()
# open the results (statepoint) file
sp = openmc.StatePoint(sp_filename)
# access the tally; summing the mean column gives the total TBR
tbr_tally = sp.get_tally(name='TBR')
df = tbr_tally.get_pandas_dataframe()
tbr_tally_result = df['mean'].sum()
# print result
print('The tritium breeding ratio was found, TBR = ', tbr_tally_result)
# output result in json file
json_output = {'TBR': tbr_tally_result}
with open('cad_simulation_results.json', 'w') as file_object:
    json.dump(json_output, file_object, indent=2)
# copy the results into the workshop shared folder
os.system('cp cad_simulation_results.json /my_openmc_workshop')
| true | true |
1c2b96c9e94e311f4a24da718b549b59fea95823 | 507 | py | Python | nams/solutions/io.py | nitish-awasthi/Network-Analysis-Made-Simple | 1829f63d9814c7893a1e008b8b1717da95a54ae7 | [
"MIT"
] | 853 | 2015-04-08T01:58:34.000Z | 2022-03-28T15:39:30.000Z | nams/solutions/io.py | alex-soldatkin/Network-Analysis-Made-Simple | 85328910d90ce0540476c8ffe7bf026dce7dc8c5 | [
"MIT"
] | 177 | 2015-08-08T05:33:06.000Z | 2022-03-21T15:43:07.000Z | nams/solutions/io.py | alex-soldatkin/Network-Analysis-Made-Simple | 85328910d90ce0540476c8ffe7bf026dce7dc8c5 | [
"MIT"
] | 390 | 2015-03-28T02:22:34.000Z | 2022-03-24T18:47:43.000Z | """Solutions to I/O chapter"""
def filter_graph(G, minimum_num_trips):
    """Return a copy of ``G`` keeping only edges whose ``num_trips``
    attribute is at least ``minimum_num_trips``.

    The input graph is not modified.
    """
    result = G.copy()
    low_traffic_edges = [
        (u, v)
        for u, v, attrs in G.edges(data=True)
        if attrs["num_trips"] < minimum_num_trips
    ]
    result.remove_edges_from(low_traffic_edges)
    return result
def test_graph_integrity(G):
"""Test integrity of raw Divvy graph."""
assert len(G.nodes()) == 300
assert len(G.edges()) == 44422
| 24.142857 | 50 | 0.631164 |
def filter_graph(G, minimum_num_trips):
    """Return a copy of ``G`` keeping only edges whose ``num_trips``
    edge attribute is at least ``minimum_num_trips``.

    ``G`` itself is left unmodified; edges are removed from the copy.
    """
    G_filtered = G.copy()
    for u, v, d in G.edges(data=True):
        if d["num_trips"] < minimum_num_trips:
            G_filtered.remove_edge(u, v)
    return G_filtered
def test_graph_integrity(G):
    """Test integrity of the raw Divvy graph (expected node/edge counts)."""
    assert len(G.nodes()) == 300
    assert len(G.edges()) == 44422
| true | true |
1c2b979139a59f1ab39a1be1902c8412e0c51c9a | 12,928 | py | Python | wifipumpkin3/core/wirelessmode/docker.py | paramint/wifipumpkin3 | cd985184d471a85d0a7b1c826b93f798ef478772 | [
"Apache-2.0"
] | 1 | 2021-02-03T22:54:35.000Z | 2021-02-03T22:54:35.000Z | wifipumpkin3/core/wirelessmode/docker.py | quang9bh/wifipumpkin3 | f372012daf7936e4597c067e8337c124c9c0042b | [
"Apache-2.0"
] | 1 | 2021-02-10T16:12:08.000Z | 2021-02-10T16:12:08.000Z | wifipumpkin3/core/wirelessmode/docker.py | quang9bh/wifipumpkin3 | f372012daf7936e4597c067e8337c124c9c0042b | [
"Apache-2.0"
] | null | null | null | from wifipumpkin3.core.config.globalimport import *
import weakref
from os import system, path, getcwd, popen, listdir, mkdir, chown
from pwd import getpwnam
from grp import getgrnam
from time import asctime
from subprocess import check_output, Popen, PIPE, STDOUT, CalledProcessError, call
from wifipumpkin3.core.controls.threads import ProcessHostapd, ProcessThread
from wifipumpkin3.core.wirelessmode.wirelessmode import Mode
from wifipumpkin3.core.common.uimodel import *
from wifipumpkin3.core.utility.printer import display_messages, setcolor
from wifipumpkin3.exceptions.errors.networkException import *
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - Marcos Bomfim (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Docker(Mode):
    """Wireless "Docker" access-point mode.

    Drives hostapd on the configured wireless interface and installs
    NAT/forwarding iptables rules so AP clients and Docker containers can
    reach the internet through the active uplink interface.
    """

    configRoot = "docker"
    subConfig = "docker"
    ID = "docker"
    Name = "Wireless Docker AP Mode"

    def __init__(self, parent=0):
        super(Docker, self).__init__(parent)
        self.confgSecurity = []

    @property
    def Settings(self):
        # shared singleton settings object for this mode
        return DockerSettings.getInstance()

    def getSettings(self):
        return self.Settings

    def Initialize(self):
        """Configure network/dhcp settings and write the hostapd config file."""
        self.Settings.Configure()
        if not (self.Settings.checkNetworkAP()):
            sys.exit(1)
        self.check_Wireless_Security()
        # write the pre-rendered hostapd key=value lines (already newline-terminated)
        with open(C.DOCKERHOSTAPDCONF_PATH, "w") as apconf:
            for i in self.Settings.SettingsAP["hostapd"]:
                apconf.write(i)

    def boot(self):
        """Create the hostapd worker thread and wire up its status signals."""
        self.reactor = ProcessHostapd(
            {self.getHostapdPath: [C.DOCKERHOSTAPDCONF_PATH]}, "MDSNjD"
        )
        self.reactor.setObjectName("hostapd_{}".format(self.ID))
        self.reactor.statusAP_connected.connect(self.get_Hostapd_Response)
        self.reactor.statusAPError.connect(self.get_error_hostapdServices)

    def setIptables(self):
        """Write and load the NAT/forwarding ruleset (called after all threads start).

        BUG FIX: the rule strings below were previously missing the commas
        between list items, so Python's implicit string-literal concatenation
        fused the entire ruleset into a single list element.  The generated
        file then held everything on one line, which ``iptables-restore``
        cannot parse.  Each rule is now its own list element and is written
        on its own line.
        """
        self.interfacesLink = Refactor.get_interfaces()
        print(display_messages("sharing internet connection with NAT...", info=True))
        self.ifaceHostapd = self.conf.get("accesspoint", "interface")
        iptables_file = {
            "iptables.ipv4.nat": [
                "# Generated by iptables-save v1.6.0 on Sun Jun 5 11:18:08 2016",
                "*nat",
                ":PREROUTING ACCEPT [123:11003]",
                ":INPUT ACCEPT [5:1402]",
                ":OUTPUT ACCEPT [2:152]",
                ":POSTROUTING ACCEPT [0:0]",
                ":DOCKER - [0:0]",
                "-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER",
                "-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER",
                "-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE",
                "-A POSTROUTING -o $inet -j MASQUERADE",
                "COMMIT",
                "# Completed on Sun Jun 5 11:18:08 2016",
                "# Generated by iptables-save v1.6.0 on Sun Jun 5 11:18:08 2016",
                "*filter",
                ":INPUT ACCEPT [320:23582]",
                ":FORWARD ACCEPT [0:0]",
                ":OUTPUT ACCEPT [194:28284]",
                ":DOCKER - [0:0]",
                "-A FORWARD -o docker0 -j DOCKER",
                "-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
                "-A FORWARD -i docker0 ! -o docker0 -j ACCEPT",
                "-A FORWARD -i docker0 -o docker0 -j ACCEPT",
                "-A FORWARD -i $inet -o $wlan -m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-A FORWARD -i $wlan -o $inet -j ACCEPT",
                "COMMIT",
                "# Completed on Sun Jun 5 11:18:08 2016",
            ]
        }
        with open(C.DOCKERIPTABLESPATH, "w") as f:
            for line in iptables_file["iptables.ipv4.nat"]:
                try:
                    # $inet -> active uplink interface, $wlan -> AP interface
                    if "$inet" in line:
                        line = line.replace(
                            "$inet", self.interfacesLink["activated"][0]
                        )
                    if "$wlan" in line:
                        line = line.replace("$wlan", self.ifaceHostapd)
                    f.write("{}\n".format(line))
                except Exception:
                    # best effort: skip any rule whose substitution fails
                    pass
        popen("iptables-restore < {}".format(C.DOCKERIPTABLESPATH))

    def get_Hostapd_Response(self, data):
        # NOTE(review): this slot is connected to statusAP_connected but the
        # message says "has left" -- confirm the intended wording upstream.
        if self.conf.get("accesspoint", "status_ap", format=bool):
            print(
                display_messages(
                    "{} client has left AP ".format(setcolor(data, color="red")),
                    info=True,
                )
            )

    def setNetworkManager(self, interface=str, Remove=False):
        """Add or remove the AP interface from NetworkManager's unmanaged list.

        BUG FIX: the config file was previously opened in binary mode
        ("wb"); ``configparser`` writes ``str`` and raises TypeError on a
        binary stream under Python 3, so the file is now opened in text mode.
        """
        networkmanager = C.NETWORKMANAGER
        config = configparser.RawConfigParser()
        MAC = Linux.get_interface_mac(interface)
        exclude = {
            "MAC": "mac:{}".format(MAC),
            "interface": "interface-name:{}".format(interface),
        }
        if not Remove:
            if path.exists(networkmanager):
                config.read(networkmanager)
                try:
                    config.add_section("keyfile")
                except configparser.DuplicateSectionError:
                    config.set(
                        "keyfile",
                        "unmanaged-devices",
                        "{}".format(
                            exclude["interface"] if MAC != None else exclude["MAC"]
                        ),
                    )
                else:
                    config.set(
                        "keyfile",
                        "unmanaged-devices",
                        "{}".format(
                            exclude["interface"] if MAC != None else exclude["MAC"]
                        ),
                    )
                finally:
                    with open(networkmanager, "w") as configfile:
                        config.write(configfile)
                    return True
            return False
        elif Remove:
            if path.exists(networkmanager):
                config.read(networkmanager)
                try:
                    config.remove_option("keyfile", "unmanaged-devices")
                    with open(networkmanager, "w") as configfile:
                        config.write(configfile)
                    return True
                except configparser.NoSectionError:
                    return True
            return False
class DockerSettings(CoreSettings):
    """Network/DHCP settings singleton for the Docker AP mode."""
    Name = "Static"
    ID = "Static"
    Category = "Wireless"
    # class-level registry; getInstance() returns the first created object
    instances = []
    @classmethod
    def getInstance(cls):
        """Return the (weakly referenced) singleton instance."""
        return cls.instances[0]
    def __init__(self, parent):
        super(DockerSettings, self).__init__(parent)
        # register a weak proxy so the registry does not keep the object alive
        self.__class__.instances.append(weakref.proxy(self))
        self.conf = SettingsINI.getInstance()
        self.title = self.__class__.__name__
        self.SettingsAP = {}
        self.interfaces = Linux.get_interfaces()
        self.DHCP = self.getDHCPConfig()
    def getDHCPConfig(self):
        """Read the default DHCP parameters from the settings file into a dict."""
        DHCP = {}
        DHCP["leasetimeDef"] = self.conf.get("dhcpdefault", "leasetimeDef")
        DHCP["leasetimeMax"] = self.conf.get("dhcpdefault", "leasetimeMax")
        DHCP["subnet"] = self.conf.get("dhcpdefault", "subnet")
        DHCP["router"] = self.conf.get("dhcpdefault", "router")
        DHCP["netmask"] = self.conf.get("dhcpdefault", "netmask")
        DHCP["broadcast"] = self.conf.get("dhcpdefault", "broadcast")
        DHCP["range"] = self.conf.get("dhcpdefault", "range")
        return DHCP
    def Configure(self):
        """Configure the interface and dhcpd in order to mount the Access Point."""
        self.ifaceHostapd = self.conf.get("accesspoint", "interface")
        # shell command lists and config-file fragments used below
        self.SettingsAP = {
            "interface": [
                "ifconfig %s up" % (self.ifaceHostapd),
                "ifconfig %s %s netmask %s"
                % (self.ifaceHostapd, self.DHCP["router"], self.DHCP["netmask"]),
                "ifconfig %s mtu 1400" % (self.ifaceHostapd),
                "route add -net %s netmask %s gw %s"
                % (self.DHCP["subnet"], self.DHCP["netmask"], self.DHCP["router"]),
            ],
            "kill": [
                "iptables -w --flush",
                "iptables -w --table nat --flush",
                "iptables -w --delete-chain",
                "iptables -w --table nat --delete-chain",
                # NOTE(review): "dhpcd" looks like a typo for "dhcpd" -- confirm upstream
                "killall dhpcd 2>/dev/null",
                "ifconfig {} down".format(self.ifaceHostapd),
                "ifconfig {} up".format(self.ifaceHostapd),
                "ifconfig {} 0".format(self.ifaceHostapd),
            ],
            "hostapd": [
                "interface={}\n".format(self.ifaceHostapd),
                "ssid={}\n".format(self.conf.get("accesspoint", "ssid")),
                "channel={}\n".format(self.conf.get("accesspoint", "channel")),
                "bssid={}\n".format(self.conf.get("accesspoint", "bssid")),
            ],
            "dhcp-server": [
                "authoritative;\n",
                "default-lease-time {};\n".format(self.DHCP["leasetimeDef"]),
                "max-lease-time {};\n".format(self.DHCP["leasetimeMax"]),
                "subnet %s netmask %s {\n"
                % (self.DHCP["subnet"], self.DHCP["netmask"]),
                "option routers {};\n".format(self.DHCP["router"]),
                "option subnet-mask {};\n".format(self.DHCP["netmask"]),
                "option broadcast-address {};\n".format(self.DHCP["broadcast"]),
                'option domain-name "%s";\n' % (self.conf.get("accesspoint", "ssid")),
                "option domain-name-servers {};\n".format("8.8.8.8"),
                "range {};\n".format(self.DHCP["range"].replace("/", " ")),
                "}",
            ],
        }
        # "sucess" is the keyword name actually accepted by display_messages
        print(display_messages("enable forwarding in iptables...", sucess=True))
        Linux.set_ip_forward(1)
        # clean iptables settings
        for line in self.SettingsAP["kill"]:
            exec_bash(line)
        # set interface using ifconfig
        for line in self.SettingsAP["interface"]:
            exec_bash(line)
        # check if dhcp option is enabled; if so, render and install dhcpd.conf
        if self.conf.get("accesspoint", "dhcp_server", format=bool):
            with open(C.DHCPCONF_PATH, "w") as dhcp:
                for line in self.SettingsAP["dhcp-server"]:
                    dhcp.write(line)
                dhcp.close()
            if not path.isdir("/etc/dhcp/"):
                mkdir("/etc/dhcp")
            move(C.DHCPCONF_PATH, "/etc/dhcp/")
    def checkNetworkAP(self):
        """Validate that the configured interface can host the AP; raise otherwise."""
        self.ifaceHostapd = self.conf.get("accesspoint", "interface")
        # check if interface supports AP mode (necessary for hostapd)
        if self.conf.get("accesspoint", "check_support_ap_mode", format=bool):
            if not "AP" in self.get_supported_interface(self.ifaceHostapd)["Supported"]:
                raise ApModeSupportError(
                    "[Error] AP mode", "{} ap mode not found!".format(self.ifaceHostapd)
                )
        # check if the wireless interface is already the active uplink (busy)
        if self.ifaceHostapd == self.interfaces["activated"][0]:
            raise InterfaceBuzyError(
                "Wireless interface is busy",
                "Device {} is busy".format(self.ifaceHostapd),
            )
        return True
    def get_supported_interface(self, dev):
        """Get all supported modes of a wireless interface via ``iw``."""
        _iface = {"info": {}, "Supported": []}
        try:
            output = check_output(
                ["iw", dev, "info"], stderr=STDOUT, universal_newlines=True
            )
            # NOTE(review): lines with fewer than two tokens would raise
            # IndexError here, which is not caught -- confirm iw output format.
            for line in output.split("\n\t"):
                _iface["info"][line.split()[0]] = line.split()[1]
            rulesfilter = '| grep "Supported interface modes" -A 10 | grep "*"'
            supportMode = popen(
                "iw phy{} info {}".format(_iface["info"]["wiphy"], rulesfilter)
            ).read()
            for mode in supportMode.split("\n\t\t"):
                _iface["Supported"].append(mode.split("* ")[1])
        except CalledProcessError:
            return _iface
        return _iface
| 42.110749 | 93 | 0.546643 | from wifipumpkin3.core.config.globalimport import *
import weakref
from os import system, path, getcwd, popen, listdir, mkdir, chown
from pwd import getpwnam
from grp import getgrnam
from time import asctime
from subprocess import check_output, Popen, PIPE, STDOUT, CalledProcessError, call
from wifipumpkin3.core.controls.threads import ProcessHostapd, ProcessThread
from wifipumpkin3.core.wirelessmode.wirelessmode import Mode
from wifipumpkin3.core.common.uimodel import *
from wifipumpkin3.core.utility.printer import display_messages, setcolor
from wifipumpkin3.exceptions.errors.networkException import *
class Docker(Mode):
    """Wireless "Docker" access-point mode.

    Drives hostapd on the configured wireless interface and installs
    NAT/forwarding iptables rules so AP clients and Docker containers can
    reach the internet through the active uplink interface.
    """

    configRoot = "docker"
    subConfig = "docker"
    ID = "docker"
    Name = "Wireless Docker AP Mode"

    def __init__(self, parent=0):
        super(Docker, self).__init__(parent)
        self.confgSecurity = []

    @property
    def Settings(self):
        # shared singleton settings object for this mode
        return DockerSettings.getInstance()

    def getSettings(self):
        return self.Settings

    def Initialize(self):
        """Configure network/dhcp settings and write the hostapd config file."""
        self.Settings.Configure()
        if not (self.Settings.checkNetworkAP()):
            sys.exit(1)
        self.check_Wireless_Security()
        # write the pre-rendered hostapd key=value lines (already newline-terminated)
        with open(C.DOCKERHOSTAPDCONF_PATH, "w") as apconf:
            for i in self.Settings.SettingsAP["hostapd"]:
                apconf.write(i)

    def boot(self):
        """Create the hostapd worker thread and wire up its status signals."""
        self.reactor = ProcessHostapd(
            {self.getHostapdPath: [C.DOCKERHOSTAPDCONF_PATH]}, "MDSNjD"
        )
        self.reactor.setObjectName("hostapd_{}".format(self.ID))
        self.reactor.statusAP_connected.connect(self.get_Hostapd_Response)
        self.reactor.statusAPError.connect(self.get_error_hostapdServices)

    def setIptables(self):
        """Write and load the NAT/forwarding ruleset (called after all threads start).

        BUG FIX: the rule strings below were previously missing the commas
        between list items, so Python's implicit string-literal concatenation
        fused the entire ruleset into a single list element.  The generated
        file then held everything on one line, which ``iptables-restore``
        cannot parse.  Each rule is now its own list element and is written
        on its own line.
        """
        self.interfacesLink = Refactor.get_interfaces()
        print(display_messages("sharing internet connection with NAT...", info=True))
        self.ifaceHostapd = self.conf.get("accesspoint", "interface")
        iptables_file = {
            "iptables.ipv4.nat": [
                "# Generated by iptables-save v1.6.0 on Sun Jun 5 11:18:08 2016",
                "*nat",
                ":PREROUTING ACCEPT [123:11003]",
                ":INPUT ACCEPT [5:1402]",
                ":OUTPUT ACCEPT [2:152]",
                ":POSTROUTING ACCEPT [0:0]",
                ":DOCKER - [0:0]",
                "-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER",
                "-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER",
                "-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE",
                "-A POSTROUTING -o $inet -j MASQUERADE",
                "COMMIT",
                "# Completed on Sun Jun 5 11:18:08 2016",
                "# Generated by iptables-save v1.6.0 on Sun Jun 5 11:18:08 2016",
                "*filter",
                ":INPUT ACCEPT [320:23582]",
                ":FORWARD ACCEPT [0:0]",
                ":OUTPUT ACCEPT [194:28284]",
                ":DOCKER - [0:0]",
                "-A FORWARD -o docker0 -j DOCKER",
                "-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
                "-A FORWARD -i docker0 ! -o docker0 -j ACCEPT",
                "-A FORWARD -i docker0 -o docker0 -j ACCEPT",
                "-A FORWARD -i $inet -o $wlan -m state --state RELATED,ESTABLISHED -j ACCEPT",
                "-A FORWARD -i $wlan -o $inet -j ACCEPT",
                "COMMIT",
                "# Completed on Sun Jun 5 11:18:08 2016",
            ]
        }
        with open(C.DOCKERIPTABLESPATH, "w") as f:
            for line in iptables_file["iptables.ipv4.nat"]:
                try:
                    # $inet -> active uplink interface, $wlan -> AP interface
                    if "$inet" in line:
                        line = line.replace(
                            "$inet", self.interfacesLink["activated"][0]
                        )
                    if "$wlan" in line:
                        line = line.replace("$wlan", self.ifaceHostapd)
                    f.write("{}\n".format(line))
                except Exception:
                    # best effort: skip any rule whose substitution fails
                    pass
        popen("iptables-restore < {}".format(C.DOCKERIPTABLESPATH))

    def get_Hostapd_Response(self, data):
        # NOTE(review): this slot is connected to statusAP_connected but the
        # message says "has left" -- confirm the intended wording upstream.
        if self.conf.get("accesspoint", "status_ap", format=bool):
            print(
                display_messages(
                    "{} client has left AP ".format(setcolor(data, color="red")),
                    info=True,
                )
            )

    def setNetworkManager(self, interface=str, Remove=False):
        """Add or remove the AP interface from NetworkManager's unmanaged list.

        BUG FIX: the config file was previously opened in binary mode
        ("wb"); ``configparser`` writes ``str`` and raises TypeError on a
        binary stream under Python 3, so the file is now opened in text mode.
        """
        networkmanager = C.NETWORKMANAGER
        config = configparser.RawConfigParser()
        MAC = Linux.get_interface_mac(interface)
        exclude = {
            "MAC": "mac:{}".format(MAC),
            "interface": "interface-name:{}".format(interface),
        }
        if not Remove:
            if path.exists(networkmanager):
                config.read(networkmanager)
                try:
                    config.add_section("keyfile")
                except configparser.DuplicateSectionError:
                    config.set(
                        "keyfile",
                        "unmanaged-devices",
                        "{}".format(
                            exclude["interface"] if MAC != None else exclude["MAC"]
                        ),
                    )
                else:
                    config.set(
                        "keyfile",
                        "unmanaged-devices",
                        "{}".format(
                            exclude["interface"] if MAC != None else exclude["MAC"]
                        ),
                    )
                finally:
                    with open(networkmanager, "w") as configfile:
                        config.write(configfile)
                    return True
            return False
        elif Remove:
            if path.exists(networkmanager):
                config.read(networkmanager)
                try:
                    config.remove_option("keyfile", "unmanaged-devices")
                    with open(networkmanager, "w") as configfile:
                        config.write(configfile)
                    return True
                except configparser.NoSectionError:
                    return True
            return False
class DockerSettings(CoreSettings):
    """Network/DHCP settings singleton for the Docker AP mode."""
    Name = "Static"
    ID = "Static"
    Category = "Wireless"
    # class-level registry; getInstance() returns the first created object
    instances = []
    @classmethod
    def getInstance(cls):
        """Return the (weakly referenced) singleton instance."""
        return cls.instances[0]
    def __init__(self, parent):
        super(DockerSettings, self).__init__(parent)
        # register a weak proxy so the registry does not keep the object alive
        self.__class__.instances.append(weakref.proxy(self))
        self.conf = SettingsINI.getInstance()
        self.title = self.__class__.__name__
        self.SettingsAP = {}
        self.interfaces = Linux.get_interfaces()
        self.DHCP = self.getDHCPConfig()
    def getDHCPConfig(self):
        """Read the default DHCP parameters from the settings file into a dict."""
        DHCP = {}
        DHCP["leasetimeDef"] = self.conf.get("dhcpdefault", "leasetimeDef")
        DHCP["leasetimeMax"] = self.conf.get("dhcpdefault", "leasetimeMax")
        DHCP["subnet"] = self.conf.get("dhcpdefault", "subnet")
        DHCP["router"] = self.conf.get("dhcpdefault", "router")
        DHCP["netmask"] = self.conf.get("dhcpdefault", "netmask")
        DHCP["broadcast"] = self.conf.get("dhcpdefault", "broadcast")
        DHCP["range"] = self.conf.get("dhcpdefault", "range")
        return DHCP
    def Configure(self):
        """Configure the interface and dhcpd in order to mount the Access Point."""
        self.ifaceHostapd = self.conf.get("accesspoint", "interface")
        # shell command lists and config-file fragments used below
        self.SettingsAP = {
            "interface": [
                "ifconfig %s up" % (self.ifaceHostapd),
                "ifconfig %s %s netmask %s"
                % (self.ifaceHostapd, self.DHCP["router"], self.DHCP["netmask"]),
                "ifconfig %s mtu 1400" % (self.ifaceHostapd),
                "route add -net %s netmask %s gw %s"
                % (self.DHCP["subnet"], self.DHCP["netmask"], self.DHCP["router"]),
            ],
            "kill": [
                "iptables -w --flush",
                "iptables -w --table nat --flush",
                "iptables -w --delete-chain",
                "iptables -w --table nat --delete-chain",
                # NOTE(review): "dhpcd" looks like a typo for "dhcpd" -- confirm upstream
                "killall dhpcd 2>/dev/null",
                "ifconfig {} down".format(self.ifaceHostapd),
                "ifconfig {} up".format(self.ifaceHostapd),
                "ifconfig {} 0".format(self.ifaceHostapd),
            ],
            "hostapd": [
                "interface={}\n".format(self.ifaceHostapd),
                "ssid={}\n".format(self.conf.get("accesspoint", "ssid")),
                "channel={}\n".format(self.conf.get("accesspoint", "channel")),
                "bssid={}\n".format(self.conf.get("accesspoint", "bssid")),
            ],
            "dhcp-server": [
                "authoritative;\n",
                "default-lease-time {};\n".format(self.DHCP["leasetimeDef"]),
                "max-lease-time {};\n".format(self.DHCP["leasetimeMax"]),
                "subnet %s netmask %s {\n"
                % (self.DHCP["subnet"], self.DHCP["netmask"]),
                "option routers {};\n".format(self.DHCP["router"]),
                "option subnet-mask {};\n".format(self.DHCP["netmask"]),
                "option broadcast-address {};\n".format(self.DHCP["broadcast"]),
                'option domain-name "%s";\n' % (self.conf.get("accesspoint", "ssid")),
                "option domain-name-servers {};\n".format("8.8.8.8"),
                "range {};\n".format(self.DHCP["range"].replace("/", " ")),
                "}",
            ],
        }
        # "sucess" is the keyword name actually accepted by display_messages
        print(display_messages("enable forwarding in iptables...", sucess=True))
        Linux.set_ip_forward(1)
        # flush iptables and reset the AP interface
        for line in self.SettingsAP["kill"]:
            exec_bash(line)
        # bring up and address the AP interface via ifconfig/route
        for line in self.SettingsAP["interface"]:
            exec_bash(line)
        # when the dhcp option is enabled, render and install dhcpd.conf
        if self.conf.get("accesspoint", "dhcp_server", format=bool):
            with open(C.DHCPCONF_PATH, "w") as dhcp:
                for line in self.SettingsAP["dhcp-server"]:
                    dhcp.write(line)
                dhcp.close()
            if not path.isdir("/etc/dhcp/"):
                mkdir("/etc/dhcp")
            move(C.DHCPCONF_PATH, "/etc/dhcp/")
    def checkNetworkAP(self):
        """Validate that the configured interface can host the AP; raise otherwise."""
        self.ifaceHostapd = self.conf.get("accesspoint", "interface")
        # interface must support AP mode (necessary for hostapd)
        if self.conf.get("accesspoint", "check_support_ap_mode", format=bool):
            if not "AP" in self.get_supported_interface(self.ifaceHostapd)["Supported"]:
                raise ApModeSupportError(
                    "[Error] AP mode", "{} ap mode not found!".format(self.ifaceHostapd)
                )
        # interface must not already be the active uplink (busy)
        if self.ifaceHostapd == self.interfaces["activated"][0]:
            raise InterfaceBuzyError(
                "Wireless interface is busy",
                "Device {} is busy".format(self.ifaceHostapd),
            )
        return True
    def get_supported_interface(self, dev):
        """Get all supported modes of a wireless interface via ``iw``."""
        _iface = {"info": {}, "Supported": []}
        try:
            output = check_output(
                ["iw", dev, "info"], stderr=STDOUT, universal_newlines=True
            )
            # NOTE(review): lines with fewer than two tokens would raise
            # IndexError here, which is not caught -- confirm iw output format.
            for line in output.split("\n\t"):
                _iface["info"][line.split()[0]] = line.split()[1]
            rulesfilter = '| grep "Supported interface modes" -A 10 | grep "*"'
            supportMode = popen(
                "iw phy{} info {}".format(_iface["info"]["wiphy"], rulesfilter)
            ).read()
            for mode in supportMode.split("\n\t\t"):
                _iface["Supported"].append(mode.split("* ")[1])
        except CalledProcessError:
            return _iface
        return _iface
| true | true |
1c2b97e7cb9d89cc18c7b84286ca91c8ae9fc482 | 827 | py | Python | backend/app/core/tests/test_models.py | DBankx/qlip_py | 0e5622c45ce6a817e24583e9f395f9391f7e6361 | [
"MIT"
] | null | null | null | backend/app/core/tests/test_models.py | DBankx/qlip_py | 0e5622c45ce6a817e24583e9f395f9391f7e6361 | [
"MIT"
] | null | null | null | backend/app/core/tests/test_models.py | DBankx/qlip_py | 0e5622c45ce6a817e24583e9f395f9391f7e6361 | [
"MIT"
] | null | null | null | from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from django.contrib.auth import get_user_model
class TestModels(TestCase):
"""Test database models"""
def setUp(self):
    """Create the reusable credential fixtures shared by the tests."""
    self.test_email = 'test@test.com'
    self.test_password = 'Pa$$w0rd'
    self.first_name = 'Test'
    self.last_name = 'User'
    self.username = 'testuser'
def test_create_user_with_email_successful(self):
"""Test that creating user with valid email is successful"""
user = get_user_model().objects.create_user(email=self.test_email, password=self.test_password, first_name=self.first_name, last_name=self.last_name, username=self.username)
self.assertEqual(user.email, self.test_email)
self.assertNotEqual(user.avatar, None);
self.assertTrue(user.check_password(self.test_password)) | 33.08 | 175 | 0.783555 | from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from django.contrib.auth import get_user_model
class TestModels(TestCase):
def setUp(self):
    """Create the reusable credential fixtures shared by the tests."""
    self.test_email = 'test@test.com'
    self.test_password = 'Pa$$w0rd'
    self.first_name = 'Test'
    self.last_name = 'User'
    self.username = 'testuser'
def test_create_user_with_email_successful(self):
user = get_user_model().objects.create_user(email=self.test_email, password=self.test_password, first_name=self.first_name, last_name=self.last_name, username=self.username)
self.assertEqual(user.email, self.test_email)
self.assertNotEqual(user.avatar, None);
self.assertTrue(user.check_password(self.test_password)) | true | true |
1c2b9a0bea0e5ad14b22a7621c2559944af40027 | 1,577 | py | Python | setup.py | jsfehler/pytest-match-skip | 2e907b528c155bd95d90d45e1cdf3d96f0df608d | [
"MIT"
] | 2 | 2018-04-13T05:37:31.000Z | 2019-07-19T21:53:20.000Z | setup.py | jsfehler/pytest-match-skip | 2e907b528c155bd95d90d45e1cdf3d96f0df608d | [
"MIT"
] | 3 | 2017-10-05T11:42:55.000Z | 2019-05-08T15:55:38.000Z | setup.py | jsfehler/pytest-match-skip | 2e907b528c155bd95d90d45e1cdf3d96f0df608d | [
"MIT"
] | 1 | 2017-10-02T15:02:54.000Z | 2017-10-02T15:02:54.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
with open(file_path, 'r') as f:
return f.read()
setup(
name='pytest-match-skip',
version='0.2.1',
author='Joshua Fehler',
author_email='jsfehler@gmail.com',
maintainer='Joshua Fehler',
maintainer_email='jsfehler@gmail.com',
license='MIT',
url='https://github.com/jsfehler/pytest-match-skip',
description='Skip matching marks. Matches partial marks using wildcards.',
long_description=read('README.rst'),
packages=['pytest_match_skip'],
install_requires=['pytest>=4.4.1'],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
entry_points={
'pytest11': [
'match-skip = pytest_match_skip.plugin',
],
},
)
| 30.921569 | 78 | 0.607483 |
import os
from setuptools import setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
with open(file_path, 'r') as f:
return f.read()
setup(
name='pytest-match-skip',
version='0.2.1',
author='Joshua Fehler',
author_email='jsfehler@gmail.com',
maintainer='Joshua Fehler',
maintainer_email='jsfehler@gmail.com',
license='MIT',
url='https://github.com/jsfehler/pytest-match-skip',
description='Skip matching marks. Matches partial marks using wildcards.',
long_description=read('README.rst'),
packages=['pytest_match_skip'],
install_requires=['pytest>=4.4.1'],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
entry_points={
'pytest11': [
'match-skip = pytest_match_skip.plugin',
],
},
)
| true | true |
1c2b9b26d4dfe531f0458928aa5bc51aa6683e42 | 3,452 | py | Python | brownie/test/stateful.py | AlanVerbner/brownie | c41311d30d7d0b25d32e83caa4943209969ceee0 | [
"MIT"
] | null | null | null | brownie/test/stateful.py | AlanVerbner/brownie | c41311d30d7d0b25d32e83caa4943209969ceee0 | [
"MIT"
] | null | null | null | brownie/test/stateful.py | AlanVerbner/brownie | c41311d30d7d0b25d32e83caa4943209969ceee0 | [
"MIT"
] | 1 | 2020-08-30T01:18:53.000Z | 2020-08-30T01:18:53.000Z | #!/usr/bin/python3
import sys
from collections import deque
from inspect import getmembers
from types import FunctionType
from typing import Any, Optional
from hypothesis import settings as hp_settings
from hypothesis import stateful as sf
from hypothesis.strategies import SearchStrategy
import brownie
from brownie.utils import color
sf.__tracebackhide__ = True
marker = deque("-/|\\-/|\\")
class _BrownieStateMachine:
_failed = False
def __init__(self) -> None:
brownie.rpc.revert()
sf.RuleBasedStateMachine.__init__(self)
# pytest capturemanager plugin, added when accessed via the state_manager fixture
capman = getattr(self, "_capman", None)
if capman:
with capman.global_and_fixture_disabled():
c = color("red" if self._failed else "yellow")
sys.stdout.write(f"{c}{marker[0]}\033[1D")
sys.stdout.flush()
marker.rotate(1)
if hasattr(self, "setup"):
self.setup() # type: ignore
def execute_step(self, step):
try:
super().execute_step(step)
except Exception:
type(self)._failed = True
raise
def check_invariants(self):
try:
super().check_invariants()
except Exception:
type(self)._failed = True
raise
def _member_filter(member: tuple) -> bool:
attr, fn = member
return (
type(fn) is FunctionType
and not hasattr(sf.RuleBasedStateMachine, attr)
and not next((i for i in fn.__dict__.keys() if i.startswith("hypothesis_stateful")), False)
)
def _attr_filter(attr: str, pattern: str) -> bool:
return attr == pattern or attr.startswith(f"{pattern}_")
def _generate_state_machine(rules_object: type) -> type:
bases = (_BrownieStateMachine, rules_object, sf.RuleBasedStateMachine)
machine = type("BrownieStateMachine", bases, {})
strategies = {k: v for k, v in getmembers(rules_object) if isinstance(v, SearchStrategy)}
for attr, fn in filter(_member_filter, getmembers(machine)):
varnames = [[i] for i in fn.__code__.co_varnames[1 : fn.__code__.co_argcount]]
if fn.__defaults__:
for i in range(-1, -1 - len(fn.__defaults__), -1):
varnames[i].append(fn.__defaults__[i])
if _attr_filter(attr, "initialize"):
wrapped = sf.initialize(**{key[0]: strategies[key[-1]] for key in varnames})
setattr(machine, attr, wrapped(fn))
elif _attr_filter(attr, "invariant"):
setattr(machine, attr, sf.invariant()(fn))
elif _attr_filter(attr, "rule"):
wrapped = sf.rule(**{key[0]: strategies[key[-1]] for key in varnames})
setattr(machine, attr, wrapped(fn))
return machine
def state_machine(
rules_object: type, *args: Any, settings: Optional[dict] = None, **kwargs: Any
) -> None:
machine = _generate_state_machine(rules_object)
if hasattr(rules_object, "__init__"):
# __init__ is treated as a class method
rules_object.__init__(machine, *args, **kwargs) # type: ignore
brownie.rpc.snapshot()
try:
sf.run_state_machine_as_test(lambda: machine(), settings=hp_settings(**settings or {}))
finally:
if hasattr(machine, "teardown_final"):
# teardown_final is also a class method
machine.teardown_final(machine) # type: ignore
| 31.669725 | 99 | 0.641367 |
import sys
from collections import deque
from inspect import getmembers
from types import FunctionType
from typing import Any, Optional
from hypothesis import settings as hp_settings
from hypothesis import stateful as sf
from hypothesis.strategies import SearchStrategy
import brownie
from brownie.utils import color
sf.__tracebackhide__ = True
marker = deque("-/|\\-/|\\")
class _BrownieStateMachine:
_failed = False
def __init__(self) -> None:
brownie.rpc.revert()
sf.RuleBasedStateMachine.__init__(self)
capman = getattr(self, "_capman", None)
if capman:
with capman.global_and_fixture_disabled():
c = color("red" if self._failed else "yellow")
sys.stdout.write(f"{c}{marker[0]}\033[1D")
sys.stdout.flush()
marker.rotate(1)
if hasattr(self, "setup"):
self.setup()
def execute_step(self, step):
try:
super().execute_step(step)
except Exception:
type(self)._failed = True
raise
def check_invariants(self):
try:
super().check_invariants()
except Exception:
type(self)._failed = True
raise
def _member_filter(member: tuple) -> bool:
attr, fn = member
return (
type(fn) is FunctionType
and not hasattr(sf.RuleBasedStateMachine, attr)
and not next((i for i in fn.__dict__.keys() if i.startswith("hypothesis_stateful")), False)
)
def _attr_filter(attr: str, pattern: str) -> bool:
return attr == pattern or attr.startswith(f"{pattern}_")
def _generate_state_machine(rules_object: type) -> type:
bases = (_BrownieStateMachine, rules_object, sf.RuleBasedStateMachine)
machine = type("BrownieStateMachine", bases, {})
strategies = {k: v for k, v in getmembers(rules_object) if isinstance(v, SearchStrategy)}
for attr, fn in filter(_member_filter, getmembers(machine)):
varnames = [[i] for i in fn.__code__.co_varnames[1 : fn.__code__.co_argcount]]
if fn.__defaults__:
for i in range(-1, -1 - len(fn.__defaults__), -1):
varnames[i].append(fn.__defaults__[i])
if _attr_filter(attr, "initialize"):
wrapped = sf.initialize(**{key[0]: strategies[key[-1]] for key in varnames})
setattr(machine, attr, wrapped(fn))
elif _attr_filter(attr, "invariant"):
setattr(machine, attr, sf.invariant()(fn))
elif _attr_filter(attr, "rule"):
wrapped = sf.rule(**{key[0]: strategies[key[-1]] for key in varnames})
setattr(machine, attr, wrapped(fn))
return machine
def state_machine(
rules_object: type, *args: Any, settings: Optional[dict] = None, **kwargs: Any
) -> None:
machine = _generate_state_machine(rules_object)
if hasattr(rules_object, "__init__"):
rules_object.__init__(machine, *args, **kwargs)
brownie.rpc.snapshot()
try:
sf.run_state_machine_as_test(lambda: machine(), settings=hp_settings(**settings or {}))
finally:
if hasattr(machine, "teardown_final"):
machine.teardown_final(machine)
| true | true |
1c2b9dc7b756c2d3c66afbf2db52e2f1e4fe8fa4 | 1,213 | py | Python | src/main/python/fearank/ranking/RandomForestClassifierScore.py | catilgan/featureranking | b37fdba4aa0adf678e3e415e909bbdc54a977b07 | [
"BSD-3-Clause"
] | null | null | null | src/main/python/fearank/ranking/RandomForestClassifierScore.py | catilgan/featureranking | b37fdba4aa0adf678e3e415e909bbdc54a977b07 | [
"BSD-3-Clause"
] | 7 | 2019-07-30T09:22:18.000Z | 2019-07-30T09:42:45.000Z | src/main/python/fearank/ranking/RandomForestClassifierScore.py | catilgan/featureranking | b37fdba4aa0adf678e3e415e909bbdc54a977b07 | [
"BSD-3-Clause"
] | 1 | 2020-04-07T12:54:19.000Z | 2020-04-07T12:54:19.000Z | from sklearn.ensemble import RandomForestClassifier
from fearank.ranking.Ranking import Ranking
class RandomForestClassifierScore(Ranking):
"""Select features according to Mutual Info Regression.
"""
TYPE = 'random_forest_classifier'
@staticmethod
def execute(data, cols):
return Ranking._execute_single(RandomForestClassifierScore._execute_ranking_sorted, data, cols)
@staticmethod
def execute_multiple(data, cols, iterations=2):
return Ranking._execute_multiple(RandomForestClassifierScore._execute_ranking, data, cols, iterations)
@staticmethod
def _execute_ranking(x, y):
model = RandomForestClassifier()
model.fit(x, y)
idx = list(range(len(model.feature_importances_)))
return idx, model.feature_importances_
@staticmethod
def _execute_ranking_sorted(x, y):
model = RandomForestClassifier()
model.fit(x, y)
idx = list(range(len(model.feature_importances_)))
values = sorted(zip(idx, model.feature_importances_), key=lambda xi: xi[1] * -1)
idx_sorted = [x[0] for x in values]
values_sorted = [x[1] for x in values]
return idx_sorted, values_sorted
| 30.325 | 110 | 0.703215 | from sklearn.ensemble import RandomForestClassifier
from fearank.ranking.Ranking import Ranking
class RandomForestClassifierScore(Ranking):
TYPE = 'random_forest_classifier'
@staticmethod
def execute(data, cols):
return Ranking._execute_single(RandomForestClassifierScore._execute_ranking_sorted, data, cols)
@staticmethod
def execute_multiple(data, cols, iterations=2):
return Ranking._execute_multiple(RandomForestClassifierScore._execute_ranking, data, cols, iterations)
@staticmethod
def _execute_ranking(x, y):
model = RandomForestClassifier()
model.fit(x, y)
idx = list(range(len(model.feature_importances_)))
return idx, model.feature_importances_
@staticmethod
def _execute_ranking_sorted(x, y):
model = RandomForestClassifier()
model.fit(x, y)
idx = list(range(len(model.feature_importances_)))
values = sorted(zip(idx, model.feature_importances_), key=lambda xi: xi[1] * -1)
idx_sorted = [x[0] for x in values]
values_sorted = [x[1] for x in values]
return idx_sorted, values_sorted
| true | true |
1c2b9e11330dff349d1ec406c6330edeaf4d9929 | 2,969 | py | Python | scripts/internal/print_announce.py | alxchk/psutil | 550ae6f8119f2d4607d283e9fc224ead24862d1a | [
"BSD-3-Clause"
] | 1 | 2021-08-14T13:48:32.000Z | 2021-08-14T13:48:32.000Z | scripts/internal/print_announce.py | alxchk/psutil | 550ae6f8119f2d4607d283e9fc224ead24862d1a | [
"BSD-3-Clause"
] | 1 | 2018-04-15T22:59:15.000Z | 2018-04-15T22:59:15.000Z | scripts/internal/print_announce.py | alxchk/psutil | 550ae6f8119f2d4607d283e9fc224ead24862d1a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Prints release announce based on HISTORY.rst file content.
"""
import os
import re
from psutil import __version__ as PRJ_VERSION
HERE = os.path.abspath(os.path.dirname(__file__))
HISTORY = os.path.abspath(os.path.join(HERE, '../../HISTORY.rst'))
PRJ_NAME = 'psutil'
PRJ_URL_HOME = 'https://github.com/giampaolo/psutil'
PRJ_URL_DOC = 'http://psutil.readthedocs.io'
PRJ_URL_DOWNLOAD = 'https://pypi.python.org/pypi/psutil'
PRJ_URL_WHATSNEW = \
'https://github.com/giampaolo/psutil/blob/master/HISTORY.rst'
template = """\
Hello all,
I'm glad to announce the release of {prj_name} {prj_version}:
{prj_urlhome}
About
=====
psutil (process and system utilities) is a cross-platform library for \
retrieving information on running processes and system utilization (CPU, \
memory, disks, network) in Python. It is useful mainly for system \
monitoring, profiling and limiting process resources and management of \
running processes. It implements many functionalities offered by command \
line tools such as: ps, top, lsof, netstat, ifconfig, who, df, kill, free, \
nice, ionice, iostat, iotop, uptime, pidof, tty, taskset, pmap. It \
currently supports Linux, Windows, OSX, Sun Solaris, FreeBSD, OpenBSD, NetBSD \
and AIX, both 32-bit and 64-bit architectures, with Python versions from 2.6 \
to 3.6. PyPy is also known to work.
What's new
==========
{changes}
Links
=====
- Home page: {prj_urlhome}
- Download: {prj_urldownload}
- Documentation: {prj_urldoc}
- What's new: {prj_urlwhatsnew}
--
Giampaolo - http://grodola.blogspot.com
"""
def get_changes():
"""Get the most recent changes for this release by parsing
HISTORY.rst file.
"""
with open(HISTORY) as f:
lines = f.readlines()
block = []
# eliminate the part preceding the first block
for i, line in enumerate(lines):
line = lines.pop(0)
if line.startswith('===='):
break
lines.pop(0)
for i, line in enumerate(lines):
line = lines.pop(0)
line = line.rstrip()
if re.match("^- \d+_: ", line):
num, _, rest = line.partition(': ')
num = ''.join([x for x in num if x.isdigit()])
line = "- #%s: %s" % (num, rest)
if line.startswith('===='):
break
block.append(line)
# eliminate bottom empty lines
block.pop(-1)
while not block[-1]:
block.pop(-1)
return "\n".join(block)
def main():
changes = get_changes()
print(template.format(
prj_name=PRJ_NAME,
prj_version=PRJ_VERSION,
prj_urlhome=PRJ_URL_HOME,
prj_urldownload=PRJ_URL_DOWNLOAD,
prj_urldoc=PRJ_URL_DOC,
prj_urlwhatsnew=PRJ_URL_WHATSNEW,
changes=changes,
))
if __name__ == '__main__':
main()
| 25.594828 | 79 | 0.659818 |
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from psutil import __version__ as PRJ_VERSION
HERE = os.path.abspath(os.path.dirname(__file__))
HISTORY = os.path.abspath(os.path.join(HERE, '../../HISTORY.rst'))
PRJ_NAME = 'psutil'
PRJ_URL_HOME = 'https://github.com/giampaolo/psutil'
PRJ_URL_DOC = 'http://psutil.readthedocs.io'
PRJ_URL_DOWNLOAD = 'https://pypi.python.org/pypi/psutil'
PRJ_URL_WHATSNEW = \
'https://github.com/giampaolo/psutil/blob/master/HISTORY.rst'
template = """\
Hello all,
I'm glad to announce the release of {prj_name} {prj_version}:
{prj_urlhome}
About
=====
psutil (process and system utilities) is a cross-platform library for \
retrieving information on running processes and system utilization (CPU, \
memory, disks, network) in Python. It is useful mainly for system \
monitoring, profiling and limiting process resources and management of \
running processes. It implements many functionalities offered by command \
line tools such as: ps, top, lsof, netstat, ifconfig, who, df, kill, free, \
nice, ionice, iostat, iotop, uptime, pidof, tty, taskset, pmap. It \
currently supports Linux, Windows, OSX, Sun Solaris, FreeBSD, OpenBSD, NetBSD \
and AIX, both 32-bit and 64-bit architectures, with Python versions from 2.6 \
to 3.6. PyPy is also known to work.
What's new
==========
{changes}
Links
=====
- Home page: {prj_urlhome}
- Download: {prj_urldownload}
- Documentation: {prj_urldoc}
- What's new: {prj_urlwhatsnew}
--
Giampaolo - http://grodola.blogspot.com
"""
def get_changes():
with open(HISTORY) as f:
lines = f.readlines()
block = []
for i, line in enumerate(lines):
line = lines.pop(0)
if line.startswith('===='):
break
lines.pop(0)
for i, line in enumerate(lines):
line = lines.pop(0)
line = line.rstrip()
if re.match("^- \d+_: ", line):
num, _, rest = line.partition(': ')
num = ''.join([x for x in num if x.isdigit()])
line = "- #%s: %s" % (num, rest)
if line.startswith('===='):
break
block.append(line)
block.pop(-1)
while not block[-1]:
block.pop(-1)
return "\n".join(block)
def main():
changes = get_changes()
print(template.format(
prj_name=PRJ_NAME,
prj_version=PRJ_VERSION,
prj_urlhome=PRJ_URL_HOME,
prj_urldownload=PRJ_URL_DOWNLOAD,
prj_urldoc=PRJ_URL_DOC,
prj_urlwhatsnew=PRJ_URL_WHATSNEW,
changes=changes,
))
if __name__ == '__main__':
main()
| true | true |
1c2b9e70ddd9e1cd920f14b13cc3dbab1e8517a2 | 1,106 | py | Python | fqn_decorators/asynchronous.py | riyazudheen/py-fqn-decorators | 406582ad7b40592b51c0699ef95fca883dd36c42 | [
"Apache-2.0"
] | null | null | null | fqn_decorators/asynchronous.py | riyazudheen/py-fqn-decorators | 406582ad7b40592b51c0699ef95fca883dd36c42 | [
"Apache-2.0"
] | null | null | null | fqn_decorators/asynchronous.py | riyazudheen/py-fqn-decorators | 406582ad7b40592b51c0699ef95fca883dd36c42 | [
"Apache-2.0"
] | null | null | null | """This module implements an async-aware decorator.
For backwards compatibility with Python 2.x, it needs to remain separate from
the rest of the codebase, since it uses Python 3.5 syntax (``async def``, ``await``).
"""
import sys
from .decorators import Decorator
class AsyncDecorator(Decorator):
# __call__ should be sync to return a decorator class object, not a coroutine
def __call__(self, *args, **kwargs):
if not self.func:
# Decorator initialized without providing the function (parametrised decorator)
return self.__class__(args[0], **self.params)
self.fqn = self.get_fqn()
self.args = args
self.kwargs = kwargs
async def async_wrapper(*args, **kwargs):
self.before()
try:
self.result = await self.func(*self.args, **self.kwargs)
except:
self.exc_info = sys.exc_info()
self.exception()
raise
finally:
self.after()
return self.result
return async_wrapper(*args, **kwargs)
| 32.529412 | 91 | 0.605787 | import sys
from .decorators import Decorator
class AsyncDecorator(Decorator):
def __call__(self, *args, **kwargs):
if not self.func:
return self.__class__(args[0], **self.params)
self.fqn = self.get_fqn()
self.args = args
self.kwargs = kwargs
async def async_wrapper(*args, **kwargs):
self.before()
try:
self.result = await self.func(*self.args, **self.kwargs)
except:
self.exc_info = sys.exc_info()
self.exception()
raise
finally:
self.after()
return self.result
return async_wrapper(*args, **kwargs)
| true | true |
1c2b9f92a91f10c5d75799254c078e9c640eb6d2 | 698 | py | Python | t637.py | showerhhh/leetcode_python | ea26e756dd10befbc22d99c258acd8198b215630 | [
"MIT"
] | null | null | null | t637.py | showerhhh/leetcode_python | ea26e756dd10befbc22d99c258acd8198b215630 | [
"MIT"
] | null | null | null | t637.py | showerhhh/leetcode_python | ea26e756dd10befbc22d99c258acd8198b215630 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def averageOfLevels(self, root: TreeNode):
queue = [root]
results = []
while queue:
sum = 0
count = 0
for i in range(len(queue)):
node = queue.pop(0)
sum += node.val
count += 1
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
results.append(sum / count)
return results
| 24.928571 | 46 | 0.477077 |
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def averageOfLevels(self, root: TreeNode):
queue = [root]
results = []
while queue:
sum = 0
count = 0
for i in range(len(queue)):
node = queue.pop(0)
sum += node.val
count += 1
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
results.append(sum / count)
return results
| true | true |
1c2b9f9b114fc6e8b804d732af432f29101e1d7e | 10,668 | py | Python | terminusdb_client/tests/integration_tests/test_schema.py | polyneme/terminusdb-client-python | 720024e33465f830709691507b4fbd5b3597e29f | [
"Apache-2.0"
] | null | null | null | terminusdb_client/tests/integration_tests/test_schema.py | polyneme/terminusdb-client-python | 720024e33465f830709691507b4fbd5b3597e29f | [
"Apache-2.0"
] | null | null | null | terminusdb_client/tests/integration_tests/test_schema.py | polyneme/terminusdb-client-python | 720024e33465f830709691507b4fbd5b3597e29f | [
"Apache-2.0"
] | null | null | null | import datetime as dt
import pytest
from terminusdb_client.errors import DatabaseError
from terminusdb_client.woqlclient.woqlClient import WOQLClient
from terminusdb_client.woqlschema.woql_schema import DocumentTemplate, WOQLSchema
def test_create_schema(docker_url, test_schema):
    """Insert the fixture schema and verify it round-trips from the db.

    Connects to the dockerised TerminusDB instance, creates the
    ``test_docapi`` database (reused by the later tests in this module)
    and checks that every schema document read back is either one of the
    expected fixture classes or the ``@context`` document.
    """
    # Membership checks against a set, declared once instead of inline.
    expected_classes = {
        "Employee",
        "Person",
        "Address",
        "Team",
        "Country",
        "Coordinate",
        "Role",
    }
    client = WOQLClient(docker_url)
    client.connect()
    client.create_database("test_docapi")
    client.insert_document(
        test_schema, commit_msg="I am checking in the schema", graph_type="schema"
    )
    for item in client.get_all_documents(graph_type="schema"):
        if "@id" in item:
            assert item["@id"] in expected_classes
        elif "@type" in item:
            # The only schema document without an @id is the context.
            assert item["@type"] == "@context"
        else:
            # Fail with the offending document instead of a bare error.
            raise AssertionError(f"unexpected schema document: {item!r}")
def test_create_schema2(docker_url, test_schema):
    """Commit the schema via ``WOQLSchema.commit`` and verify the round trip.

    Same checks as ``test_create_schema`` but exercises the schema
    object's own ``commit`` helper (against a separate ``test_docapi2``
    database) instead of ``WOQLClient.insert_document``.
    """
    expected_classes = {
        "Employee",
        "Person",
        "Address",
        "Team",
        "Country",
        "Coordinate",
        "Role",
    }
    client = WOQLClient(docker_url)
    client.connect()
    client.create_database("test_docapi2")
    test_schema.commit(client, "I am checking in the schema")
    for item in client.get_all_documents(graph_type="schema"):
        if "@id" in item:
            assert item["@id"] in expected_classes
        elif "@type" in item:
            # The only schema document without an @id is the context.
            assert item["@type"] == "@context"
        else:
            # Fail with the offending document instead of a bare error.
            raise AssertionError(f"unexpected schema document: {item!r}")
def test_insert_cheuk(docker_url, test_schema):
    """Build linked documents in memory and insert them in one commit.

    Exercises the capture/reference machinery: inserting a subdocument
    directly, or an object whose references have not been submitted in
    the same call, must raise ``ValueError``; a combined insert must
    succeed and assign both backend ids and document ids.
    """
    Country = test_schema.object.get("Country")
    Address = test_schema.object.get("Address")
    Employee = test_schema.object.get("Employee")
    Role = test_schema.object.get("Role")
    Team = test_schema.object.get("Team")
    uk = Country()
    uk.name = "United Kingdom"
    uk.perimeter = []
    # Address is a subdocument: it cannot be inserted on its own
    # (asserted below) and is embedded in the Employee document.
    home = Address()
    home.street = "123 Abc Street"
    home.country = uk
    home.postal_code = "A12 345"
    cheuk = Employee()
    # NOTE(review): 'permisstion' is spelled this way in the schema
    # fixture; renaming it here alone would break the test.
    cheuk.permisstion = {Role.Admin, Role.Read}
    cheuk.address_of = home
    cheuk.contact_number = "07777123456"
    cheuk.age = 21
    cheuk.name = "Cheuk"
    cheuk.managed_by = cheuk  # self-reference
    cheuk.friend_of = {cheuk}
    cheuk.member_of = Team.IT
    client = WOQLClient(docker_url)
    client.connect(db="test_docapi")
    # A subdocument can only be inserted through its owning document.
    with pytest.raises(ValueError) as error:
        client.insert_document(home)
    assert str(error.value) == "Subdocument cannot be added directly"
    # `uk` is referenced (via home.country) but not part of the submission.
    with pytest.raises(ValueError) as error:
        client.insert_document([cheuk])
    assert (
        str(error.value)
        == f"{uk._capture} is referenced but not captured. Seems you forgot to submit one or more object(s)."
    )
    with pytest.raises(ValueError) as error:
        client.insert_document(cheuk)
    assert (
        str(error.value)
        == "There are uncaptured references. Seems you forgot to submit one or more object(s)."
    )
    # Failed inserts must not have assigned ids.
    assert cheuk._id is None and uk._id is None
    client.insert_document([uk, cheuk], commit_msg="Adding cheuk")
    assert cheuk._backend_id and cheuk._id
    assert uk._backend_id and uk._id
    for item in client.get_all_documents():
        if item.get("@type") == "Country":
            assert item["name"] == "United Kingdom"
        elif item.get("@type") == "Employee":
            # The Address subdocument comes back embedded in the Employee.
            assert item["address_of"]["postal_code"] == "A12 345"
            assert item["address_of"]["street"] == "123 Abc Street"
            assert item["name"] == "Cheuk"
            assert item["age"] == 21
            assert item["contact_number"] == "07777123456"
            assert item["managed_by"] == item["@id"]
        else:
            raise AssertionError(f"unexpected document: {item!r}")
def test_getting_and_deleting_cheuk(docker_url):
    """Fetch the Employee inserted earlier, check its fields, delete it."""
    # The employee must come from the database, not from a leftover binding.
    assert "cheuk" not in globals()
    assert "cheuk" not in locals()
    connection = WOQLClient(docker_url)
    connection.connect(db="test_docapi")
    schema = WOQLSchema()
    schema.from_db(connection)
    employees = connection.get_documents_by_type("Employee", as_list=True)
    employee = schema.import_objects(employees)[0]
    as_dict = employee._obj_to_dict()
    address = as_dict["address_of"]
    assert address["postal_code"] == "A12 345"
    assert address["street"] == "123 Abc Street"
    assert as_dict["name"] == "Cheuk"
    assert as_dict["age"] == 21
    assert as_dict["contact_number"] == "07777123456"
    assert as_dict.get("@id")
    # Deleting the imported object must leave no Employee documents behind.
    connection.delete_document(employee)
    assert connection.get_documents_by_type("Employee", as_list=True) == []
def test_insert_cheuk_again(docker_url, test_schema):
    """Re-insert the employee after deletion, exercising update/replace.

    The schema is reloaded from the database, the previously inserted
    "United Kingdom" country is fetched back as an object, and documents
    are re-submitted with ``update_document`` / ``replace_document``.
    """
    client = WOQLClient(docker_url)
    client.connect(db="test_docapi")
    new_schema = WOQLSchema()
    new_schema.from_db(client)
    # Fetch the Country inserted by test_insert_cheuk as a schema object.
    uk = new_schema.import_objects(client.get_document("Country/United%20Kingdom"))
    Address = new_schema.object.get("Address")
    Employee = new_schema.object.get("Employee")
    Role = new_schema.object.get("Role")
    Team = new_schema.object.get("Team")
    Coordinate = new_schema.object.get("Coordinate")
    home = Address()
    home.street = "123 Abc Street"
    home.country = uk
    home.postal_code = "A12 345"
    location = Coordinate(x=0.7, y=51.3)
    uk.perimeter = [location]
    # `name` generated the Country's id, so changing it must be rejected.
    with pytest.raises(ValueError) as error:
        uk.name = "United Kingdom of Great Britain and Northern Ireland"
    assert (
        str(error.value)
        == "name has been used to generated id hance cannot be changed."
    )
    cheuk = Employee()
    # NOTE(review): member names here (Role.admin, Team.information_technology)
    # differ in casing from test_insert_cheuk (Role.Admin, Team.IT) —
    # presumably because this schema was re-imported from the db; confirm.
    cheuk.permisstion = {Role.admin, Role.read}
    cheuk.address_of = home
    cheuk.contact_number = "07777123456"
    cheuk.age = 21
    cheuk.name = "Cheuk"
    cheuk.managed_by = cheuk
    cheuk.friend_of = {cheuk}
    cheuk.member_of = Team.information_technology
    # Setting _id explicitly; the stored @id is checked (URL-encoded) below.
    cheuk._id = "Cheuk is back"
    # `location` is referenced by uk.perimeter but not submitted here.
    with pytest.raises(ValueError) as error:
        client.update_document([uk])
    assert (
        str(error.value)
        == f"{location._capture} is referenced but not captured. Seems you forgot to submit one or more object(s)."
    )
    with pytest.raises(ValueError) as error:
        client.insert_document(uk)
    assert (
        str(error.value)
        == "There are uncaptured references. Seems you forgot to submit one or more object(s)."
    )
    client.update_document([location, uk, cheuk], commit_msg="Adding cheuk again")
    assert location._backend_id and location._id
    location.x = -0.7
    # replace_document returns one entry per replaced document.
    result = client.replace_document([location], commit_msg="Fixing location")
    assert len(result) == 1
    result = client.get_all_documents()
    for item in result:
        if item.get("@type") == "Country":
            assert item["name"] == "United Kingdom"
            assert item["perimeter"]
        elif item.get("@type") == "Employee":
            # Spaces in the custom _id are URL-encoded in the stored @id.
            assert item["@id"] == "Employee/Cheuk%20is%20back"
            assert item["address_of"]["postal_code"] == "A12 345"
            assert item["address_of"]["street"] == "123 Abc Street"
            assert item["name"] == "Cheuk"
            assert item["age"] == 21
            assert item["contact_number"] == "07777123456"
            assert item["managed_by"] == item["@id"]
        elif item.get("@type") == "Coordinate":
            # x was updated to -0.7 by the replace_document call above.
            assert item["x"] == -0.7
            assert item["y"] == 51.3
        else:
            raise AssertionError()
def test_get_data_version(docker_url):
    """Exercise ``get_data_version`` / ``last_data_version`` across the API.

    Every read endpoint should return a data-version token when asked,
    and write endpoints should reject a stale ``last_data_version`` with
    a ``DatabaseError``.
    """
    client = WOQLClient(docker_url)
    client.connect(db="test_docapi")
    # Each read variant returns a (result, version) pair when
    # get_data_version=True is passed; the token must be truthy.
    result, version = client.get_all_branches(get_data_version=True)
    assert version
    result, version = client.get_all_documents(
        graph_type="schema", get_data_version=True
    )
    assert version
    result, version = client.get_all_documents(
        graph_type="schema", get_data_version=True, as_list=True
    )
    assert version
    result, version = client.get_documents_by_type(
        "Class", graph_type="schema", get_data_version=True
    )
    assert version
    result, version = client.get_documents_by_type(
        "Class", graph_type="schema", get_data_version=True, as_list=True
    )
    assert version
    result, version = client.get_document(
        "Team", graph_type="schema", get_data_version=True
    )
    assert version
    result, version = client.query_document(
        {"@type": "Employee", "@id": "Employee/Cheuk%20is%20back"},
        get_data_version=True,
        as_list=True,
    )
    assert version
    new_schema = WOQLSchema().from_db(client)
    cheuk = new_schema.import_objects(result[0])
    cheuk.name = "Cheuk Ting Ho"
    # Writing with the current version token must succeed ...
    client.replace_document(cheuk, last_data_version=version)
    result, version2 = client.get_document(
        "Employee/Cheuk%20is%20back", get_data_version=True
    )
    # ... and must have advanced the data version.
    assert version != version2
    # Writing with the now-stale token must be rejected.
    with pytest.raises(DatabaseError) as error:
        client.update_document(cheuk, last_data_version=version)
    assert (
        "Requested data version in header does not match actual data version."
        in str(error.value)
    )
    client.update_document(cheuk, last_data_version=version2)
    _, version = client.get_all_documents(get_data_version=True)
    Country = new_schema.object.get("Country")
    ireland = Country()
    ireland.name = "The Republic of Ireland"
    ireland.perimeter = []
    client.insert_document(ireland, last_data_version=version)
    # The insert above advanced the version, so this token is stale again.
    with pytest.raises(DatabaseError) as error:
        client.delete_document(ireland, last_data_version=version)
    assert (
        "Requested data version in header does not match actual data version."
        in str(error.value)
    )
    _, version2 = client.get_all_documents(get_data_version=True)
    client.delete_document(ireland, last_data_version=version2)
# Minimal schema class used by test_datetime_backend to round-trip Python
# datetime/timedelta values through the TerminusDB backend. Documented with
# comments (not a docstring) so the committed schema content is unchanged.
class CheckDatetime(DocumentTemplate):
    datetime: dt.datetime  # point-in-time field
    duration: dt.timedelta  # elapsed-time field
def test_datetime_backend(docker_url):
    """Insert a document carrying datetime/timedelta fields into a fresh db."""
    connection = WOQLClient(docker_url)
    connection.connect()
    connection.create_database("test_datetime")
    # Schema first, then an instance exercising both temporal field types.
    connection.insert_document(CheckDatetime, graph_type="schema")
    moment = dt.datetime(2019, 5, 18, 15, 17, 8, 132263)
    span = dt.timedelta(
        weeks=2,
        days=50,
        hours=8,
        minutes=5,
        seconds=27,
        milliseconds=29000,
        microseconds=10,
    )
    connection.insert_document(CheckDatetime(datetime=moment, duration=span))
| 34.636364 | 119 | 0.636577 | import datetime as dt
import pytest
from terminusdb_client.errors import DatabaseError
from terminusdb_client.woqlclient.woqlClient import WOQLClient
from terminusdb_client.woqlschema.woql_schema import DocumentTemplate, WOQLSchema
def test_create_schema(docker_url, test_schema):
    """Insert the fixture schema and verify every schema document read back."""
    my_schema = test_schema
    client = WOQLClient(docker_url)
    client.connect()
    client.create_database("test_docapi")
    client.insert_document(
        my_schema, commit_msg="I am checking in the schema", graph_type="schema"
    )
    result = client.get_all_documents(graph_type="schema")
    for item in result:
        if "@id" in item:
            # Every named schema document must be one of the fixture classes.
            assert item["@id"] in [
                "Employee",
                "Person",
                "Address",
                "Team",
                "Country",
                "Coordinate",
                "Role",
            ]
        elif "@type" in item:
            # The only schema document without an @id is the context.
            assert item["@type"] == "@context"
        else:
            raise AssertionError()
def test_create_schema2(docker_url, test_schema):
    """Same round-trip checks, but committing via ``WOQLSchema.commit``."""
    my_schema = test_schema
    client = WOQLClient(docker_url)
    client.connect()
    client.create_database("test_docapi2")
    my_schema.commit(client, "I am checking in the schema")
    result = client.get_all_documents(graph_type="schema")
    for item in result:
        if "@id" in item:
            # Every named schema document must be one of the fixture classes.
            assert item["@id"] in [
                "Employee",
                "Person",
                "Address",
                "Team",
                "Country",
                "Coordinate",
                "Role",
            ]
        elif "@type" in item:
            # The only schema document without an @id is the context.
            assert item["@type"] == "@context"
        else:
            raise AssertionError()
def test_insert_cheuk(docker_url, test_schema):
    """Insert linked documents and verify capture/reference error handling.

    Builds a Country -> Address -> Employee object graph, checks that
    inserting with missing referenced objects raises ValueError, then
    inserts the full set and verifies the stored documents.
    """
    my_schema = test_schema
    Country = my_schema.object.get("Country")
    Address = my_schema.object.get("Address")
    Employee = my_schema.object.get("Employee")
    Role = my_schema.object.get("Role")
    Team = my_schema.object.get("Team")
    uk = Country()
    uk.name = "United Kingdom"
    uk.perimeter = []
    home = Address()
    home.street = "123 Abc Street"
    home.country = uk
    home.postal_code = "A12 345"
    cheuk = Employee()
    # NOTE(review): "permisstion" presumably matches a (misspelled) attribute
    # declared on the Employee schema class — confirm against the fixture.
    cheuk.permisstion = {Role.Admin, Role.Read}
    cheuk.address_of = home
    cheuk.contact_number = "07777123456"
    cheuk.age = 21
    cheuk.name = "Cheuk"
    # Self-references are allowed: Cheuk manages and befriends himself.
    cheuk.managed_by = cheuk
    cheuk.friend_of = {cheuk}
    cheuk.member_of = Team.IT
    client = WOQLClient(docker_url)
    client.connect(db="test_docapi")
    # Subdocuments (Address) may only be inserted via their owner.
    with pytest.raises(ValueError) as error:
        client.insert_document(home)
    assert str(error.value) == "Subdocument cannot be added directly"
    # Inserting cheuk without the referenced Country must fail.
    with pytest.raises(ValueError) as error:
        client.insert_document([cheuk])
    assert (
        str(error.value)
        == f"{uk._capture} is referenced but not captured. Seems you forgot to submit one or more object(s)."
    )
    with pytest.raises(ValueError) as error:
        client.insert_document(cheuk)
    assert (
        str(error.value)
        == "There are uncaptured references. Seems you forgot to submit one or more object(s)."
    )
    # Ids are only assigned after a successful insert.
    assert cheuk._id is None and uk._id is None
    client.insert_document([uk, cheuk], commit_msg="Adding cheuk")
    assert cheuk._backend_id and cheuk._id
    assert uk._backend_id and uk._id
    result = client.get_all_documents()
    for item in result:
        if item.get("@type") == "Country":
            assert item["name"] == "United Kingdom"
        elif item.get("@type") == "Employee":
            assert item["address_of"]["postal_code"] == "A12 345"
            assert item["address_of"]["street"] == "123 Abc Street"
            assert item["name"] == "Cheuk"
            assert item["age"] == 21
            assert item["contact_number"] == "07777123456"
            # The self-reference must resolve to the employee's own id.
            assert item["managed_by"] == item["@id"]
        else:
            raise AssertionError()
def test_getting_and_deleting_cheuk(docker_url):
    """Re-import the stored Employee from the DB, verify it, then delete it."""
    # Guard against leakage of `cheuk` from other tests' scopes.
    assert "cheuk" not in globals()
    assert "cheuk" not in locals()
    client = WOQLClient(docker_url)
    client.connect(db="test_docapi")
    # Rebuild the schema classes from the database rather than a fixture.
    new_schema = WOQLSchema()
    new_schema.from_db(client)
    cheuk = new_schema.import_objects(
        client.get_documents_by_type("Employee", as_list=True)
    )[0]
    result = cheuk._obj_to_dict()
    assert result["address_of"]["postal_code"] == "A12 345"
    assert result["address_of"]["street"] == "123 Abc Street"
    assert result["name"] == "Cheuk"
    assert result["age"] == 21
    assert result["contact_number"] == "07777123456"
    assert result.get("@id")
    client.delete_document(cheuk)
    # After deletion there must be no Employee documents left.
    assert client.get_documents_by_type("Employee", as_list=True) == []
def test_insert_cheuk_again(docker_url, test_schema):
    """Re-insert the employee with an explicit _id; exercise update/replace.

    Also checks that key-generating fields are immutable and that capture
    errors are raised for un-submitted referenced objects (Coordinate).
    """
    client = WOQLClient(docker_url)
    client.connect(db="test_docapi")
    new_schema = WOQLSchema()
    new_schema.from_db(client)
    uk = new_schema.import_objects(client.get_document("Country/United%20Kingdom"))
    Address = new_schema.object.get("Address")
    Employee = new_schema.object.get("Employee")
    Role = new_schema.object.get("Role")
    Team = new_schema.object.get("Team")
    Coordinate = new_schema.object.get("Coordinate")
    home = Address()
    home.street = "123 Abc Street"
    home.country = uk
    home.postal_code = "A12 345"
    location = Coordinate(x=0.7, y=51.3)
    uk.perimeter = [location]
    # `name` was used to generate the document id, so it is read-only now.
    with pytest.raises(ValueError) as error:
        uk.name = "United Kingdom of Great Britain and Northern Ireland"
    assert (
        str(error.value)
        == "name has been used to generated id hance cannot be changed."
    )
    cheuk = Employee()
    # NOTE(review): lowercase Role.admin/Role.read and
    # Team.information_technology here vs. Role.Admin/Team.IT in
    # test_insert_cheuk — presumably the DB-derived schema uses different
    # enum member names than the fixture; confirm.
    cheuk.permisstion = {Role.admin, Role.read}
    cheuk.address_of = home
    cheuk.contact_number = "07777123456"
    cheuk.age = 21
    cheuk.name = "Cheuk"
    cheuk.managed_by = cheuk
    cheuk.friend_of = {cheuk}
    cheuk.member_of = Team.information_technology
    # Explicit id (will be URL-encoded as Employee/Cheuk%20is%20back).
    cheuk._id = "Cheuk is back"
    # Updating uk alone must fail: its perimeter references an un-submitted
    # Coordinate.
    with pytest.raises(ValueError) as error:
        client.update_document([uk])
    assert (
        str(error.value)
        == f"{location._capture} is referenced but not captured. Seems you forgot to submit one or more object(s)."
    )
    with pytest.raises(ValueError) as error:
        client.insert_document(uk)
    assert (
        str(error.value)
        == "There are uncaptured references. Seems you forgot to submit one or more object(s)."
    )
    client.update_document([location, uk, cheuk], commit_msg="Adding cheuk again")
    assert location._backend_id and location._id
    # Mutate the coordinate and replace it in the DB.
    location.x = -0.7
    result = client.replace_document([location], commit_msg="Fixing location")
    assert len(result) == 1
    result = client.get_all_documents()
    for item in result:
        if item.get("@type") == "Country":
            assert item["name"] == "United Kingdom"
            assert item["perimeter"]
        elif item.get("@type") == "Employee":
            assert item["@id"] == "Employee/Cheuk%20is%20back"
            assert item["address_of"]["postal_code"] == "A12 345"
            assert item["address_of"]["street"] == "123 Abc Street"
            assert item["name"] == "Cheuk"
            assert item["age"] == 21
            assert item["contact_number"] == "07777123456"
            assert item["managed_by"] == item["@id"]
        elif item.get("@type") == "Coordinate":
            # The replace above must have taken effect.
            assert item["x"] == -0.7
            assert item["y"] == 51.3
        else:
            raise AssertionError()
def test_get_data_version(docker_url):
    """Exercise optimistic concurrency via the get_data_version/last_data_version API.

    Every read API should return a data version; writes with a stale
    last_data_version must raise DatabaseError.
    """
    client = WOQLClient(docker_url)
    client.connect(db="test_docapi")
    # Each read variant must report a (truthy) data version.
    result, version = client.get_all_branches(get_data_version=True)
    assert version
    result, version = client.get_all_documents(
        graph_type="schema", get_data_version=True
    )
    assert version
    result, version = client.get_all_documents(
        graph_type="schema", get_data_version=True, as_list=True
    )
    assert version
    result, version = client.get_documents_by_type(
        "Class", graph_type="schema", get_data_version=True
    )
    assert version
    result, version = client.get_documents_by_type(
        "Class", graph_type="schema", get_data_version=True, as_list=True
    )
    assert version
    result, version = client.get_document(
        "Team", graph_type="schema", get_data_version=True
    )
    assert version
    result, version = client.query_document(
        {"@type": "Employee", "@id": "Employee/Cheuk%20is%20back"},
        get_data_version=True,
        as_list=True,
    )
    assert version
    new_schema = WOQLSchema().from_db(client)
    cheuk = new_schema.import_objects(result[0])
    cheuk.name = "Cheuk Ting Ho"
    # A write at the current version succeeds and bumps the data version.
    client.replace_document(cheuk, last_data_version=version)
    result, version2 = client.get_document(
        "Employee/Cheuk%20is%20back", get_data_version=True
    )
    assert version != version2
    # Writing with the stale version must be rejected.
    with pytest.raises(DatabaseError) as error:
        client.update_document(cheuk, last_data_version=version)
    assert (
        "Requested data version in header does not match actual data version."
        in str(error.value)
    )
    client.update_document(cheuk, last_data_version=version2)
    _, version = client.get_all_documents(get_data_version=True)
    Country = new_schema.object.get("Country")
    ireland = Country()
    ireland.name = "The Republic of Ireland"
    ireland.perimeter = []
    # Insert at the current version; that insert invalidates `version`.
    client.insert_document(ireland, last_data_version=version)
    with pytest.raises(DatabaseError) as error:
        client.delete_document(ireland, last_data_version=version)
    assert (
        "Requested data version in header does not match actual data version."
        in str(error.value)
    )
    _, version2 = client.get_all_documents(get_data_version=True)
    client.delete_document(ireland, last_data_version=version2)
class CheckDatetime(DocumentTemplate):
    """Schema document for round-tripping datetime/timedelta values."""
    datetime: dt.datetime
    duration: dt.timedelta
def test_datetime_backend(docker_url):
    """Insert a document carrying datetime and timedelta fields."""
    when = dt.datetime(2019, 5, 18, 15, 17, 8, 132263)
    # Same duration as before, just listed largest-unit-first.
    how_long = dt.timedelta(
        weeks=2,
        days=50,
        hours=8,
        minutes=5,
        seconds=27,
        milliseconds=29000,
        microseconds=10,
    )
    doc = CheckDatetime(datetime=when, duration=how_long)
    client = WOQLClient(docker_url)
    client.connect()
    client.create_database("test_datetime")
    # Check in the schema class first, then the instance document.
    client.insert_document(CheckDatetime, graph_type="schema")
    client.insert_document(doc)
| true | true |
1c2ba08b86b59e9429b3258f1e7080d34292710c | 1,253 | py | Python | sixpack/analysis.py | mehrdad-shokri/sixpack | d14a3107fb2facdd18b644c1d8d5d673ca4dab21 | [
"BSD-2-Clause"
] | 779 | 2015-01-04T16:31:04.000Z | 2017-12-12T20:02:36.000Z | sixpack/analysis.py | mehrdad-shokri/sixpack | d14a3107fb2facdd18b644c1d8d5d673ca4dab21 | [
"BSD-2-Clause"
] | 134 | 2015-01-10T15:07:31.000Z | 2017-12-02T18:00:49.000Z | sixpack/analysis.py | mehrdad-shokri/sixpack | d14a3107fb2facdd18b644c1d8d5d673ca4dab21 | [
"BSD-2-Clause"
] | 136 | 2015-01-08T08:47:13.000Z | 2017-12-04T22:26:25.000Z | import cStringIO as StringIO
import csv
class ExportExperiment(object):
    """Render a sixpack experiment as a three-section CSV report."""

    def __init__(self, experiment=None):
        # experiment must expose objectify_by_period() returning the
        # dict structure consumed by __call__.
        self.experiment = experiment

    def __call__(self):
        """Build the CSV report and return it as a single string."""
        buffer = StringIO.StringIO()
        emit = csv.writer(buffer).writerow

        # Section 1: one row per (alternative, day) data point.
        emit(['Alternative Details'])
        emit(['date', 'alternative', 'participants', 'conversions'])
        report = self.experiment.objectify_by_period('day')
        for alternative in report['alternatives']:
            for point in alternative['data']:
                emit([point['date'], alternative['name'], point['participants'], point['conversions']])

        # Section 2: experiment-level totals (blank row separates sections).
        emit([])
        emit(['"{0}" Summary'.format(report['name'])])
        emit(['total participants', 'total_conversions', 'has_winner', 'description'])
        emit([report['total_participants'], report['total_conversions'], report['has_winner'], report['description']])

        # Section 3: per-alternative totals.
        emit([])
        emit(['Alternative Summary'])
        emit(['name', 'participant_count', 'completed_count'])
        for alternative in report['alternatives']:
            emit([alternative['name'], alternative['participant_count'], alternative['completed_count']])

        return buffer.getvalue()
| 37.969697 | 117 | 0.63767 | import cStringIO as StringIO
import csv
class ExportExperiment(object):
    """Serialize a sixpack experiment to a CSV string (details + summaries)."""
    def __init__(self, experiment=None):
        # experiment: object exposing objectify_by_period() returning the
        # dict structure read in __call__ below.
        self.experiment = experiment
    def __call__(self):
        """Build and return the CSV report as a single string."""
        csvfile = StringIO.StringIO()
        writer = csv.writer(csvfile)
        # Section 1: one row per (alternative, day) data point.
        writer.writerow(['Alternative Details'])
        writer.writerow(['date', 'alternative', 'participants', 'conversions'])
        obj = self.experiment.objectify_by_period('day')
        for alt in obj['alternatives']:
            for datum in alt['data']:
                writer.writerow([datum['date'], alt['name'], datum['participants'], datum['conversions']])
        # Section 2: experiment-level totals (blank row separates sections).
        writer.writerow([])
        writer.writerow(['"{0}" Summary'.format(obj['name'])])
        writer.writerow(['total participants', 'total_conversions', 'has_winner', 'description'])
        writer.writerow([obj['total_participants'], obj['total_conversions'], obj['has_winner'], obj['description']])
        # Section 3: per-alternative totals.
        writer.writerow([])
        writer.writerow(['Alternative Summary'])
        writer.writerow(['name', 'participant_count', 'completed_count'])
        for alt in obj['alternatives']:
            writer.writerow([alt['name'], alt['participant_count'], alt['completed_count']])
        return csvfile.getvalue()
| true | true |
1c2ba0d37cdd89dfd9e64adfe762767c37b5b6f9 | 5,582 | py | Python | src/models/densenet.py | HwangJohn/model_compression | 1df40c8a531313cc9e79255f4477f39d66d9b849 | [
"MIT"
] | 216 | 2020-08-24T04:09:06.000Z | 2022-03-10T01:28:16.000Z | src/models/densenet.py | bopker/model_compression | dd537d306d100ce53cc5f24ef0ff315cccf8c9da | [
"MIT"
] | 17 | 2020-08-24T16:54:59.000Z | 2022-02-15T10:52:47.000Z | src/models/densenet.py | bopker/model_compression | dd537d306d100ce53cc5f24ef0ff315cccf8c9da | [
"MIT"
] | 20 | 2020-08-27T02:45:43.000Z | 2022-03-10T01:27:52.000Z | # -*- coding: utf-8 -*-
"""Fixed DenseNet Model.
All blocks consist of ConvBNReLU for quantization.
- Author: Curt-Park
- Email: jwpark@jmarple.ai
- References:
https://github.com/bearpaw/pytorch-classification
https://github.com/gpleiss/efficient_densenet_pytorch
"""
import math
from typing import Any, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from src.models.common_layers import ConvBNReLU
class Bottleneck(nn.Module):
    """DenseNet bottleneck: 1x1 expansion conv followed by a 3x3 growth conv."""

    def __init__(
        self, inplanes: int, expansion: int, growthRate: int, efficient: bool,
    ) -> None:
        """Initialize the two conv stages.

        The 1x1 conv expands to ``expansion * growthRate`` channels; the
        3x3 conv reduces down to ``growthRate`` output channels.
        """
        super(Bottleneck, self).__init__()
        hidden = expansion * growthRate
        self.conv1 = ConvBNReLU(inplanes, hidden, kernel_size=1)
        self.conv2 = ConvBNReLU(hidden, growthRate, kernel_size=3)
        self.efficient = efficient

    def _expand(self, *features: torch.Tensor) -> torch.Tensor:
        """Concatenate all incoming feature maps and run the 1x1 conv."""
        return self.conv1(torch.cat(features, 1))

    def forward(self, *prev_features: torch.Tensor) -> torch.Tensor:
        """Forward; optionally recompute the bottleneck via checkpointing."""
        use_checkpoint = self.efficient and any(
            feat.requires_grad for feat in prev_features
        )
        if use_checkpoint:
            bottleneck = cp.checkpoint(self._expand, *prev_features)
        else:
            bottleneck = self._expand(*prev_features)
        return self.conv2(bottleneck)
class DenseBlock(nn.Module):
    """A stack of bottleneck layers with dense (concatenative) connectivity."""

    def __init__(
        self,
        inplanes: int,
        blocks: int,
        expansion: int,
        growth_rate: int,
        efficient: bool,
        Layer: "type" = Bottleneck,
    ):
        """Initialize; layer i receives inplanes + i * growth_rate channels."""
        super(DenseBlock, self).__init__()
        self.layers = nn.ModuleList()
        for i in range(blocks):
            layer = Layer(
                inplanes=inplanes + i * growth_rate,
                expansion=expansion,
                growthRate=growth_rate,
                efficient=efficient,
            )
            self.layers.append(layer)

    def forward(self, init_features: torch.Tensor) -> torch.Tensor:
        """Feed every previous feature map to each layer; return the concat."""
        features = [init_features]
        for layer in self.layers:
            new_features = layer(*features)
            features.append(new_features)
        return torch.cat(features, dim=1)
class Transition(nn.Module):
    """Transition between dense blocks: 1x1 channel compression + 2x2 avg pool."""

    def __init__(self, inplanes: int, outplanes: int) -> None:
        """Build the compression conv and the downsampling pool."""
        super(Transition, self).__init__()
        self.conv = ConvBNReLU(inplanes, outplanes, kernel_size=1)
        self.avg_pool = nn.AvgPool2d(2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compress channels, then halve the spatial resolution."""
        return self.avg_pool(self.conv(x))
class DenseNet(nn.Module):
    """DenseNet architecture: stem -> dense blocks + transitions -> pool -> fc."""
    def __init__(
        self,
        num_classes: int,
        inplanes: int,
        expansion: int = 4,
        growthRate: int = 12,
        compressionRate: int = 2,
        block_configs: Tuple[int, ...] = (6, 12, 24, 16),
        small_input: bool = True,  # small-image stem, e.g. CIFAR100
        efficient: bool = False,  # memory-efficient (checkpointed) dense block
        Block: "type" = DenseBlock,
    ) -> None:
        """Initialize.

        block_configs gives the number of bottleneck layers per dense block;
        a Transition (channel compression + 2x2 pooling) is inserted between
        consecutive dense blocks.
        """
        super(DenseNet, self).__init__()
        self.growthRate = growthRate
        self.inplanes = inplanes
        self.expansion = expansion
        if small_input:
            # 3x3 stride-1 stem keeps resolution for small inputs.
            self.stem = ConvBNReLU(3, self.inplanes, kernel_size=3, stride=1)
        else:
            # ImageNet-style stem: 7x7 stride-2 conv + 3x3 stride-2 max pool.
            self.stem = nn.Sequential(
                ConvBNReLU(3, self.inplanes, kernel_size=7, stride=2),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False),
            )
        layers = []
        for i, n_bottleneck in enumerate(block_configs):
            dense_block = Block(
                self.inplanes, n_bottleneck, expansion, growthRate, efficient
            )
            layers.append(dense_block)
            # Each dense block grows the channel count by blocks * growthRate;
            # self.inplanes is mutated again inside _make_transition.
            self.inplanes += n_bottleneck * self.growthRate
            # no transition after the last dense block
            if i < len(block_configs) - 1:
                layers.append(self._make_transition(compressionRate))
        self.dense_blocks = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.flatten = nn.Flatten()  # type: ignore
        self.fc = nn.Linear(self.inplanes, num_classes)
        # Weight initialization (He-style for convs, unit/zero for BN).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_transition(self, compressionRate: int) -> nn.Module:
        """Make a transition; compresses channels and updates self.inplanes."""
        inplanes = self.inplanes
        # NOTE: `//` already floors, so math.floor is redundant here.
        outplanes = int(math.floor(self.inplanes // compressionRate))
        self.inplanes = outplanes
        return Transition(inplanes, outplanes)
    def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
        """Actual forward procedures."""
        x = self.stem(x)
        x = self.dense_blocks(x)
        x = self.avgpool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward."""
        return self._forward_impl(x)
def get_model(**kwargs: Any) -> nn.Module:
    """Construct a DenseNet model (kwargs are forwarded to DenseNet)."""
    return DenseNet(**kwargs)
| 31.715909 | 82 | 0.596919 |
import math
from typing import Any, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from src.models.common_layers import ConvBNReLU
class Bottleneck(nn.Module):
    """DenseNet bottleneck: 1x1 expansion conv followed by a 3x3 growth conv."""
    def __init__(
        self, inplanes: int, expansion: int, growthRate: int, efficient: bool,
    ) -> None:
        """Initialize; the 1x1 conv expands to expansion * growthRate channels."""
        super(Bottleneck, self).__init__()
        planes = expansion * growthRate
        self.conv1 = ConvBNReLU(inplanes, planes, kernel_size=1)
        self.conv2 = ConvBNReLU(planes, growthRate, kernel_size=3)
        self.efficient = efficient
    def _expand(self, *features: torch.Tensor) -> torch.Tensor:
        """Concatenate all incoming feature maps and run the 1x1 conv."""
        concated_features = torch.cat(features, 1)
        bottleneck_output = self.conv1(concated_features)
        return bottleneck_output
    def forward(self, *prev_features: torch.Tensor) -> torch.Tensor:
        """Forward; checkpoint the bottleneck when efficient mode is on."""
        if self.efficient and any(feat.requires_grad for feat in prev_features):
            out = cp.checkpoint(self._expand, *prev_features)
        else:
            out = self._expand(*prev_features)
        out = self.conv2(out)
        return out
class DenseBlock(nn.Module):
    """A stack of bottleneck layers with dense (concatenative) connectivity."""
    def __init__(
        self,
        inplanes: int,
        blocks: int,
        expansion: int,
        growth_rate: int,
        efficient: bool,
        Layer: "type" = Bottleneck,
    ):
        """Initialize; layer i receives inplanes + i * growth_rate channels."""
        super(DenseBlock, self).__init__()
        self.layers = nn.ModuleList()
        for i in range(blocks):
            layer = Layer(
                inplanes=inplanes + i * growth_rate,
                expansion=expansion,
                growthRate=growth_rate,
                efficient=efficient,
            )
            self.layers.append(layer)
    def forward(self, init_features: torch.Tensor) -> torch.Tensor:
        """Feed every previous feature map to each layer; return the concat."""
        features = [init_features]
        for layer in self.layers:
            new_features = layer(*features)
            features.append(new_features)
        return torch.cat(features, dim=1)
class Transition(nn.Module):
    """Transition between dense blocks: 1x1 channel compression + 2x2 avg pool."""
    def __init__(self, inplanes: int, outplanes: int) -> None:
        """Build the compression conv and the downsampling pool."""
        super(Transition, self).__init__()
        self.conv = ConvBNReLU(inplanes, outplanes, kernel_size=1)
        self.avg_pool = nn.AvgPool2d(2)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compress channels, then halve the spatial resolution."""
        out = self.conv(x)
        out = self.avg_pool(out)
        return out
class DenseNet(nn.Module):
    """DenseNet architecture: stem -> dense blocks + transitions -> pool -> fc."""
    def __init__(
        self,
        num_classes: int,
        inplanes: int,
        expansion: int = 4,
        growthRate: int = 12,
        compressionRate: int = 2,
        block_configs: Tuple[int, ...] = (6, 12, 24, 16),
        small_input: bool = True,  # small-image stem, e.g. CIFAR100
        efficient: bool = False,  # memory-efficient (checkpointed) dense block
        Block: "type" = DenseBlock,
    ) -> None:
        """Initialize.

        block_configs gives the number of bottleneck layers per dense block;
        a Transition (channel compression + 2x2 pooling) is inserted between
        consecutive dense blocks.
        """
        super(DenseNet, self).__init__()
        self.growthRate = growthRate
        self.inplanes = inplanes
        self.expansion = expansion
        if small_input:
            # 3x3 stride-1 stem keeps resolution for small inputs.
            self.stem = ConvBNReLU(3, self.inplanes, kernel_size=3, stride=1)
        else:
            # ImageNet-style stem: 7x7 stride-2 conv + 3x3 stride-2 max pool.
            self.stem = nn.Sequential(
                ConvBNReLU(3, self.inplanes, kernel_size=7, stride=2),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False),
            )
        layers = []
        for i, n_bottleneck in enumerate(block_configs):
            dense_block = Block(
                self.inplanes, n_bottleneck, expansion, growthRate, efficient
            )
            layers.append(dense_block)
            # Each dense block grows the channel count by blocks * growthRate;
            # self.inplanes is mutated again inside _make_transition.
            self.inplanes += n_bottleneck * self.growthRate
            # no transition after the last dense block
            if i < len(block_configs) - 1:
                layers.append(self._make_transition(compressionRate))
        self.dense_blocks = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.flatten = nn.Flatten()
        self.fc = nn.Linear(self.inplanes, num_classes)
        # Weight initialization (He-style for convs, unit/zero for BN).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_transition(self, compressionRate: int) -> nn.Module:
        """Make a transition; compresses channels and updates self.inplanes."""
        inplanes = self.inplanes
        # NOTE: `//` already floors, so math.floor is redundant here.
        outplanes = int(math.floor(self.inplanes // compressionRate))
        self.inplanes = outplanes
        return Transition(inplanes, outplanes)
    def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
        """Actual forward procedures."""
        x = self.stem(x)
        x = self.dense_blocks(x)
        x = self.avgpool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward."""
        return self._forward_impl(x)
def get_model(**kwargs: Any) -> nn.Module:
    """Construct a DenseNet model (kwargs are forwarded to DenseNet)."""
    return DenseNet(**kwargs)
| true | true |
1c2ba1405fb1578f973c04f6e8d59a5ab765ab33 | 8,137 | py | Python | liver_disease_detection_machine_learning.py | FahadMostafa91/Liver_disease_detection_by_Machine_learning_methods | fbe80344fc690a088dc7d2b1128c930194ca2abd | [
"MIT"
] | 1 | 2022-01-19T05:04:23.000Z | 2022-01-19T05:04:23.000Z | liver_disease_detection_machine_learning.py | FahadMostafa91/Liver_disease_detection_by_Machine_learning_methods | fbe80344fc690a088dc7d2b1128c930194ca2abd | [
"MIT"
] | null | null | null | liver_disease_detection_machine_learning.py | FahadMostafa91/Liver_disease_detection_by_Machine_learning_methods | fbe80344fc690a088dc7d2b1128c930194ca2abd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""PCA_Liver_disease_article.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1M6PyB8Awmb-osk4ZrxMPuHzKeQQAKI0b
"""
import pandas as pd
import seaborn as sns
sns.set(rc={'figure.figsize':(8,8)})
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.metrics import confusion_matrix, accuracy_score
dataset = pd.read_csv('/mice_dat_pca.csv')
dataset.head(10)
X = dataset.iloc[:, 1:].values
y = dataset.iloc[:, 0].values
"""Plot the histogram of the terget value"""
sns.histplot(y)
"""Generate synthetic data points"""
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=0)
X_res, y_res = sm.fit_resample(X, y)
sns.histplot(y_res)
scaler_orig = StandardScaler()
X_orig_norm = scaler_orig.fit_transform(X)
pca = PCA(n_components=2)
X_proj = pca.fit_transform(X_orig_norm)
sns.scatterplot(x = X_proj[:, 0], y = X_proj[:, 1], hue = y)
scaler_smote = StandardScaler()
X_res_norm = scaler_smote.fit_transform(X_res)
pca_smote = PCA(n_components=2)
X_sm_proj = pca_smote.fit_transform(X_res_norm)
sns.scatterplot(x = X_sm_proj[:, 0], y = X_sm_proj[:, 1], hue = y_res)
"""Data splitting into test and train """
X_train, X_test, y_train, y_test = train_test_split(X_res, y_res, random_state = 0, test_size = 0.2)
"""Now we normalize X_train and X_test separately to avoid information leakage"""
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
"""SEE tutorial: https://www.datacamp.com/community/tutorials/understanding-logistic-regression-python
Classification using ANN (I reduced the number of neurons to avoid excessive overfitting)
"""
ann = tf.keras.models.Sequential()
ann.add(tf.keras.layers.Dense(units= 6, activation='relu'))
ann.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
ann.fit(X_train, y_train, batch_size = 32, epochs = 30)
y_pred_ANN = np.round(ann.predict(X_test), 0)
print(confusion_matrix(y_test, y_pred_ANN))
print(accuracy_score(y_test, y_pred_ANN))
"""Desicion trees"""
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
y_pred_dt = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred_dt))
print(accuracy_score(y_test, y_pred_dt))
"""Random Forest"""
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
y_pred_rf = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred_rf))
print(accuracy_score(y_test, y_pred_rf))
import sklearn
sklearn.__version__
classifier.get_params(deep=True)
"""Support Vector Machine """
from sklearn import svm
classifier = svm.SVC(C=10, kernel='rbf', random_state = 0)
classifier.fit(X_train, y_train)
y_pred_svm = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred_svm))
print(accuracy_score(y_test, y_pred_svm))
"""ROC curve for SVM
"""
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from scipy import interp
from sklearn.metrics import roc_auc_score
"""https://www.datatechnotes.com/2019/11/how-to-create-roc-curve-in-python.html
ROC curve for SVM
"""
# Compute ROC curve and ROC area for each class
y_true = y_test # ground truth labels
y_pred = y_pred_svm # predicted probabilities generated by sklearn classifier
fpr, tpr, thresholds = roc_curve(y_true,y_pred)
roc_auc = roc_auc_score(y_true,y_pred)
print("AUC of ROC Curve:", roc_auc)
plt.plot(fpr, tpr)
plt.title("ROC Curve for SVM (0.9674)")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.show()
plt.plot(fpr, tpr, label='ROC curve(area = %.2f)' %roc_auc)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random guess')
plt.title('ROC curve for SVM')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid()
plt.legend()
plt.show()
"""ROC for ANN"""
# Compute ROC curve and ROC area for each class/ ANN
y_true = y_test # ground truth labels
y_pred = y_pred_ANN # predicted probabilities generated by sklearn classifier
fpr, tpr, thresholds = roc_curve(y_true,y_pred)
roc_auc = roc_auc_score(y_true,y_pred)
print("AUC of ROC Curve:", roc_auc)
plt.plot(fpr, tpr)
plt.title("ROC Curve for ANN (0.8906)")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.show()
plt.plot(fpr, tpr, label='ROC curve(area = %.2f)' %roc_auc)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random guess')
plt.title('ROC curve for ANN')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid()
plt.legend()
plt.show()
"""ROC for Random Forest"""
# Compute ROC curve and ROC area for each class/ rf
y_true = y_test # ground truth labels
y_pred = y_pred_rf # predicted probabilities generated by sklearn classifier
fpr, tpr, thresholds = roc_curve(y_true,y_pred)
roc_auc = roc_auc_score(y_true,y_pred)
print("AUC of ROC Curve:", roc_auc)
plt.plot(fpr, tpr)
plt.title("ROC Curve for RF (0.98597)")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.show()
plt.plot(fpr, tpr, label='ROC curve(area = %.2f)' %roc_auc)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random guess')
plt.title('ROC curve for RF')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid()
plt.legend()
plt.show()
"""Acuuracy, F1 score, Precision"""
from sklearn import metrics
# Model Accuracy: how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred_rf))
# Model Precision: what percentage of positive tuples are labeled as such?
print("Precision:",metrics.precision_score(y_test, y_pred_rf))
# Model Recall: what percentage of positive tuples are labelled as such?
print("Recall:",metrics.recall_score(y_test, y_pred_rf))
""" K-fold cross-validated paired t test : RV vs. SVM
"""
clf1 = RandomForestClassifier(random_state=1)
clf2 = svm.SVC(random_state=1)
score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
score2 = clf2.fit(X_train, y_train).score(X_test, y_test)
print('Random forest accuracy: %.2f%%' % (score1*100))
print('SVM accuracy: %.2f%%' % (score2*100))
from mlxtend.evaluate import paired_ttest_kfold_cv
t, p = paired_ttest_kfold_cv(estimator1=clf1,
estimator2=clf2,
X=X, y=y,
random_seed=1)
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)
""" K-fold cross-validated paired t test : SVM vs. ANN
"""
clf1 = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
clf2 = svm.SVC(random_state=1)
score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
score2 = clf2.fit(X_train, y_train).score(X_test, y_test)
print('Random forest accuracy: %.2f%%' % (score1*100))
print('SVM accuracy: %.2f%%' % (score2*100))
from mlxtend.evaluate import paired_ttest_kfold_cv
t, p = paired_ttest_kfold_cv(estimator1=clf1,
estimator2=clf2,
X=X, y=y,
random_seed=1)
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)
"""K-fold cross-validated paired t test : RF vs. ANN"""
from mlxtend.evaluate import paired_ttest_kfold_cv
t, p = paired_ttest_kfold_cv(estimator1=clf1,
estimator2=clf2,
X=X, y=y,
random_seed=1)
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)
clf1 = RandomForestClassifier(random_state=1)
# NOTE(review): `ann` is a compiled Keras model *instance*, not an estimator
# class — calling it with criterion/random_state kwargs raises TypeError at
# runtime. A scikit-learn-compatible wrapper (e.g. KerasClassifier) was
# presumably intended here; confirm before relying on this comparison.
clf2 = ann(criterion = 'entropy', random_state = 0)
score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
score2 = clf2.fit(X_train, y_train).score(X_test, y_test)
# NOTE(review): the print labels below are stale copies from the earlier
# RF-vs-SVM section — score2 is the (intended) ANN, not an SVM.
print('Random forest accuracy: %.2f%%' % (score1*100))
print('SVM accuracy: %.2f%%' % (score2*100)) | 29.805861 | 102 | 0.719798 |
import pandas as pd
import seaborn as sns
sns.set(rc={'figure.figsize':(8,8)})
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.metrics import confusion_matrix, accuracy_score
# Load the PCA-ready liver dataset; the first column is the target label.
dataset = pd.read_csv('/mice_dat_pca.csv')
dataset.head(10)
X = dataset.iloc[:, 1:].values
y = dataset.iloc[:, 0].values
# Class balance of the target before resampling.
sns.histplot(y)
# Oversample the minority class with SMOTE (synthetic data points).
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=0)
X_res, y_res = sm.fit_resample(X, y)
sns.histplot(y_res)
# 2-D PCA projection of the original (standardized) data.
scaler_orig = StandardScaler()
X_orig_norm = scaler_orig.fit_transform(X)
pca = PCA(n_components=2)
X_proj = pca.fit_transform(X_orig_norm)
sns.scatterplot(x = X_proj[:, 0], y = X_proj[:, 1], hue = y)
# Same projection for the SMOTE-resampled data.
scaler_smote = StandardScaler()
X_res_norm = scaler_smote.fit_transform(X_res)
pca_smote = PCA(n_components=2)
X_sm_proj = pca_smote.fit_transform(X_res_norm)
sns.scatterplot(x = X_sm_proj[:, 0], y = X_sm_proj[:, 1], hue = y_res)
# 80/20 split, then fit the scaler on the training split only
# (test data is transformed with the train statistics to avoid leakage).
X_train, X_test, y_train, y_test = train_test_split(X_res, y_res, random_state = 0, test_size = 0.2)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
ann = tf.keras.models.Sequential()
ann.add(tf.keras.layers.Dense(units= 6, activation='relu'))
ann.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
ann.fit(X_train, y_train, batch_size = 32, epochs = 30)
y_pred_ANN = np.round(ann.predict(X_test), 0)
print(confusion_matrix(y_test, y_pred_ANN))
print(accuracy_score(y_test, y_pred_ANN))
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
y_pred_dt = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred_dt))
print(accuracy_score(y_test, y_pred_dt))
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
y_pred_rf = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred_rf))
print(accuracy_score(y_test, y_pred_rf))
import sklearn
sklearn.__version__
classifier.get_params(deep=True)
from sklearn import svm
classifier = svm.SVC(C=10, kernel='rbf', random_state = 0)
classifier.fit(X_train, y_train)
y_pred_svm = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred_svm))
print(accuracy_score(y_test, y_pred_svm))
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from scipy import interp
from sklearn.metrics import roc_auc_score
y_true = y_test
y_pred = y_pred_svm
fpr, tpr, thresholds = roc_curve(y_true,y_pred)
roc_auc = roc_auc_score(y_true,y_pred)
print("AUC of ROC Curve:", roc_auc)
plt.plot(fpr, tpr)
plt.title("ROC Curve for SVM (0.9674)")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.show()
plt.plot(fpr, tpr, label='ROC curve(area = %.2f)' %roc_auc)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random guess')
plt.title('ROC curve for SVM')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid()
plt.legend()
plt.show()
y_true = y_test
y_pred = y_pred_ANN
fpr, tpr, thresholds = roc_curve(y_true,y_pred)
roc_auc = roc_auc_score(y_true,y_pred)
print("AUC of ROC Curve:", roc_auc)
plt.plot(fpr, tpr)
plt.title("ROC Curve for ANN (0.8906)")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.show()
plt.plot(fpr, tpr, label='ROC curve(area = %.2f)' %roc_auc)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random guess')
plt.title('ROC curve for ANN')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid()
plt.legend()
plt.show()
y_true = y_test
y_pred = y_pred_rf
fpr, tpr, thresholds = roc_curve(y_true,y_pred)
roc_auc = roc_auc_score(y_true,y_pred)
print("AUC of ROC Curve:", roc_auc)
plt.plot(fpr, tpr)
plt.title("ROC Curve for RF (0.98597)")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.show()
plt.plot(fpr, tpr, label='ROC curve(area = %.2f)' %roc_auc)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random guess')
plt.title('ROC curve for RF')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid()
plt.legend()
plt.show()
# Accuracy / precision / recall summary for the Random Forest predictions.
# NOTE(review): `y_pred_rf` is produced earlier in the file — confirm it is
# the Random Forest's test-set predictions.
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, y_pred_rf))
print("Precision:",metrics.precision_score(y_test, y_pred_rf))
print("Recall:",metrics.recall_score(y_test, y_pred_rf))
# Head-to-head comparison of Random Forest vs. SVM: report held-out
# accuracy for each model, then run mlxtend's k-fold paired t-test to
# check whether the accuracy difference is statistically significant.
from mlxtend.evaluate import paired_ttest_kfold_cv

rf_model = RandomForestClassifier(random_state=1)
svm_model = svm.SVC(random_state=1)
rf_model.fit(X_train, y_train)
svm_model.fit(X_train, y_train)
score1 = rf_model.score(X_test, y_test)
score2 = svm_model.score(X_test, y_test)
print('Random forest accuracy: %.2f%%' % (score1*100))
print('SVM accuracy: %.2f%%' % (score2*100))

# The t-test refits both estimators fold-by-fold on the full (X, y) data.
t, p = paired_ttest_kfold_cv(estimator1=rf_model,
                             estimator2=svm_model,
                             X=X, y=y,
                             random_seed=1)
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)
# Head-to-head comparison of Decision Tree vs. SVM with a k-fold paired
# t-test. Names clf1/clf2 are kept because the repeated t-test below
# reuses them.
clf1 = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
clf2 = svm.SVC(random_state=1)
score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
score2 = clf2.fit(X_train, y_train).score(X_test, y_test)
# Bug fix: this label previously said "Random forest accuracy", but clf1
# is a DecisionTreeClassifier here.
print('Decision tree accuracy: %.2f%%' % (score1*100))
print('SVM accuracy: %.2f%%' % (score2*100))
from mlxtend.evaluate import paired_ttest_kfold_cv
t, p = paired_ttest_kfold_cv(estimator1=clf1,
                             estimator2=clf2,
                             X=X, y=y,
                             random_seed=1)
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)
# NOTE(review): this repeats the exact paired t-test just performed above
# with the same clf1/clf2 and the same seed — it will print identical
# results. Likely an accidental duplicated notebook cell; confirm and
# deduplicate.
from mlxtend.evaluate import paired_ttest_kfold_cv
t, p = paired_ttest_kfold_cv(estimator1=clf1,
                             estimator2=clf2,
                             X=X, y=y,
                             random_seed=1)
print('t statistic: %.3f' % t)
print('p value: %.3f' % p)
# Compare Random Forest vs. the ANN model on held-out accuracy.
# NOTE(review): `ann(criterion = 'entropy', random_state = 0)` is suspect —
# `ann` is not defined in this chunk and these kwargs are DecisionTree
# arguments, which suggests a copy-paste error. Confirm what `ann` refers
# to (e.g. sklearn.neural_network.MLPClassifier) before trusting this.
clf1 = RandomForestClassifier(random_state=1)
clf2 = ann(criterion = 'entropy', random_state = 0)
score1 = clf1.fit(X_train, y_train).score(X_test, y_test)
score2 = clf2.fit(X_train, y_train).score(X_test, y_test)
print('Random forest accuracy: %.2f%%' % (score1*100))
# Bug fix: this label previously said "SVM accuracy", but clf2 is the ANN.
print('ANN accuracy: %.2f%%' % (score2*100))
1c2ba1b72a92c6e4aee2fff64b518521515c3292 | 499 | py | Python | tests/api/test_status.py | felliott/SHARE | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | [
"Apache-2.0"
] | 87 | 2015-01-06T18:24:45.000Z | 2021-08-08T07:59:40.000Z | tests/api/test_status.py | fortress-biotech/SHARE | 9c5a05dd831447949fa6253afec5225ff8ab5d4f | [
"Apache-2.0"
] | 442 | 2015-01-01T19:16:01.000Z | 2022-03-30T21:10:26.000Z | tests/api/test_status.py | fortress-biotech/SHARE | 9c5a05dd831447949fa6253afec5225ff8ab5d4f | [
"Apache-2.0"
] | 67 | 2015-03-10T16:32:58.000Z | 2021-11-12T16:33:41.000Z | from django.test import override_settings
class TestAPIStatusView:
    """Smoke tests for the ``/api/v2/status/`` endpoint."""

    @override_settings(VERSION='TESTCASE')
    def test_works(self, client):
        """The endpoint returns 200 with an 'up' status and the configured version."""
        expected_payload = {
            'data': {
                'id': '1',
                'type': 'Status',
                'attributes': {
                    'status': 'up',
                    'version': 'TESTCASE',
                },
            },
        }
        response = client.get('/api/v2/status/')
        assert response.status_code == 200
        assert response.json() == expected_payload
| 24.95 | 44 | 0.452906 | from django.test import override_settings
# NOTE(review): this class appears to be a byte-for-byte duplicate of the
# TestAPIStatusView defined earlier in the file — confirm and remove one copy.
class TestAPIStatusView:
    # Smoke test for the /api/v2/status/ endpoint.
    @override_settings(VERSION='TESTCASE')
    def test_works(self, client):
        """The endpoint returns 200 with an 'up' status and the configured version."""
        resp = client.get('/api/v2/status/')
        assert resp.status_code == 200
        assert resp.json() == {
            'data': {
                'id': '1',
                'type': 'Status',
                'attributes': {
                    'status': 'up',
                    'version': 'TESTCASE',
                }
            }
        }
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.