hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ffde90a5d50130a41d1a595028693dbb1f0d9990 | 2,077 | py | Python | evaluation/test_polygon_iou.py | karstenBehrendt/boxy | 2695dbd632a416bbb543d90f9e303f6cb3987cf2 | [
"MIT"
] | 50 | 2019-05-27T08:48:12.000Z | 2021-12-17T12:39:13.000Z | evaluation/test_polygon_iou.py | karstenBehrendt/boxy | 2695dbd632a416bbb543d90f9e303f6cb3987cf2 | [
"MIT"
] | 10 | 2019-08-23T06:41:37.000Z | 2022-01-31T18:43:22.000Z | evaluation/test_polygon_iou.py | karstenBehrendt/boxy | 2695dbd632a416bbb543d90f9e303f6cb3987cf2 | [
"MIT"
] | 10 | 2019-11-29T09:49:41.000Z | 2021-12-19T09:34:14.000Z | #!/usr/bin/env python3
"""
Test polygon intersections
Covers a very limited sample set to check if the iou functions are working
as intended. The tests are too limited to assume correctness. The tests
also only cover quadrilaterals.
"""
# NOTE Ignoring incorrect inputs for first (and likely last) version of tests
# NOTE The tests are in now way covering necessary test cases.
# TODO Negative Values
# TODO Self intersecting polygon
# TODO Different ordering of points
# TODO More challenging shapes
import copy
import unittest
from polygon_iou import polygon_iou as iou
class TestZeroIOU(unittest.TestCase):
    """IOU of two disjoint polygons must be zero."""

    def test_zero_iou(self):
        # Translate the square far away from itself so the two cannot overlap.
        base_square = [(0, 0), (3, 0), (3, 3), (0, 3)]
        shifted_square = [[value + 15 for value in corner] for corner in base_square]
        self.assertEqual(0, iou(base_square, shifted_square))
class TestFullIOU(unittest.TestCase):
    """IOU of a polygon with itself must be exactly one."""

    def test_full_iou(self):
        square = [(0, 0), (3, 0), (3, 3), (0, 3)]
        result = iou(square, square)
        self.assertEqual(1, result)
class TestInputUnchanged(unittest.TestCase):
    """The iou function must not mutate its input polygons."""

    def test_input_equality(self):
        # One polygon of tuples and one of lists, so both container kinds
        # are exercised; calling with both argument orders means each input
        # appears in each parameter position at least once.
        tuple_poly = [(1, 3), (3.14, 15), (13, 12), (4, 4)]
        tuple_poly_backup = copy.deepcopy(tuple_poly)
        list_poly = [[1.33, 3.33], [3.14, 15.12], [13.123, 12.13], [4.3, 4]]
        list_poly_backup = copy.deepcopy(list_poly)
        iou(tuple_poly, list_poly)
        iou(list_poly, tuple_poly)
        self.assertSequenceEqual(tuple_poly, tuple_poly_backup)
        self.assertSequenceEqual(list_poly, list_poly_backup)
class TestEasySquares(unittest.TestCase):
    """IOU of a square with a scaled-down copy of itself."""

    def test_easy_square(self):
        # The quarter-scale square (side 0.75, area 0.5625) lies entirely
        # inside the big square (side 3, area 9): iou = 0.5625 / 9 = 0.0625.
        big_square = [(0, 0), (3, 0), (3, 3), (0, 3)]
        small_square = [[value / 4.0 for value in corner] for corner in big_square]
        self.assertAlmostEqual(0.0625, iou(big_square, small_square))
# NOTE(review): class name looks like a typo for "TestEasyRhombus"; kept as-is
# so external test selectors keep working.
class TestEasyPhombus(unittest.TestCase):
    """IOU of two overlapping parallelograms."""

    def test_easy_rhombus(self):
        # Each parallelogram has area 2; they overlap in an area of 1,
        # so the union is 3 and the iou is 1/3.
        left_shape = [(0, 0), (1, 1), (3, 1), (2, 0)]
        right_shape = [(1, 0), (2, 1), (4, 1), (3, 0)]
        self.assertAlmostEqual(1 / 3.0, iou(left_shape, right_shape))
# Allow running this module directly: `python test_polygon_iou.py`.
if __name__ == '__main__':
    unittest.main()
| 31.469697 | 77 | 0.647569 |
9bac08031e5a5b915e87471a59a3ad45c53e6e68 | 8,804 | py | Python | emtask/ced/test/test_cedobject_factory.py | vecin2/python-emtask | b7334cadb7012c17b49bbb470e345cc84b25c5cf | [
"MIT"
] | null | null | null | emtask/ced/test/test_cedobject_factory.py | vecin2/python-emtask | b7334cadb7012c17b49bbb470e345cc84b25c5cf | [
"MIT"
] | null | null | null | emtask/ced/test/test_cedobject_factory.py | vecin2/python-emtask | b7334cadb7012c17b49bbb470e345cc84b25c5cf | [
"MIT"
] | null | null | null | import lxml.etree as ET
from emtask.ced import cedobject_factory as of
class ProcessAssertor(object):
    """Bundles assertion helpers for inspecting a CED process under test."""

    def __init__(self, process):
        self.process = process
        self.rootelem = process.rootnode

    def assert_params(self, expected_fields):
        """The process parameter elements must match ``expected_fields`` exactly."""
        assert_equal_elems(expected_fields, self.process.get_parameters())

    def assert_results(self, expected_fields):
        """The process result elements must match ``expected_fields`` exactly."""
        assert_equal_elems(expected_fields, self.process.get_results())

    def assert_imports(self, expected_imports):
        """The process import elements must match ``expected_imports`` exactly."""
        assert_equal_elems(expected_imports, self.process.get_imports())

    def assert_process_in_graph(self, process_ref, node_name):
        """The graph must contain a child node ``node_name`` whose process
        definition reference is named ``process_ref``."""
        found = any(
            child.get("name") == node_name
            and child.find("ProcessDefinitionReference").get("name") == process_ref
            for child in self.process.process_def.findall("ChildProcess")
        )
        assert found, (
            "Not found a process node in graph with named "
            + node_name
            + " referencing "
            + process_ref
        )
def assert_equal_elems(wrapper_params, process_params):
    """Both element sequences must have the same length and be pairwise equal."""
    assert len(wrapper_params) == len(process_params)
    for expected, actual in zip(process_params, wrapper_params):
        assert_equal_elem(expected, actual)
def assert_equal_elem(expected, actual):
    """Two XML elements are considered equal when they serialise identically."""
    expected_bytes = ET.tostring(expected)
    actual_bytes = ET.tostring(actual)
    assert expected_bytes == actual_bytes
def assert_dataflow(dataflow, fromnode=None, tonode=None, data_entries=None):
    """Assert that a DataFlow element connects ``fromnode`` to ``tonode`` and
    carries exactly the given ``data_entries``.

    :param dataflow: the DataFlow XML element to inspect
    :param fromnode: expected name of the FromNode child
    :param tonode: expected name of the ToNode child
    :param data_entries: list of ``(from_field, to_field)`` pairs, one per
        expected DataFlowEntry child, in document order
    """
    assert fromnode == dataflow.find("FromNode").get("name")
    assert tonode == dataflow.find("ToNode").get("name")
    dataflowentries = dataflow.findall("DataFlowEntry")
    assert len(data_entries) == len(dataflowentries)
    # Fixed: previously only the first entry was validated even though the
    # entry counts were compared -- now every entry is checked.
    for entry, (fromfield, tofield) in zip(dataflowentries, data_entries):
        param_assignment = entry.find("FromField").find("ParameterAssignment")
        assert param_assignment is not None
        assert fromfield == param_assignment.find("Verbatim").text
        field_ref = entry.find("ToField").find("FieldDefinitionReference")
        assert tofield == field_ref.get("name")
def test_process_wrapper_when_process_has_object_params_imports_object(ced):
    """End-to-end check that wrapping a process reproduces its interface.

    Builds a process with object parameters/results and an import, wraps it,
    and verifies the wrapper's fields, imports, graph nodes, transitions,
    field store and data flows.
    """
    # Build the process to be wrapped: one imported object parameter, one
    # object param/result, one plain Integer result.
    process = ced.new_process("PRJContact.Implementation.Contact.Verbs.ViewContact")
    imported_process = ced.new_process(
        "PRJContact.Implementation.Contact.Processes.InlineView"
    )
    inlineview_import = of.make_import(imported_process.path)
    process.add_import(inlineview_import)
    inlineview_field = of.make_object_field("InlineView", "inlineView")
    process.add_field(inlineview_field)
    process.mark_as_parameter("inlineView")
    street_field = of.make_object_field("IContext", "context")
    process.add_field(street_field)
    process.mark_as_parameter("context")
    process.mark_as_result("context")
    output_field = of.make_field("Integer", "output")
    process.add_field(output_field)
    process.mark_as_result("output")
    # Create the wrapper and persist everything so it can be re-opened.
    wrapper_path = "PRJContact.Implementation.Contact.Verbs.ViewContactWrapper"
    wrapper_process = process.wrapper(wrapper_path)
    process.save()
    imported_process.save()
    wrapper_process.save()
    assert ced.open(wrapper_path)
    wrapper_assertor = ProcessAssertor(wrapper_process)
    # check wrapper has field inlineView as parameter and imports the neccesary
    wrapper_assertor.assert_params([inlineview_field, street_field])
    wrapper_assertor.assert_results([street_field, output_field])
    wrapper_assertor.assert_imports([inlineview_import])
    wrapper_assertor.assert_process_in_graph("ViewContact", "viewContact")
    # Graph shape: Start -> viewContact -> End.
    transitions = wrapper_process.process_def.findall("Transition")
    assert 2 == len(transitions)
    assert transitions[0].find("StartNodeReference") is not None
    assert "viewContact" == transitions[0].find("ToNode").get("name")
    assert transitions[1].find("EndNodeReference") is not None
    assert "viewContact" == transitions[1].find("FromNode").get("name")
    # Exactly one field store node holds the wrapper's own fields.
    fieldstores = wrapper_process.process_def.findall("ThisNode")
    assert 1 == len(fieldstores)
    assert "fieldStore0" == fieldstores[0].get("name")
    # Two data flows: params into the wrapped node, results back out.
    dataflows = wrapper_process.process_def.findall("DataFlow")
    assert 2 == len(dataflows)
    assert_dataflow(
        dataflows[0],
        fromnode="fieldStore0",
        tonode="viewContact",
        data_entries=[("inlineView", "inlineView"), ("address", "address")],
    )
    assert_dataflow(
        dataflows[1],
        fromnode="viewContact",
        tonode="fieldStore0",
        data_entries=[("context", "context"), ("output", "output")],
    )
def test_add_import(ced):
    """Importing a child process records its package path and entry name."""
    mainprocess = ced.new_process("Test.MainProcess")
    childprocess = ced.new_process("Test.Processes.ChildProcess")
    mainprocess.add_import(of.make_import(childprocess.path))

    import_elem = mainprocess.rootnode.findall("ImportDeclaration")[0]
    package_names = import_elem.find("PackageSpecifier").findall("PackageName")
    assert "ChildProcess" == import_elem.get("name")
    # The dotted path "Test.Processes.ChildProcess" is split into packages.
    assert "Test" == package_names[0].get("name")
    assert "Processes" == package_names[1].get("name")
    assert "ChildProcess" == import_elem.find("PackageEntryReference").get("name")
def test_add_all_basic_types_fields(ced):
    """Every basic CED field type can be added and then retrieved by name."""
    process = ced.new_process("Test.TestProcess")
    # Decimal defaults to precision 42 and scale 1.
    basic_fields = [
        ("String", "name1"),
        ("Number", "referenceNo"),
        ("Integer", "streetNumber"),
        ("Float", "partialAmount"),
        ("Decimal", "totalAmount"),
        ("Character", "oneLetter"),
        ("Date", "dob"),
    ]
    for field_type, field_name in basic_fields:
        process.add_field(of.make_field(field_type, field_name))
    for _, field_name in basic_fields:
        assert process.get_field(field_name) is not None
    # todo type Form
def test_add_object_field(ced):
    """An object field referencing another process can be added and read back."""
    ced.new_process("Test.TestChildProcess")
    process = ced.new_process("Test.TestMainProcess")
    object_field = of.make_object_field("TestChildProcess", "childProcess")
    process.add_field(object_field)
    assert_equal_elem(process.get_field("childProcess"), object_field)
def test_add_parameters(ced):
    """Fields marked as parameters appear in the parameter list, in order."""
    proc = ced.new_process("Test.TestProcessParameter")
    proc.add_field(of.make_field("String", "street"))
    proc.add_field(of.make_field("Number", "streetNumber"))
    for field_name in ("street", "streetNumber"):
        proc.mark_as_parameter(field_name)
    for position, field_name in enumerate(("street", "streetNumber")):
        assert proc.get_field(field_name) is not None
        assert field_name == proc.get_parameters()[position].get("name")
def test_add_result(ced):
    """Fields marked as results appear in the result list, in order."""
    proc = ced.new_process("Test.TestProcessResult")
    proc.add_field(of.make_field("String", "street"))
    proc.add_field(of.make_field("Number", "streetNumber"))
    for field_name in ("street", "streetNumber"):
        proc.mark_as_result(field_name)
    for position, field_name in enumerate(("street", "streetNumber")):
        assert proc.get_field(field_name) is not None
        assert field_name == proc.get_results()[position].get("name")
def test_add_as_param_and_result(ced):
    """A single field may be marked as both a parameter and a result."""
    proc = ced.new_process("Test.TestProcessParamAndResult")
    proc.add_field(of.make_field("String", "street"))
    for marker in (proc.mark_as_parameter, proc.mark_as_result):
        marker("street")
    assert proc.get_field("street") is not None
    assert "street" == proc.get_parameters()[0].get("name")
    assert "street" == proc.get_results()[0].get("name")
def test_add_procedure(ced):
    """A general procedure can be added, saved and retrieved by name."""
    proc = ced.new_process("Test.TestProcessResult")
    proc.add_general_procedure("setUp")
    proc.save()
    procedure = proc.get_procedure("setUp")
    assert procedure is not None
    assert "Test.TestProcessResult.setUp" == procedure.path
def test_add_procedure2(ced):
    """Build a procedure via the factory and attach a local variable.

    NOTE(review): no assertions yet -- this only checks the calls do not
    raise.  The commented block below sketches the intended richer
    ``add_general_procedure`` API.
    """
    procedure = of.make_procedure(ced.root, "Test.TestBuildProcedure.procedure1")
    procedure.add_local_vars(age="Integer")
    # process.add_general_procedure(
    #     "setUp",
    #     parameter="Integer accountId",
    #     local_vars="Integer age, TestEmTaskProcess process",
    #     returns="Integer",
    #     contents="var i=0",
    #     )
| 39.128889 | 87 | 0.71706 |
1c367edc638ba223845ce4f661ce8bfd0b5db630 | 76 | wsgi | Python | 1/index.wsgi | catsky/rebang | 7053de769a3a7093f84cb6117a361ed394472ef0 | [
"MIT"
] | 29 | 2015-01-04T09:34:43.000Z | 2019-02-20T20:16:03.000Z | 1/index.wsgi | catsky/rebang | 7053de769a3a7093f84cb6117a361ed394472ef0 | [
"MIT"
] | 1 | 2015-01-19T07:05:54.000Z | 2015-06-02T05:01:38.000Z | 1/index.wsgi | catsky/rebang | 7053de769a3a7093f84cb6117a361ed394472ef0 | [
"MIT"
] | 17 | 2015-01-11T06:31:52.000Z | 2018-09-03T05:55:23.000Z | import sae
from chartnet import app
application = sae.create_wsgi_app(app) | 15.2 | 38 | 0.815789 |
fe9c5677d4c3baa644b2b00423a5999db8916542 | 780 | py | Python | sensor/consumers.py | saurbkumar/dataDisplay | f76b68bb554901e494eb5c39cec54dc9c95bd6ac | [
"Apache-2.0"
] | null | null | null | sensor/consumers.py | saurbkumar/dataDisplay | f76b68bb554901e494eb5c39cec54dc9c95bd6ac | [
"Apache-2.0"
] | null | null | null | sensor/consumers.py | saurbkumar/dataDisplay | f76b68bb554901e494eb5c39cec54dc9c95bd6ac | [
"Apache-2.0"
] | null | null | null | # In consumers.py
# consumer function is mapped from url file , in routing
from channels import Group
# Connected to websocket.connect
def ws_connect(message):
    """Handle websocket.connect: accept, join the 'sensor' group and greet."""
    reply = message.reply_channel
    reply.send({"accept": True})  # accept the connection
    Group('sensor').add(reply)
    # Reply to this individual client directly.
    reply.send({
        "text": "You're connected to sensor group :) ",
    })
def ws_message(message):
    """Handle websocket.receive: log the incoming text frame to stdout."""
    # ASGI WebSocket packet-received and send-packet message types both
    # carry their textual payload under the "text" key.
    incoming = message['text']
    print("message")
    print("Received!!" + incoming)
# Connected to websocket.disconnect
def ws_disconnect(message):
    """Handle websocket.disconnect: remove the client from the 'sensor' group.

    Fixed: this previously discarded from the "chat" group, but clients are
    added to the "sensor" group in ws_connect, so disconnecting clients were
    never actually removed from it.
    """
    Group("sensor").discard(message.reply_channel)
af37ddd55b26c0cfbf19acdeb23332eff6448733 | 3,200 | py | Python | packages/fetchai/skills/erc1155_client/behaviours.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 126 | 2019-09-07T09:32:44.000Z | 2022-03-29T14:28:41.000Z | packages/fetchai/skills/erc1155_client/behaviours.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1,814 | 2019-08-24T10:08:07.000Z | 2022-03-31T14:28:36.000Z | packages/fetchai/skills/erc1155_client/behaviours.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 46 | 2019-09-03T22:13:58.000Z | 2022-03-22T01:25:16.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains the behaviour for the erc-1155 client skill."""
from typing import Any, cast
from aea.skills.behaviours import TickerBehaviour
from packages.fetchai.connections.ledger.base import (
CONNECTION_ID as LEDGER_CONNECTION_PUBLIC_ID,
)
from packages.fetchai.protocols.ledger_api.message import LedgerApiMessage
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
from packages.fetchai.skills.erc1155_client.dialogues import (
LedgerApiDialogues,
OefSearchDialogues,
)
from packages.fetchai.skills.erc1155_client.strategy import Strategy
# Seconds between consecutive searches when no interval is configured.
DEFAULT_SEARCH_INTERVAL = 5.0
# Counterparty address used to route messages to the ledger connection.
LEDGER_API_ADDRESS = str(LEDGER_CONNECTION_PUBLIC_ID)
class SearchBehaviour(TickerBehaviour):
    """This class implements a search behaviour.

    On setup it requests the agent's ledger balance; on every tick it
    searches the OEF for services matching the strategy's query (while the
    strategy says searching should continue).
    """

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the search behaviour.

        Pops 'search_interval' from kwargs (default DEFAULT_SEARCH_INTERVAL)
        and uses it as the ticker interval.
        """
        search_interval = cast(
            float, kwargs.pop("search_interval", DEFAULT_SEARCH_INTERVAL)
        )
        super().__init__(tick_interval=search_interval, **kwargs)

    def setup(self) -> None:
        """Implement the setup for the behaviour.

        Sends a GET_BALANCE request for the strategy's ledger to the ledger
        connection.
        """
        strategy = cast(Strategy, self.context.strategy)
        ledger_api_dialogues = cast(
            LedgerApiDialogues, self.context.ledger_api_dialogues
        )
        ledger_api_msg, _ = ledger_api_dialogues.create(
            counterparty=LEDGER_API_ADDRESS,
            performative=LedgerApiMessage.Performative.GET_BALANCE,
            ledger_id=strategy.ledger_id,
            # NOTE(review): agent_addresses.get may yield None for an unknown
            # ledger_id despite the cast to str -- confirm upstream guarantees.
            address=cast(str, self.context.agent_addresses.get(strategy.ledger_id)),
        )
        self.context.outbox.put_message(message=ledger_api_msg)

    def act(self) -> None:
        """Implement the act.

        While the strategy flags searching, sends a SEARCH_SERVICES query
        (location + service) to the search service on every tick.
        """
        strategy = cast(Strategy, self.context.strategy)
        if strategy.is_searching:
            query = strategy.get_location_and_service_query()
            oef_search_dialogues = cast(
                OefSearchDialogues, self.context.oef_search_dialogues
            )
            oef_search_msg, _ = oef_search_dialogues.create(
                counterparty=self.context.search_service_address,
                performative=OefSearchMessage.Performative.SEARCH_SERVICES,
                query=query,
            )
            self.context.outbox.put_message(message=oef_search_msg)

    def teardown(self) -> None:
        """Implement the task teardown."""
        # No resources to release; intentionally a no-op.
d6be6a9f3c730183d1031becfda3cffc131086f8 | 2,070 | py | Python | setup.py | swcho/dbt-spark | e58ed5bbf00bd467be6892ce4921d7d5b1e11043 | [
"Apache-2.0"
] | null | null | null | setup.py | swcho/dbt-spark | e58ed5bbf00bd467be6892ce4921d7d5b1e11043 | [
"Apache-2.0"
] | null | null | null | setup.py | swcho/dbt-spark | e58ed5bbf00bd467be6892ce4921d7d5b1e11043 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from setuptools import find_namespace_packages, setup
import os
import re
# Absolute path of the directory containing this setup.py.
this_directory = os.path.abspath(os.path.dirname(__file__))
# Use the README as the long description rendered on PyPI.
with open(os.path.join(this_directory, 'README.md')) as f:
    long_description = f.read()
package_name = "dbt-spark"
# get this from a separate file
def _dbt_spark_version():
    """Read the package version out of dbt/adapters/spark/__version__.py.

    Raises ValueError when no ``version = "..."`` assignment is found.
    """
    version_file = os.path.join(
        this_directory, 'dbt', 'adapters', 'spark', '__version__.py'
    )
    with open(version_file) as handle:
        contents = handle.read().strip()
    match = re.search(r'''version\s*=\s*["'](.+)["']''', contents)
    if match is None:
        raise ValueError(f'invalid version at {version_file}')
    return match.group(1)
package_version = _dbt_spark_version()
description = """The SparkSQL plugin for dbt (data build tool)"""

# The dbt-core version this adapter is pinned to.
dbt_version = '0.18.1'
# the package version should be the dbt version, with maybe some things on the
# ends of it. (0.18.1 vs 0.18.1a1, 0.18.1.1, ...)
if not package_version.startswith(dbt_version):
    raise ValueError(
        f'Invalid setup.py: package_version={package_version} must start with '
        f'dbt_version={dbt_version}'
    )
setup(
    name=package_name,
    version=package_version,
    description=description,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Fishtown Analytics',
    author_email='info@fishtownanalytics.com',
    url='https://github.com/fishtown-analytics/dbt-spark',
    packages=find_namespace_packages(include=['dbt', 'dbt.*']),
    # Ship the dbt project scaffolding and macro SQL alongside the code.
    package_data={
        'dbt': [
            'include/spark/dbt_project.yml',
            'include/spark/sample_profiles.yml',
            'include/spark/macros/*.sql',
            'include/spark/macros/**/*.sql',
        ]
    },
    install_requires=[
        f'dbt-core=={dbt_version}',
        'sqlparams>=3.0.0',
    ],
    # Optional connection methods: install as dbt-spark[ODBC] or [PyHive].
    extras_require={
        "ODBC": ['pyodbc>=4.0.30'],
        "PyHive": [
            'PyHive[hive]>=0.6.0,<0.7.0',
            'thrift>=0.11.0,<0.12.0',
        ],
    }
)
760818987ea0fa9668a98c8482d3c44727811859 | 6,104 | py | Python | runners/image_editing.py | donglinwu6066/SDEdit | a97f0e8f76bb7905858db18c464bdd77d1fcc3d0 | [
"MIT"
] | 330 | 2021-05-13T09:17:13.000Z | 2022-03-28T06:48:50.000Z | runners/image_editing.py | donglinwu6066/SDEdit | a97f0e8f76bb7905858db18c464bdd77d1fcc3d0 | [
"MIT"
] | 9 | 2021-05-26T09:18:14.000Z | 2022-03-14T17:33:40.000Z | runners/image_editing.py | donglinwu6066/SDEdit | a97f0e8f76bb7905858db18c464bdd77d1fcc3d0 | [
"MIT"
] | 33 | 2021-08-03T09:15:48.000Z | 2022-03-18T09:04:49.000Z | import os
import numpy as np
from tqdm import tqdm
import torch
import torchvision.utils as tvu
from models.diffusion import Model
from functions.process_data import *
def get_beta_schedule(*, beta_start, beta_end, num_diffusion_timesteps):
    """Return a linear beta (noise-variance) schedule of the requested length."""
    schedule = np.linspace(
        beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
    )
    assert schedule.shape == (num_diffusion_timesteps,)
    return schedule
def extract(a, t, x_shape):
    """Extract coefficients from a based on t and reshape to make it
    broadcastable with x_shape."""
    batch_size, = t.shape
    assert x_shape[0] == batch_size
    coeffs = torch.gather(
        torch.tensor(a, dtype=torch.float, device=t.device), 0, t.long()
    )
    assert coeffs.shape == (batch_size,)
    # Append singleton dims so the result broadcasts against x.
    return coeffs.reshape((batch_size,) + (1,) * (len(x_shape) - 1))
def image_editing_denoising_step_flexible_mask(x, t, *,
                                               model,
                                               logvar,
                                               betas):
    """
    Sample from p(x_{t-1} | x_t)
    """
    alphas = 1.0 - betas
    alphas_cumprod = alphas.cumprod(dim=0)
    eps_pred = model(x, t)
    # Posterior mean: 1/sqrt(alpha_t) * (x - beta_t/sqrt(1-abar_t) * eps).
    score_coeff = betas / torch.sqrt(1 - alphas_cumprod)
    posterior_mean = extract(1 / torch.sqrt(alphas), t, x.shape) * (
        x - extract(score_coeff, t, x.shape) * eps_pred
    )
    log_variance = extract(logvar, t, x.shape)
    noise = torch.randn_like(x)
    # No noise is added at the final step (t == 0).
    nonzero_mask = 1 - (t == 0).float()
    nonzero_mask = nonzero_mask.reshape((x.shape[0],) + (1,) * (len(x.shape) - 1))
    sample = posterior_mean + nonzero_mask * torch.exp(0.5 * log_variance) * noise
    return sample.float()
class Diffusion(object):
    """Runs SDEdit-style image editing with a pretrained diffusion model."""

    def __init__(self, args, config, device=None):
        """Precompute the beta/alpha schedules and the sampling log-variance.

        :param args: runtime arguments (npy_name, t, sample_step, image_folder)
        :param config: experiment configuration (model/diffusion/data sections)
        :param device: torch device; defaults to CUDA when available
        """
        self.args = args
        self.config = config
        if device is None:
            device = torch.device(
                "cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.device = device

        self.model_var_type = config.model.var_type
        betas = get_beta_schedule(
            beta_start=config.diffusion.beta_start,
            beta_end=config.diffusion.beta_end,
            num_diffusion_timesteps=config.diffusion.num_diffusion_timesteps
        )
        self.betas = torch.from_numpy(betas).float().to(self.device)
        self.num_timesteps = betas.shape[0]

        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # abar_{t-1}, with abar_0 defined as 1.
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
        posterior_variance = betas * \
            (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
        # NOTE(review): any var_type other than the two below leaves
        # self.logvar unset; sampling would then raise AttributeError.
        if self.model_var_type == "fixedlarge":
            self.logvar = np.log(np.append(posterior_variance[1], betas[1:]))
        elif self.model_var_type == 'fixedsmall':
            self.logvar = np.log(np.maximum(posterior_variance, 1e-20))

    def image_editing_sample(self):
        """Download a checkpoint, then run masked guided denoising.

        Loads a (mask, image) pair from colab_demo, perturbs the image with
        noise up to level args.t, and denoises for args.sample_step rounds,
        keeping pixels where mask == 1 pinned to the original image.
        Intermediate and final images are written to args.image_folder.
        """
        print("Loading model")
        # NOTE(review): an LSUN dataset with an unrecognized category falls
        # through both inner branches and leaves `url` unbound.
        if self.config.data.dataset == "LSUN":
            if self.config.data.category == "bedroom":
                url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/bedroom.ckpt"
            elif self.config.data.category == "church_outdoor":
                url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/church_outdoor.ckpt"
        elif self.config.data.dataset == "CelebA_HQ":
            url = "https://image-editing-test-12345.s3-us-west-2.amazonaws.com/checkpoints/celeba_hq.ckpt"
        else:
            raise ValueError

        model = Model(self.config)
        ckpt = torch.hub.load_state_dict_from_url(url, map_location=self.device)
        model.load_state_dict(ckpt)
        model.to(self.device)
        model = torch.nn.DataParallel(model)
        print("Model loaded")
        ckpt_id = 0

        download_process_data(path="colab_demo")
        n = self.config.sampling.batch_size
        model.eval()
        print("Start sampling")
        with torch.no_grad():
            name = self.args.npy_name
            [mask, img] = torch.load("colab_demo/{}.pth".format(name))
            mask = mask.to(self.config.device)
            img = img.to(self.config.device)
            # Replicate the single input image across the batch.
            img = img.unsqueeze(dim=0)
            img = img.repeat(n, 1, 1, 1)
            x0 = img
            tvu.save_image(x0, os.path.join(self.args.image_folder, f'original_input.png'))
            # Rescale [0, 1] pixels to the model's [-1, 1] range.
            x0 = (x0 - 0.5) * 2.

            for it in range(self.args.sample_step):
                # Forward-diffuse x0 to noise level args.t, then denoise back.
                e = torch.randn_like(x0)
                total_noise_levels = self.args.t
                a = (1 - self.betas).cumprod(dim=0)
                x = x0 * a[total_noise_levels - 1].sqrt() + e * (1.0 - a[total_noise_levels - 1]).sqrt()
                tvu.save_image((x + 1) * 0.5, os.path.join(self.args.image_folder, f'init_{ckpt_id}.png'))

                with tqdm(total=total_noise_levels, desc="Iteration {}".format(it)) as progress_bar:
                    for i in reversed(range(total_noise_levels)):
                        t = (torch.ones(n) * i).to(self.device)
                        x_ = image_editing_denoising_step_flexible_mask(x, t=t, model=model,
                                                                        logvar=self.logvar,
                                                                        betas=self.betas)
                        # Pin masked pixels to the re-noised original; keep
                        # the denoised values only where mask != 1.
                        x = x0 * a[i].sqrt() + e * (1.0 - a[i]).sqrt()
                        x[:, (mask != 1.)] = x_[:, (mask != 1.)]
                        # added intermediate step vis
                        if (i - 99) % 100 == 0:
                            tvu.save_image((x + 1) * 0.5, os.path.join(self.args.image_folder,
                                                                       f'noise_t_{i}_{it}.png'))
                        progress_bar.update(1)

                # Carry the edited (unmasked) region into the next round.
                x0[:, (mask != 1.)] = x[:, (mask != 1.)]

                torch.save(x, os.path.join(self.args.image_folder,
                                           f'samples_{it}.pth'))
                tvu.save_image((x + 1) * 0.5, os.path.join(self.args.image_folder,
                                                           f'samples_{it}.png'))
| 42.096552 | 115 | 0.546199 |
e197cf33de116a4d47523b594e5058b73d221fab | 1,808 | py | Python | utils/misc/distribution_utils_test.py | CQUlearningsystemgroup/BitwiseBottlneck | db2b3a19d56740a8e933c609fed45eb667378c37 | [
"MIT"
] | 3 | 2021-09-02T16:38:36.000Z | 2022-03-19T14:00:13.000Z | utils/misc/distribution_utils_test.py | CQUlearningsystemgroup/BitwiseBottlneck | db2b3a19d56740a8e933c609fed45eb667378c37 | [
"MIT"
] | null | null | null | utils/misc/distribution_utils_test.py | CQUlearningsystemgroup/BitwiseBottlneck | db2b3a19d56740a8e933c609fed45eb667378c37 | [
"MIT"
] | null | null | null |
""" Tests for distribution util functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from utils.misc import distribution_utils
class GetDistributionStrategyTest(tf.test.TestCase):
  """Tests for get_distribution_strategy."""

  def test_one_device_strategy_cpu(self):
    """Zero GPUs yields a single-replica strategy on a CPU device."""
    ds = distribution_utils.get_distribution_strategy(num_gpus=0)
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual throughout.
    self.assertEqual(ds.num_replicas_in_sync, 1)
    self.assertEqual(len(ds.extended.worker_devices), 1)
    self.assertIn('CPU', ds.extended.worker_devices[0])

  def test_one_device_strategy_gpu(self):
    """One GPU yields a single-replica strategy on a GPU device."""
    ds = distribution_utils.get_distribution_strategy(num_gpus=1)
    self.assertEqual(ds.num_replicas_in_sync, 1)
    self.assertEqual(len(ds.extended.worker_devices), 1)
    self.assertIn('GPU', ds.extended.worker_devices[0])

  def test_mirrored_strategy(self):
    """Multiple GPUs yield a mirrored strategy with one replica per GPU."""
    ds = distribution_utils.get_distribution_strategy(num_gpus=5)
    self.assertEqual(ds.num_replicas_in_sync, 5)
    self.assertEqual(len(ds.extended.worker_devices), 5)
    for device in ds.extended.worker_devices:
      self.assertIn('GPU', device)
class PerReplicaBatchSizeTest(tf.test.TestCase):
  """Tests for per_replica_batch_size."""

  def test_batch_size(self):
    """The global batch size is divided evenly across replicas."""
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual throughout.
    self.assertEqual(
        distribution_utils.per_replica_batch_size(147, num_gpus=0), 147)
    self.assertEqual(
        distribution_utils.per_replica_batch_size(147, num_gpus=1), 147)
    self.assertEqual(
        distribution_utils.per_replica_batch_size(147, num_gpus=7), 21)

  def test_batch_size_with_remainder(self):
    """A batch size not divisible by the GPU count must be rejected."""
    with self.assertRaises(ValueError):
      distribution_utils.per_replica_batch_size(147, num_gpus=5)
# Run via TensorFlow's test runner when executed directly.
if __name__ == "__main__":
  tf.test.main()
| 34.113208 | 72 | 0.770465 |
79297f7f2ad8bebcec03c0f656404689a7e5d6b0 | 1,201 | py | Python | irmacl/tests/functionals/test_zipBomb.py | quarkslab/irmacl | 4eaa662957c29ada25c72018197897dd39e9c481 | [
"Apache-2.0"
] | 7 | 2015-11-06T10:36:08.000Z | 2018-06-06T05:26:57.000Z | irmacl/tests/functionals/test_zipBomb.py | quarkslab/irma-cli | 4eaa662957c29ada25c72018197897dd39e9c481 | [
"Apache-2.0"
] | 3 | 2016-09-22T10:31:47.000Z | 2018-03-23T17:21:07.000Z | irmacl/tests/functionals/test_zipBomb.py | quarkslab/irma-cli | 4eaa662957c29ada25c72018197897dd39e9c481 | [
"Apache-2.0"
] | 4 | 2016-01-29T04:15:08.000Z | 2018-03-23T16:19:03.000Z | # -*- coding: utf-8 -*-
import unittest
import os
import requests
from irmacl.helpers import scan_files, \
scan_proberesults, probe_list
cwd = os.path.dirname(__file__)
# Directory holding the test sample files, relative to this test module.
SAMPLES_DIR = os.path.join(cwd, "samples")
ZIP_SAMPLE = "zipbomb.zip"
# Shared HTTP session reused across all IRMA API calls in this module.
SESSION = requests.Session()
class TestZipBomb(unittest.TestCase):
    """The Unarchive probe must fail safely when fed a zip bomb."""

    def test_zipbomb(self):
        probe = u'Unarchive'
        if probe not in probe_list(session=SESSION):
            raise unittest.SkipTest("Skipping %s not present" % probe)

        sample = os.path.join(SAMPLES_DIR, ZIP_SAMPLE)
        scan = scan_files([sample], True, probe=[probe], blocking=True,
                          session=SESSION)
        self.assertEqual(len(scan.results), 1)
        self.assertEqual(scan.probes_finished, 1)

        result = scan_proberesults(scan.results[0].result_id, session=SESSION)
        self.assertEqual(len(result.probe_results), 1)
        probe_result = result.probe_results[0]
        # A zip bomb must produce an error status (-1) with no results payload.
        self.assertEqual(probe_result.status, -1)
        self.assertNotEqual(probe_result.error, None)
        self.assertEqual(probe_result.results, None)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 32.459459 | 78 | 0.673605 |
610214877a4501f5dbc103d058719b51d3620f21 | 665 | py | Python | load_users.py | EleutherAI/poll_website_demo | 166ae474a9f06cdacf0e368ae9d76244e4babdaa | [
"MIT"
] | null | null | null | load_users.py | EleutherAI/poll_website_demo | 166ae474a9f06cdacf0e368ae9d76244e4babdaa | [
"MIT"
] | null | null | null | load_users.py | EleutherAI/poll_website_demo | 166ae474a9f06cdacf0e368ae9d76244e4babdaa | [
"MIT"
] | null | null | null | import csv
from models import User
from app import db
from generate_password import randomStringwithDigitsAndSymbols
# Remove every existing user before re-importing from CSV.
users = User.query.all()
for u in users:
    db.session.delete(u)
# Each CSV row: email, name, plaintext password (may be blank), roles.
with open('users.csv') as f:
    reader = csv.reader(f)
    for row in reader:
        newUser = User(email=row[0], name=row[1], roles=row[3])
        plaintextPassword = row[2]
        if plaintextPassword == "":
            # Generate a random password when none was supplied.
            plaintextPassword = randomStringwithDigitsAndSymbols()
        newUser.password = plaintextPassword
        # Print the plaintext so it can be handed to the user once;
        # only the hash is persisted on the model.
        print(newUser.email, newUser.name, plaintextPassword, newUser.password_hash)
        db.session.add(newUser)
# Commit deletions and inserts together in one transaction.
db.session.commit()
c1cc5381431e0422710ffb97571d2bd2055881e1 | 11,723 | py | Python | src/ocspdash/models.py | scolby33/OCSPdash | 3eac8841e93ee50f3199ed685fd54eef372e21a3 | [
"MIT"
] | null | null | null | src/ocspdash/models.py | scolby33/OCSPdash | 3eac8841e93ee50f3199ed685fd54eef372e21a3 | [
"MIT"
] | 44 | 2017-08-16T10:28:51.000Z | 2021-04-30T20:46:12.000Z | src/ocspdash/models.py | scolby33/OCSPdash | 3eac8841e93ee50f3199ed685fd54eef372e21a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""SQLAlchemy models for OCSPdash."""
import operator
import uuid
from base64 import urlsafe_b64decode as b64decode, urlsafe_b64encode as b64encode
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Mapping, Optional # noqa: F401 imported for PyCharm type checking
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from oscrypto import asymmetric
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
LargeBinary,
String,
Text,
UniqueConstraint,
)
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from sqlalchemy.orm import backref, relationship
from sqlalchemy.sql import functions as func
import ocspdash.util
from ocspdash.constants import (
NAMESPACE_OCSPDASH_CERTIFICATE_CHAIN_ID,
NAMESPACE_OCSPDASH_KID,
OCSPSCRAPE_PRIVATE_KEY_ALGORITHMS,
)
from ocspdash.custom_columns import UUID
from ocspdash.security import pwd_context
Base: DeclarativeMeta = declarative_base()
class OCSPResponderStatus(Enum):
    """The possible statuses of an OCSP responder.

    Each member's value is its own lowercase name string.
    """

    good = 'good'
    questionable = 'questionable'
    bad = 'bad'
    unknown = 'unknown'
class Authority(Base):
    """Represents the authority that issues certificates."""

    __tablename__ = 'authority'

    id = Column(Integer, primary_key=True)
    name = Column(
        String(255), nullable=False, index=True, doc='the name of the authority'
    )
    cardinality = Column(
        Integer,
        doc='The number of certs observed from this authority in the wild. Update this '
        'when rankings change. From the Censys crawler.',
    )
    # Set by the database on insert and refreshed on every update.
    last_updated = Column(DateTime, server_default=func.now(), onupdate=func.now())

    @property
    def old(self) -> bool:
        """Return True if the last_updated time is older than 7 days, False otherwise."""
        # NOTE(review): compares naive datetimes via datetime.utcnow();
        # assumes last_updated is stored as naive UTC -- confirm against the DB.
        return self.last_updated < datetime.utcnow() - timedelta(days=7)

    def __repr__(self):
        return self.name

    def to_json(self):
        """Return a representation of the instance suitable for passing in to JSON conversion."""
        return {
            'id': self.id,
            'name': self.name,
            'cardinality': self.cardinality,
            # Inline summary of each related responder (see Responder model).
            'responders': [
                {
                    'id': responder.id,
                    'url': responder.url,
                    'cardinality': responder.cardinality,
                    'current': responder.current,
                }
                for responder in self.responders
            ],
        }
class Responder(Base):
    """Represents the unique pair of authority/endpoint."""
    __tablename__ = 'responder'
    id = Column(Integer, primary_key=True)
    authority_id = Column(
        Integer, ForeignKey('authority.id'), nullable=False, doc='the authority'
    )
    authority = relationship('Authority', backref=backref('responders'))
    url = Column(Text, nullable=False, doc='the URL of the OCSP endpoint')
    cardinality = Column(
        Integer,
        doc='The number of certs observed using this authority/endpoint pair in the '
        'wild. Update this when rankings are updated.',
    )
    # Set by the database on INSERT and refreshed on every UPDATE.
    last_updated = Column(DateTime, server_default=func.now(), onupdate=func.now())
    # A given authority may appear with many endpoints, but each pair only once.
    __table_args__ = (UniqueConstraint(authority_id, url),)
    def __repr__(self):
        return f'{self.authority} at {self.url}'
    @property
    def current(self) -> bool:
        """Return True if at least one of this responder's chains is unexpired.

        A responder with no chains at all is considered not current
        (``all()`` over an empty iterable is True).
        """
        return not all(chain.expired for chain in self.chains)
    @property
    def most_recent_chain(self) -> 'Optional[Chain]':
        """Get the most recent chain for this Responder.

        Returns None when the responder has no chains (``max`` raises
        ValueError on an empty sequence).
        """
        try:
            return max(self.chains, key=operator.attrgetter('retrieved'))
        except ValueError:
            return None
    @property
    def old(self) -> bool:
        """Return True if the last_updated time is older than 7 days, False otherwise."""
        return self.last_updated < datetime.utcnow() - timedelta(days=7)
    def to_json(self):
        """Return a representation of the instance suitable for passing in to JSON conversion."""
        return {
            'id': self.id,
            'authority': {
                'id': self.authority.id,
                'name': self.authority.name,
                'cardinality': self.authority.cardinality,
            },
            'url': self.url,
            'cardinality': self.cardinality,
            'current': self.current,
        }
def _certificate_uuid_default(context) -> uuid.UUID:
    """Derive a deterministic UUIDv5 for a chain from its subject+issuer bytes.

    Used as the SQLAlchemy ``default``/``onupdate`` callable for
    ``Chain.certificate_chain_uuid``; ``context`` is the execution context
    SQLAlchemy passes at INSERT/UPDATE time.
    """
    params = context.get_current_parameters()
    name = params['subject'] + params['issuer']
    return ocspdash.util.uuid5(NAMESPACE_OCSPDASH_CERTIFICATE_CHAIN_ID, name)
class Chain(Base):
    """Represents a certificate and its issuing certificate."""
    __tablename__ = 'chain'
    id = Column(Integer, primary_key=True)
    responder_id = Column(Integer, ForeignKey('responder.id'))
    responder = relationship('Responder', backref=backref('chains'))
    subject = Column(LargeBinary, nullable=False, doc='raw bytes of the subject certificate')
    issuer = Column(
        LargeBinary, nullable=False, doc="raw bytes of the subject's issuer certificate"
    )
    retrieved = Column(
        DateTime,
        default=datetime.utcnow,
        nullable=False,
        doc='expire the cached chain when this date is more than 7 days ago',
    )
    # Deterministic UUIDv5 derived from subject+issuer bytes; recomputed on
    # update via _certificate_uuid_default.
    certificate_chain_uuid = Column(
        UUID,
        nullable=False,
        unique=True,
        default=_certificate_uuid_default,
        onupdate=_certificate_uuid_default,
        index=True,
        doc='',
    )
    @property
    def expired(self) -> bool:
        """Return True if the subject certificate has expired, False otherwise."""
        certificate = asymmetric.load_certificate(self.subject)
        # notAfter from the certificate's validity section (timezone-aware),
        # compared against "now" coerced to UTC.
        expires_on = certificate.asn1['tbs_certificate']['validity']['not_after'].native
        return expires_on < datetime.utcnow().replace(tzinfo=timezone.utc)
    @property
    def old(self) -> bool:
        """Return True if this chain was retrieved more than 7 days ago, False otherwise."""
        return self.retrieved < datetime.utcnow() - timedelta(days=7)
    def get_manifest_json(self) -> Mapping:
        """Get a mapping suitable for creating a manifest line in the API."""
        return {
            'responder_url': self.responder.url,
            # Certificates travel as URL-safe Base64 text.
            'subject_certificate': b64encode(self.subject).decode('utf-8'),
            'issuer_certificate': b64encode(self.issuer).decode('utf-8'),
            'certificate_chain_uuid': str(self.certificate_chain_uuid),
        }
    def __repr__(self):
        return f'{self.responder} at {self.retrieved}'
    def to_json(self):
        """Return a representation of the instance suitable for passing in to JSON conversion."""
        return {
            'id': self.id,
            'retrieved': str(self.retrieved),
            'expired': self.expired,
            'old': self.old,
        }
class Location(Base):
    """An invite for a new testing location.

    A Location starts life as an invite (``selector``/``validator_hash``
    only) and becomes "accepted" once a public key and its derived key id
    have been attached via :meth:`set_public_key`.
    """
    __tablename__ = 'location'
    id = Column(Integer, primary_key=True)
    name = Column(String(255), doc='the name of the invited location')
    selector = Column(LargeBinary(16), nullable=False, unique=True, index=True, doc='')
    validator_hash = Column(String(255), nullable=False, doc='')
    pubkey = Column(LargeBinary, doc="the location's public signing key")
    key_id = Column(UUID, doc="the UUID of the location's public key", index=True)
    @property
    def accepted(self) -> bool:
        """Check if this location has a public key and key identifier pair."""
        return self.pubkey is not None and self.key_id is not None
    def verify(self, validator: bytes) -> bool:
        """Verify a validator against the Location's validator_hash.
        :param validator: The validator to be verified.
        :returns: True if the validator is valid, False otherwise.
        """
        return pwd_context.verify(validator, self.validator_hash)
    def set_public_key(self, public_key: str):
        """Set the pubkey and key_id for the Location based on an input public key.
        :param public_key: The URL-safe Base64-encoded PEM public key for the Location.
        :raises ValueError: If the key's algorithm is not one of the accepted ones.
        """
        pubkey = b64decode(public_key)
        loaded_pubkey = serialization.load_pem_public_key(pubkey, default_backend())
        # RSA keys have no ``curve`` attribute, so getattr yields None and the
        # isinstance check fails — only accepted EC curves pass.
        if not any(
            isinstance(getattr(loaded_pubkey, 'curve', None), algorithm)
            for algorithm in OCSPSCRAPE_PRIVATE_KEY_ALGORITHMS
        ):
            raise ValueError('Key type not in accepted algorithms')
        # Reuse the bytes decoded above instead of base64-decoding a second time.
        self.pubkey = pubkey
        self.key_id = uuid.uuid5(NAMESPACE_OCSPDASH_KID, public_key)
    @property
    def b64encoded_pubkey(self) -> str:  # noqa: D401
        """A URL-safe Base64 string encoding of the Location's public key.
        :returns: The encoded public key.
        """
        return b64encode(self.pubkey).decode('utf-8')
    def __repr__(self):
        if self.accepted:
            return f'Location {self.name}'
        return f'Invite for {self.name}'
    def to_json(self):
        """Return a representation of the instance suitable for passing in to JSON conversion."""
        return {
            'id': self.id,
            'name': self.name,
            'selector': str(self.selector),
            'validator_hash': self.validator_hash,
            'pubkey': str(self.pubkey),
            'key_id': str(self.key_id),
            'results': [result.id for result in self.results],
        }
class Result(Base):
    """The information about the result from a ping."""
    __tablename__ = 'result'
    id = Column(Integer, primary_key=True)
    chain_id = Column(
        Integer,
        ForeignKey('chain.id'),
        doc='the certificate chain that was used for the OCSP test',
    )
    chain = relationship('Chain', backref=backref('results'))
    location_id = Column(
        Integer,
        ForeignKey('location.id'),
        nullable=False,
        doc='the location that ran the test',
    )
    location = relationship('Location', backref=backref('results', lazy='dynamic'))
    retrieved = Column(DateTime, default=datetime.utcnow, doc='when the test was run')
    ping = Column(Boolean, nullable=False, doc='did the server respond to a ping?')
    ocsp = Column(
        Boolean, nullable=False, doc='did a valid OCSP request get a good response?'
    )
    @property
    def status(self) -> OCSPResponderStatus:  # relates to the glyphicon displayed
        """Get the status of the responder.
        Relates to the icon displayed in the web UI.

        - no valid OCSP response  -> bad
        - OCSP ok, ping ok        -> good
        - OCSP ok, no ping        -> questionable
        """
        if not self.ocsp:
            return OCSPResponderStatus.bad
        if self.ping:
            return OCSPResponderStatus.good
        return OCSPResponderStatus.questionable
    def __repr__(self):
        return f'<{self.__class__.__name__}, ping={self.ping}, ocsp={self.ocsp}>'
    def to_json(self):
        """Return a representation of the instance suitable for passing in to JSON conversion."""
        return {
            'id': self.id,
            'location': {'id': self.location.id, 'location': self.location.name},
            'chain': {
                'id': self.chain.id,
                'retrieved': str(self.chain.retrieved),
                'expired': self.chain.expired,
                'old': self.chain.old,
            },
            'retrieved': str(self.retrieved),
            'ping': self.ping,
            'ocsp': self.ocsp,
        }
| 32.383978 | 109 | 0.635588 |
10d1ecf12d59123ea0c69281f0d7dc52d1607e16 | 555 | py | Python | Data Science is Software/src/features/test_features.py | atalebizadeh/DS-Career-Track | 8bf78ef11041aef94810a392022cd51b94462d9c | [
"MIT"
] | 316 | 2016-07-11T18:19:20.000Z | 2022-03-09T23:23:34.000Z | src/features/test_features.py | waigwamacha/data-science-is-software | 2ade61a883098b3e4f231eced751c974f2759fb4 | [
"MIT"
] | 3 | 2016-07-25T04:52:18.000Z | 2021-04-23T17:48:05.000Z | src/features/test_features.py | waigwamacha/data-science-is-software | 2ade61a883098b3e4f231eced751c974f2759fb4 | [
"MIT"
] | 126 | 2016-07-21T03:06:31.000Z | 2022-02-27T19:22:15.000Z | import os
import pandas as pd
from .build_features import remove_invalid_data
# Absolute path to the project root: three directory levels up from this
# test module (src/features/ -> src/ -> project root).
PROJ_ROOT = os.path.abspath(
    os.path.join(__file__, os.pardir, os.pardir, os.pardir)
)
def test_remove_invalid_data():
    """remove_invalid_data should return a frame with no null values at all."""
    # Fixture lives under <project root>/data/raw; requires the raw data to
    # be present locally.
    data_path = os.path.join(PROJ_ROOT,
                             "data",
                             "raw",
                             "training_values.csv")
    df = remove_invalid_data(data_path)
    # Every remaining cell must be non-null across both axes.
    assert pd.notnull(df.values).all(axis=(0, 1))
| 23.125 | 51 | 0.524324 |
ccd343ce2c53be499037207a52c623dacd71561b | 2,215 | py | Python | siamreppoints/models/model_builder.py | gyzcode/RPT | ea924100ff12aded15cdc9233c1a430e74052ab2 | [
"MIT"
] | null | null | null | siamreppoints/models/model_builder.py | gyzcode/RPT | ea924100ff12aded15cdc9233c1a430e74052ab2 | [
"MIT"
] | null | null | null | siamreppoints/models/model_builder.py | gyzcode/RPT | ea924100ff12aded15cdc9233c1a430e74052ab2 | [
"MIT"
] | null | null | null | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
from siamreppoints.core.config import cfg
from siamreppoints.models.backbone import get_backbone
from siamreppoints.models.head import get_rpn_head
from siamreppoints.models.neck import get_neck
import time
class ModelBuilder(nn.Module):
    """Siamese tracker: backbone + optional neck + RepPoints RPN head.

    Components are constructed from the global ``cfg``; ``template`` caches
    the exemplar features, ``track``/``instance`` run on search images.
    """
    def __init__(self):
        super(ModelBuilder, self).__init__()
        # build backbone
        self.backbone = get_backbone(cfg.BACKBONE.TYPE,
                                     **cfg.BACKBONE.KWARGS)
        # build adjust layer
        if cfg.ADJUST.ADJUST:
            self.neck = get_neck(cfg.ADJUST.TYPE,
                                 **cfg.ADJUST.KWARGS)
        # build rpn head
        self.rpn_head = get_rpn_head(cfg.RPN.TYPE,
                                     **cfg.RPN.KWARGS)
        # for time cost measure: cumulative head time and call count,
        # updated in track().
        self.time_cost = 0
        self.count = 0
    def instance(self, x):
        """Extract search-image features and cache the correlation feature map.

        Only sets ``self.cf``; does not run the RPN head.
        """
        xf = self.backbone(x)
        if cfg.ADJUST.ADJUST:
            xf = self.neck(xf)
        #self.cf = xf[cfg.ADJUST.LAYER-1]
        # Concatenate two of the (presumably multi-level) feature maps along
        # the channel dim — assumes xf has at least 3 levels; TODO confirm.
        self.cf = torch.cat([xf[2], xf[1]], dim=1)
    def template(self, z):
        """Extract and cache exemplar (template) features in ``self.zf``."""
        zf = self.backbone(z)
        if cfg.ADJUST.ADJUST:
            zf = self.neck(zf)
        self.zf = zf
    def track(self, x, instance_size):
        """Run one tracking step on search image ``x``.

        Returns a dict with sigmoid classification scores ('score') and the
        refined point predictions ('bbox'). Also updates the timing counters
        and the cached correlation features.
        """
        xf = self.backbone(x)
        if cfg.ADJUST.ADJUST:
            xf = self.neck(xf)
        # CUDA sync before/after so the wall-clock delta measures the head
        # alone (GPU kernels are asynchronous).
        torch.cuda.synchronize()
        start = time.time()
        cls, pts_preds_init, pts_preds_refine = self.rpn_head(self.zf, xf, instance_size)
        torch.cuda.synchronize()
        # NOTE(review): ``time_cost`` local is unused, and the accumulator
        # calls time.time() a second time, so the two deltas differ slightly.
        time_cost = time.time() - start
        self.time_cost = self.time_cost + time.time() - start
        self.count = self.count + 1
        # Flatten the classification map to (batch, anchors, 1) and squash
        # to probabilities.
        cls = cls.permute(0, 2, 3, 1)
        cls = cls.reshape(cls.shape[0], -1, 1)
        cls = torch.sigmoid(cls)
        #self.cf = xf[cfg.ADJUST.LAYER-1]
        self.cf = torch.cat([xf[2], xf[1]], dim=1)
        return {
            'score': cls,
            'bbox': pts_preds_refine,
        }
| 29.144737 | 89 | 0.576524 |
527af642ae0ce12ed4cc88a260fc5f16cbafd1e5 | 12,383 | py | Python | src/unity/python/turicreate/toolkits/graph_analytics/shortest_path.py | shantanuchhabra/turicreate-c-api | 360f7e4d3622a2766472dfce7d36d2ceadc52788 | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/graph_analytics/shortest_path.py | shantanuchhabra/turicreate-c-api | 360f7e4d3622a2766472dfce7d36d2ceadc52788 | [
"BSD-3-Clause"
] | null | null | null | src/unity/python/turicreate/toolkits/graph_analytics/shortest_path.py | shantanuchhabra/turicreate-c-api | 360f7e4d3622a2766472dfce7d36d2ceadc52788 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate as _tc
from turicreate.data_structures.sgraph import SGraph as _SGraph
import turicreate.toolkits._main as _main
from turicreate.toolkits.graph_analytics._model_base import GraphAnalyticsModel as _ModelBase
from turicreate.cython.cy_server import QuietProgress
import copy as _copy
_HAS_IPYTHON = True
try:
import IPython.core.display as _IPython
except:
_HAS_IPYTHON = False
class ShortestPathModel(_ModelBase):
    """
    Model object containing the distance for each vertex in the graph to a
    single source vertex, which is specified during
    :func:`turicreate.shortest_path.create`.
    The model also allows querying for one of the shortest paths from the source
    vertex to any other vertex in the graph.
    Below is a list of queryable fields for this model:
    +----------------+------------------------------------------------------------+
    | Field          | Description                                                |
    +================+============================================================+
    | graph          | A new SGraph with the distance as a vertex property        |
    +----------------+------------------------------------------------------------+
    | distance       | An SFrame with each vertex's distance to the source vertex |
    +----------------+------------------------------------------------------------+
    | weight_field   | The edge field for weight                                  |
    +----------------+------------------------------------------------------------+
    | source_vid     | The source vertex id                                       |
    +----------------+------------------------------------------------------------+
    | max_distance   | Maximum distance between any two vertices                  |
    +----------------+------------------------------------------------------------+
    | training_time  | Total training time of the model                           |
    +----------------+------------------------------------------------------------+
    This model cannot be constructed directly. Instead, use
    :func:`turicreate.shortest_path.create` to create an instance
    of this model. A detailed list of parameter options and code samples
    are available in the documentation for the create function.
    See Also
    --------
    create
    """
    def __init__(self, model):
        '''__init__(self)'''
        self.__proxy__ = model
        # Lazily-built lookup table (vid, parent_row_id, distance) used by
        # get_path(); populated on the first path query.
        self._path_query_table = None
    def _result_fields(self):
        """
        Return results information
        Fields should NOT be wrapped by _precomputed_field
        """
        ret = super(ShortestPathModel, self)._result_fields()
        ret['vertex distance to the source vertex'] = "SFrame. m.distance"
        return ret
    def _setting_fields(self):
        """
        Return model fields related to input setting
        Fields SHOULD be wrapped by _precomputed_field, if necessary
        """
        ret = super(ShortestPathModel, self)._setting_fields()
        ret['source vertex id'] = 'source_vid'
        ret['edge weight field id'] = 'weight_field'
        ret['maximum distance between vertices'] = 'max_distance'
        return ret
    def _method_fields(self):
        """
        Return model fields related to model methods
        Fields should NOT be wrapped by _precomputed_field
        """
        return {'get shortest path': 'get_path() e.g. m.get_path(vid=target_vid)'}
    def get_path(self, vid, highlight=None):
        """
        Get the shortest path.
        Return one of the shortest paths between the source vertex defined
        in the model and the query vertex.
        The source vertex is specified by the original call to shortest path.
        Optionally, plots the path with networkx.
        Parameters
        ----------
        vid : string
            ID of the destination vertex. The source vertex ID is specified
            when the shortest path result is first computed.
        highlight : list
            If the path is plotted, identifies the vertices (by vertex ID) that
            should be highlighted by plotting in a different color.
        Returns
        -------
        path : list
            List of pairs of (vertex_id, distance) in the path.
        Examples
        --------
        >>> m.get_path(vid=0)
        """
        # Build the parent-pointer table on first use; it is cached for
        # subsequent queries.
        if self._path_query_table is None:
            self._path_query_table = self._generate_path_sframe()
        source_vid = self.source_vid
        path = []
        path_query_table = self._path_query_table
        if not vid in path_query_table['vid']:
            raise ValueError('Destination vertex id ' + str(vid) + ' not found')
        record = path_query_table[path_query_table['vid'] == vid][0]
        dist = record['distance']
        # NOTE(review): 1e5 is an arbitrary "effectively unreachable" cutoff
        # (unreachable vertices carry a very large sentinel distance) — confirm.
        if dist > 1e5:
            raise ValueError('The distance to {} is too large to show the path.'.format(vid))
        path = [(vid, dist)]
        # Walk parent pointers back to the source; bounded by the table size
        # to guard against a malformed (cyclic) parent chain.
        max_iter = len(path_query_table)
        num_iter = 0
        while record['distance'] != 0 and num_iter < max_iter:
            parent_id = record['parent_row_id']
            assert parent_id < len(path_query_table)
            assert parent_id >= 0
            record = path_query_table[parent_id]
            path.append((record['vid'], record['distance']))
            num_iter += 1
        assert record['vid'] == source_vid
        assert num_iter < max_iter
        # Path was collected destination->source; present it source->destination.
        path.reverse()
        return path
    def _generate_path_sframe(self):
        """
        Generates an sframe with columns: vid, parent_row_id, and distance.
        Used for speed up the path query.
        """
        source_vid = self.source_vid
        weight_field = self.weight_field
        query_table = _copy.copy(self.distance)
        query_table = query_table.add_row_number('row_id')
        g = self.graph.add_vertices(query_table)
        # The sequence id which a vertex is visited, initialized with 0 meaning not visited.
        g.vertices['__parent__'] = -1
        weight_field = self.weight_field
        # Unweighted graphs get a synthetic unit-weight edge field.
        if (weight_field == ""):
            weight_field = '__unit_weight__'
            g.edges[weight_field] = 1
        # Traverse the graph once and get the parent row id for each vertex
        # def traverse_fun(src, edge, dst):
        #     if src['__id'] == source_vid:
        #         src['__parent__'] = src['row_id']
        #     if dst['distance'] == src['distance'] + edge[weight_field]:
        #         dst['__parent__'] = max(dst['__parent__'], src['row_id'])
        #     return (src, edge, dst)
        #
        # the internal lambda appear to have some issues.
        traverse_fun = lambda src, edge, dst: \
            _tc.extensions._toolkits.graph.sssp.shortest_path_traverse_function(
                src, edge, dst, source_vid, weight_field)
        g = g.triple_apply(traverse_fun, ['__parent__'])
        # Join the discovered parent pointers back onto the distance table,
        # keyed by vertex id, and restore row order.
        query_table = query_table.join(g.get_vertices()[['__id', '__parent__']], '__id').sort('row_id')
        query_table.rename({'__parent__': 'parent_row_id', '__id': 'vid'}, inplace=True)
        return query_table
    def _get_version(self):
        return 0
    @classmethod
    def _native_name(cls):
        return "shortest_path"
    def _get_native_state(self):
        return {'model':self.__proxy__}
    @classmethod
    def _load_version(cls, state, version):
        assert(version == 0)
        return cls(state['model'])
def create(graph, source_vid, weight_field="", max_distance=1e30, verbose=True):
    """
    Compute the single source shortest path distance from the source vertex to
    all vertices in the graph. Note that because SGraph is directed, shortest
    paths are also directed. To find undirected shortest paths add edges to the
    SGraph in both directions. Return a model object with distance each of
    vertex in the graph.
    Parameters
    ----------
    graph : SGraph
        The graph on which to compute shortest paths.
    source_vid : vertex ID
        ID of the source vertex.
    weight_field : string, optional
        The edge field representing the edge weights. If empty, uses unit
        weights.
    verbose : bool, optional
        If True, print progress updates.
    Returns
    -------
    out : ShortestPathModel
    References
    ----------
    - `Wikipedia - ShortestPath <http://en.wikipedia.org/wiki/Shortest_path_problem>`_
    Examples
    --------
    If given an :class:`~turicreate.SGraph` ``g``, we can create
    a :class:`~turicreate.shortest_path.ShortestPathModel` as follows:
    >>> g = turicreate.load_graph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap')
    >>> sp = turicreate.shortest_path.create(g, source_vid=1)
    We can obtain the shortest path distance from the source vertex to each
    vertex in the graph ``g`` as follows:
    >>> sp_sframe = sp['distance'] # SFrame
    We can add the new distance field to the original graph g using:
    >>> g.vertices['distance_to_1'] = sp['graph'].vertices['distance']
    Note that the task above does not require a join because the vertex
    ordering is preserved through ``create()``.
    To get the actual path from the source vertex to any destination vertex:
    >>> path = sp.get_path(vid=10)
    We can obtain an auxiliary graph with additional information corresponding
    to the shortest path from the source vertex to each vertex in the graph
    ``g`` as follows:
    >>> sp_graph = sp.get.graph # SGraph
    See Also
    --------
    ShortestPathModel
    """
    if not isinstance(graph, _SGraph):
        raise TypeError('graph input must be a SGraph object.')
    # Pack options for the native (C++) SSSP implementation.
    opts = {'source_vid': source_vid, 'weight_field': weight_field,
            'max_distance': max_distance, 'graph': graph.__proxy__}
    # QuietProgress suppresses native progress output when verbose is False.
    with QuietProgress(verbose):
        params = _tc.extensions._toolkits.graph.sssp.create(opts)
    return ShortestPathModel(params['model'])
def _compute_shortest_path(graph, source_vids, dest_vids, weight_field=""):
    """
    Computes shortest paths from any vertex in source_vids to any vertex
    in dest_vids. Note that because SGraph is directed, shortest paths are
    also directed. To find undirected shortest paths add edges to the SGraph in
    both directions. Returns a list of shortest paths between source_vids
    and dest_vids.
    Note that this function does not compute all shortest paths between every
    (source, dest) pair. It computes
    Parameters
    ----------
    graph : SGraph
        The graph on which to compute shortest paths.
    source_vids : vertex ID or list of vertex IDs
        ID of the source vertices
    dest_vids : vertex ID or list of vertex IDs
        ID of the destination vertices
    weight_field : str, optional.
        The edge field representing the edge weights. If empty, uses unit
        weights.
    Returns
    -------
    out : An SArray of lists of all the same length.
        Each list describes a path of vertices leading from one source
        vertex to one destination vertex.
    References
    ----------
    - `Wikipedia - ShortestPath <http://en.wikipedia.org/wiki/Shortest_path_problem>`_
    Examples
    --------
    If given an :class:`~turicreate.SGraph` ``g``, we can create
    a :class:`~turicreate.shortest_path.ShortestPathModel` as follows:
    >>> edge_src_ids = ['src1', 'src2',  'a',  'b',  'c' ]
    >>> edge_dst_ids = [  'a',    'b',  'dst', 'c', 'dst']
    >>> edges = turicreate.SFrame({'__src_id': edge_src_ids, '__dst_id': edge_dst_ids})
    >>> g=tc.SGraph().add_edges(edges)
    >>> turicreate.shortest_path.compute_shortest_path(g, ["src1","src2"], "dst")
    [['a','dst']]
    See Also
    --------
    ShortestPathModel
    """
    # Accept either a single vertex ID or a list of them. Using isinstance
    # (rather than ``type(x) != list``) also accepts list subclasses.
    if not isinstance(source_vids, list):
        source_vids = [source_vids]
    if not isinstance(dest_vids, list):
        dest_vids = [dest_vids]
    # Delegate to the native (C++) all-shortest-paths implementation.
    return _tc.extensions._toolkits.graph.sssp.all_shortest_paths(
        graph, source_vids, dest_vids, weight_field)
| 37.07485 | 103 | 0.604135 |
054a32949db6f6618d19e65c08f52f4096da56a5 | 502 | py | Python | lcb/cases.py | arydevy/LABO | 90882436dade8cf6b07a44600f725f7ee7d72c31 | [
"MIT"
] | 1 | 2020-06-04T13:28:32.000Z | 2020-06-04T13:28:32.000Z | lcb/cases.py | arydevy/LABO | 90882436dade8cf6b07a44600f725f7ee7d72c31 | [
"MIT"
] | null | null | null | lcb/cases.py | arydevy/LABO | 90882436dade8cf6b07a44600f725f7ee7d72c31 | [
"MIT"
] | null | null | null |
import random,datetime
from adons import *
from stats import *
from sys import exit

# Maps recognized user inputs to a handler or a precomputed answer.
# NOTE(review): values such as brithCalc(day, mon), today and timeNow are
# evaluated once at import time, so date/time answers are frozen at startup
# — confirm whether live values are wanted.
input_output = {"hi": greeting,
                'hello': greeting,
                "what is your name": myname,
                "my name": user,
                "about": about,
                "where are you from": home,
                "how old are you": brithCalc(day, mon),
                "what is the date": today,
                "what is the time": timeNow,
                # BUG FIX: was ``exit()`` — calling exit while building the
                # dict terminated the program at import time. Store the
                # callable itself so the caller can invoke it on "bye".
                "bye": exit,
                }
| 22.818182 | 53 | 0.464143 |
1f771aee720cf5f5a4b467302669bb0e4a4c85a1 | 28,972 | py | Python | tasks/__init__.py | brianjgeiger/osf.io | aef962685056ac4f721cf9680fca5ec2dbfed675 | [
"Apache-2.0"
] | null | null | null | tasks/__init__.py | brianjgeiger/osf.io | aef962685056ac4f721cf9680fca5ec2dbfed675 | [
"Apache-2.0"
] | 80 | 2015-02-25T15:12:15.000Z | 2015-06-11T18:44:55.000Z | tasks/__init__.py | brianjgeiger/osf.io | aef962685056ac4f721cf9680fca5ec2dbfed675 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import json
import platform
import subprocess
import logging
import sqlite3
import invoke
from invoke import Collection
from website import settings
from .utils import pip_install, bin_prefix
# tasks/local.py is an optional, developer-specific override module copied
# from local-dist.py; warn (but continue) when it is absent.
try:
    from tasks import local  # noqa
except ImportError as error:
    print('No tasks/local.py file found. '
          'Did you remember to copy local-dist.py to local.py?')
# Quiet invoke's own logging.
logging.getLogger('invoke').setLevel(logging.CRITICAL)
# gets the root path for all the scripts that rely on it
HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
CONSTRAINTS_PATH = os.path.join(HERE, 'requirements', 'constraints.txt')
# pytest exit code meaning "no tests were collected"; treated as success by
# test_module() below (relevant when testmon skips everything).
NO_TESTS_COLLECTED = 5
# Root invoke namespace; the task() decorator below registers every task here.
ns = Collection()
# Optionally mount developer-local and admin task collections as sub-namespaces.
try:
    from tasks import local as local_tasks
    ns.add_collection(Collection.from_module(local_tasks), name='local')
except ImportError:
    pass
try:
    from admin import tasks as admin_tasks
    ns.add_collection(Collection.from_module(admin_tasks), name='admin')
except ImportError:
    pass
def task(*args, **kwargs):
    """Drop-in replacement for ``invoke.task`` that also registers the
    resulting task on the root namespace ``ns``.

    Supports both the bare form (``@task``) and the parameterized form
    (``@task(aliases=[...])``), mirroring invoke's own decorator.
    """
    def _register(fn, *task_args, **task_kwargs):
        created = invoke.task(fn, *task_args, **task_kwargs)
        ns.add_task(created)
        return created
    # Bare usage: @task applied directly to the function.
    if len(args) == 1 and callable(args[0]):
        return _register(args[0])
    # Parameterized usage: @task(...) must return a decorator.
    return lambda f: _register(f, *args, **kwargs)
@task
def server(ctx, host=None, port=5000, debug=True, gitlogs=False):
    """Run the app server."""
    # Presumably guards against double initialization under the Werkzeug
    # reloader: WERKZEUG_RUN_MAIN is set in the reloaded child process, so
    # the full app init runs only there (or when debug/reloading is off) —
    # TODO confirm against Werkzeug docs.
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' or not debug:
        if os.environ.get('WEB_REMOTE_DEBUG', None):
            import pydevd
            # e.g. '127.0.0.1:5678'
            remote_parts = os.environ.get('WEB_REMOTE_DEBUG').split(':')
            pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True)
        if gitlogs:
            git_logs(ctx)
        from website.app import init_app
        os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
        app = init_app(set_backends=True, routes=True)
        settings.API_SERVER_PORT = port
    else:
        # Parent reloader process: a bare app object is enough to start the
        # reloader without the expensive init.
        from framework.flask import app
    context = None
    if settings.SECURE_MODE:
        # Serve over TLS with the dev cert/key pair.
        context = (settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH], ssl_context=context)
@task
def git_logs(ctx, branch=None):
    """Gather git log metadata via scripts/meta/gatherer.

    :param branch: branch to gather from; None defers to the gatherer's default.
    """
    # Lazy import: the scripts package is only needed when this task runs.
    from scripts.meta import gatherer
    gatherer.main(branch=branch)
@task
def apiserver(ctx, port=8000, wait=True, autoreload=True, host='127.0.0.1', pty=True):
    """Run the API server.

    With ``wait=True`` (default) blocks on ctx.run; with ``wait=False``
    returns a background subprocess.Popen handle instead.
    """
    env = os.environ.copy()
    cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {}:{} --nothreading'\
        .format(sys.executable, host, port)
    if not autoreload:
        cmd += ' --noreload'
    if settings.SECURE_MODE:
        # Swap in the SSL-capable dev server with the local cert/key pair.
        cmd = cmd.replace('runserver', 'runsslserver')
        cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    if wait:
        return ctx.run(cmd, echo=True, pty=pty)
    from subprocess import Popen
    # Detached mode: caller is responsible for managing the child process.
    return Popen(cmd, shell=True, env=env)
@task
def adminserver(ctx, port=8001, host='127.0.0.1', pty=True):
    """Run the Admin server."""
    django_env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
    command = '{} python manage.py runserver {}:{} --nothreading'.format(django_env, host, port)
    if settings.SECURE_MODE:
        # Swap in the SSL-capable dev server and point it at the cert/key pair.
        command = command.replace('runserver', 'runsslserver')
        command += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    ctx.run(command, echo=True, pty=pty)
@task
def shell(ctx, transaction=True, print_sql=False, notebook=False):
    """Open an OSF-aware Django shell (``manage.py osf_shell``)."""
    parts = ['DJANGO_SETTINGS_MODULE="api.base.settings" python manage.py osf_shell']
    if print_sql:
        parts.append('--print-sql')
    if notebook:
        parts.append('--notebook')
    if not transaction:
        parts.append('--no-transaction')
    return ctx.run(' '.join(parts), pty=True, echo=True)
@task
def sharejs(ctx, host=None, port=None, db_url=None, cors_allow_origin=None):
    """Start a local ShareJS server.

    Configuration is handed to the node process via SHAREJS_* environment
    variables (presumably read by shareServer.js — confirm there).
    """
    if host:
        os.environ['SHAREJS_SERVER_HOST'] = host
    if port:
        os.environ['SHAREJS_SERVER_PORT'] = port
    if db_url:
        os.environ['SHAREJS_DB_URL'] = db_url
    if cors_allow_origin:
        os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
    if settings.SENTRY_DSN:
        os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
    # The server script ships with the wiki addon.
    share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
    ctx.run('node {0}'.format(share_server))
@task(aliases=['celery'])
def celery_worker(ctx, level='debug', hostname=None, beat=False, queues=None, concurrency=None, max_tasks_per_child=None):
    """Run the Celery worker process.

    Optional flags are appended to the celery command only when supplied.
    """
    os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
    cmd = 'celery worker -A framework.celery_tasks -Ofair -l {0}'.format(level)
    if hostname:
        cmd = cmd + ' --hostname={}'.format(hostname)
    # beat sets up a cron like scheduler, refer to website/settings
    if beat:
        cmd = cmd + ' --beat'
    if queues:
        cmd = cmd + ' --queues={}'.format(queues)
    if concurrency:
        cmd = cmd + ' --concurrency={}'.format(concurrency)
    if max_tasks_per_child:
        cmd = cmd + ' --maxtasksperchild={}'.format(max_tasks_per_child)
    ctx.run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(ctx, level='debug', schedule=None):
    """Run the Celery beat scheduler (cron-like; refer to website/settings)."""
    os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
    command = 'celery beat -A framework.celery_tasks -l {0} --pidfile='.format(level)
    if schedule:
        command += ' --schedule={}'.format(schedule)
    ctx.run(bin_prefix(command), pty=True)
@task
def migrate_search(ctx, delete=True, remove=False, index=settings.ELASTIC_INDEX):
    """Migrate the search-enabled models.

    NOTE: the ``index`` default is evaluated once at import time; changing
    settings.ELASTIC_INDEX afterwards will not change the default.
    """
    from website.app import init_app
    init_app(routes=False, set_backends=False)
    from website.search_migration.migrate import migrate
    # NOTE: Silence the warning:
    # "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised."
    SILENT_LOGGERS = ['py.warnings']
    for logger in SILENT_LOGGERS:
        logging.getLogger(logger).setLevel(logging.ERROR)
    migrate(delete, remove=remove, index=index)
@task
def rebuild_search(ctx):
    """Delete and recreate the index for elasticsearch, then re-migrate.

    Destructive: DELETEs every index matching '<ELASTIC_INDEX>*' before
    recreating the base index and running migrate_search.
    """
    from website.app import init_app
    import requests
    from website import settings
    init_app(routes=False, set_backends=True)
    # ELASTIC_URI may or may not include a scheme; add one only when missing.
    if not settings.ELASTIC_URI.startswith('http'):
        protocol = 'http://' if settings.DEBUG_MODE else 'https://'
    else:
        protocol = ''
    url = '{protocol}{uri}/{index}'.format(
        protocol=protocol,
        uri=settings.ELASTIC_URI.rstrip('/'),
        index=settings.ELASTIC_INDEX,
    )
    print('Deleting index {}'.format(settings.ELASTIC_INDEX))
    print('----- DELETE {}*'.format(url))
    # Trailing '*' wildcard removes the index and any derived/aliased ones.
    requests.delete(url + '*')
    print('Creating index {}'.format(settings.ELASTIC_INDEX))
    print('----- PUT {}'.format(url))
    requests.put(url)
    # Repopulate without re-deleting the freshly created index.
    migrate_search(ctx, delete=False)
@task
def mailserver(ctx, port=1025):
    """Run a local debugging SMTP server that dumps messages to stdout."""
    smtp_cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
    ctx.run(bin_prefix(smtp_cmd), pty=True)
@task
def syntax(ctx):
    """Use pre-commit to run formatters and linters."""
    # --show-diff-on-failure prints what the formatters would change.
    ctx.run('pre-commit run --all-files --show-diff-on-failure', echo=True)
@task(aliases=['req'])
def requirements(ctx, base=False, addons=False, release=False, dev=False, all=False):
    """Install python dependencies.
    Examples:
        inv requirements
        inv requirements --all
    You should use --all for updating your developement environment.
    --all will install (in order): addons, dev and the base requirements.
    By default, base requirements will run. However, if any set of addons, release, or dev are chosen, base
    will have to be mentioned explicitly in order to run. This is to remain compatible with previous usages. Release
    requirements will prevent dev, and base from running.
    """
    # NOTE(review): ``all`` shadows the builtin, but it is part of the task's
    # CLI interface (--all), so it cannot be renamed without breaking callers.
    if all:
        base = True
        addons = True
        dev = True
    # Compatibility default: plain `inv requirements` installs base.
    if not(addons or dev):
        base = True
    if release or addons:
        addon_requirements(ctx)
    # "release" takes precedence
    if release:
        req_file = os.path.join(HERE, 'requirements', 'release.txt')
        ctx.run(
            pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
            echo=True
        )
    else:
        if dev:  # then dev requirements
            req_file = os.path.join(HERE, 'requirements', 'dev.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
        if base:  # then base requirements
            req_file = os.path.join(HERE, 'requirements.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
    # fix URITemplate name conflict h/t @github
    ctx.run('pip uninstall uritemplate.py --yes || true')
    ctx.run('pip install --no-cache-dir uritemplate.py==0.3.0')
@task
def test_module(ctx, module=None, numprocesses=None, nocapture=False, params=None, coverage=False, testmon=False):
    """Helper for running tests.

    Builds a pytest argument list from the task flags and exits the process
    with pytest's return code. `module` may be a single dotted path/directory
    or a list of them (note `basestring` — this file targets Python 2).
    """
    os.environ['DJANGO_SETTINGS_MODULE'] = 'osf_tests.settings'
    import pytest
    if not numprocesses:
        from multiprocessing import cpu_count
        numprocesses = cpu_count()
    numprocesses = int(numprocesses)
    # NOTE: Subprocess to compensate for lack of thread safety in the httpretty module.
    # https://github.com/gabrielfalcao/HTTPretty/issues/209#issue-54090252
    args = []
    if coverage:
        args.extend([
            '--cov-report', 'term-missing',
            '--cov', 'admin',
            '--cov', 'addons',
            '--cov', 'api',
            '--cov', 'framework',
            '--cov', 'osf',
            '--cov', 'website',
        ])
    if not nocapture:
        args += ['-s']
    if numprocesses > 1:
        # pytest-xdist: parallelize across workers; never restart crashed workers.
        args += ['-n {}'.format(numprocesses), '--max-slave-restart=0']
    modules = [module] if isinstance(module, basestring) else module
    args.extend(modules)
    if testmon:
        args.extend(['--testmon'])
    if params:
        params = [params] if isinstance(params, basestring) else params
        args.extend(params)
    retcode = pytest.main(args)
    # exit code 5 is all tests skipped which is the same as passing with testmon
    sys.exit(0 if retcode == NO_TESTS_COLLECTED else retcode)
# Test-suite groupings consumed by the test_* tasks below. The api_tests
# directories are split into three batches so parallel CI jobs finish in
# roughly similar time.
OSF_TESTS = [
    'osf_tests',
]
WEBSITE_TESTS = [
    'tests',
]
API_TESTS1 = [
    'api_tests/identifiers',
    'api_tests/institutions',
    'api_tests/licenses',
    'api_tests/logs',
    'api_tests/schemas',
    'api_tests/providers',
    'api_tests/preprints',
    'api_tests/registrations',
    'api_tests/users',
]
API_TESTS2 = [
    'api_tests/actions',
    'api_tests/chronos',
    'api_tests/meetings',
    'api_tests/metrics',
    'api_tests/nodes',
    'api_tests/osf_groups',
    'api_tests/requests',
    'api_tests/subscriptions',
    'api_tests/waffle',
    'api_tests/wb',
]
API_TESTS3 = [
    'api_tests/addons_tests',
    'api_tests/alerts',
    'api_tests/applications',
    'api_tests/banners',
    'api_tests/base',
    'api_tests/collections',
    'api_tests/comments',
    'api_tests/crossref',
    'api_tests/files',
    'api_tests/guids',
    'api_tests/reviews',
    'api_tests/regions',
    'api_tests/search',
    'api_tests/scopes',
    'api_tests/taxonomies',
    'api_tests/test',
    'api_tests/tokens',
    'api_tests/view_only_links',
    'api_tests/wikis',
]
ADDON_TESTS = [
    'addons',
]
ADMIN_TESTS = [
    'admin_tests',
]
@task
def test_osf(ctx, numprocesses=None, coverage=False, testmon=False):
    """Run the OSF test suite."""
    print('Testing modules "{}"'.format(OSF_TESTS))
    test_module(ctx, module=OSF_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_website(ctx, numprocesses=None, coverage=False, testmon=False):
    """Run the old test suite."""
    print('Testing modules "{}"'.format(WEBSITE_TESTS))
    test_module(ctx, module=WEBSITE_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_api1(ctx, numprocesses=None, coverage=False, testmon=False):
    """Run the API test suite."""
    # Batch 1 also carries the admin tests to balance job duration.
    print('Testing modules "{}"'.format(API_TESTS1 + ADMIN_TESTS))
    test_module(ctx, module=API_TESTS1 + ADMIN_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_api2(ctx, numprocesses=None, coverage=False, testmon=False):
    """Run the API test suite."""
    print('Testing modules "{}"'.format(API_TESTS2))
    test_module(ctx, module=API_TESTS2, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_api3(ctx, numprocesses=None, coverage=False, testmon=False):
    """Run the API test suite."""
    # Batch 3 also carries the osf_tests to balance job duration.
    print('Testing modules "{}"'.format(API_TESTS3 + OSF_TESTS))
    # NOTE: There may be some concurrency issues with ES
    test_module(ctx, module=API_TESTS3 + OSF_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_admin(ctx, numprocesses=None, coverage=False, testmon=False):
    """Run the Admin test suite."""
    print('Testing module "admin_tests"')
    test_module(ctx, module=ADMIN_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_addons(ctx, numprocesses=None, coverage=False, testmon=False):
    """Run all the tests in the addons directory.
    """
    print('Testing modules "{}"'.format(ADDON_TESTS))
    test_module(ctx, module=ADDON_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test(ctx, all=False, lint=False):
    """
    Run unit tests: OSF (always), plus addons and syntax checks (optional)
    """
    if lint:
        syntax(ctx)
    # Each sub-task calls sys.exit() via test_module, so in practice only the
    # first suite runs per process — presumably intentional for CI; verify.
    test_website(ctx)  # /tests
    test_api1(ctx)
    test_api2(ctx)
    test_api3(ctx)  # also /osf_tests
    if all:
        test_addons(ctx)
        # TODO: Enable admin tests
        test_admin(ctx)
        karma(ctx)
@task
def remove_failures_from_testmon(ctx, db_path=None):
    """Purge cached results for failed tests from a testmon sqlite database.

    Args:
        db_path: Path to the testmon data file (sqlite database).

    Testmon skips tests whose cached result is unchanged, so dropping the
    non-passing rows forces those tests to re-run next time.
    """
    conn = sqlite3.connect(db_path)
    try:
        # BUG FIX: sqlite3 opens an implicit transaction on DML and the
        # original never committed, so the DELETE was rolled back when the
        # connection was garbage-collected. `with conn` commits on success.
        with conn:
            tests_decached = conn.execute(
                "delete from node where result <> '{}'").rowcount
    finally:
        conn.close()  # don't leak the database handle
    ctx.run('echo {} failures purged from travis cache'.format(tests_decached))
@task
def travis_setup(ctx):
    """Install the JS build prerequisites needed by the test suites on Travis."""
    ctx.run('npm install -g bower', echo=True)
    # Pin the two front-end packages to the exact versions declared in
    # package.json / bower.json rather than installing everything.
    with open('package.json', 'r') as fobj:
        package_json = json.load(fobj)
    ctx.run('npm install @centerforopenscience/list-of-licenses@{}'.format(package_json['dependencies']['@centerforopenscience/list-of-licenses']), echo=True)
    with open('bower.json', 'r') as fobj:
        bower_json = json.load(fobj)
    ctx.run('bower install {}'.format(bower_json['dependencies']['styles']), echo=True)
@task
def test_travis_addons(ctx, numprocesses=None, coverage=False, testmon=False):
    """
    Run half of the tests to help travis go faster.
    """
    travis_setup(ctx)
    syntax(ctx)
    test_addons(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_travis_website(ctx, numprocesses=None, coverage=False, testmon=False):
    """
    Run other half of the tests to help travis go faster.
    """
    travis_setup(ctx)
    test_website(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_travis_api1_and_js(ctx, numprocesses=None, coverage=False, testmon=False):
    """Travis job: API batch 1 (JS/karma temporarily disabled, see TODO)."""
    # TODO: Uncomment when https://github.com/travis-ci/travis-ci/issues/8836 is resolved
    # karma(ctx)
    travis_setup(ctx)
    test_api1(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_travis_api2(ctx, numprocesses=None, coverage=False, testmon=False):
    """Travis job: API batch 2."""
    travis_setup(ctx)
    test_api2(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def test_travis_api3_and_osf(ctx, numprocesses=None, coverage=False, testmon=False):
    """Travis job: API batch 3 plus the osf_tests suite."""
    travis_setup(ctx)
    test_api3(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon)
@task
def karma(ctx, travis=False):
    """Run JS tests with Karma. Requires Chrome to be installed."""
    # The travis variant uses the CI-specific yarn script (headless settings).
    if travis:
        return ctx.run('yarn test-travis', echo=True)
    ctx.run('yarn test', echo=True)
@task
def wheelhouse(ctx, addons=False, release=False, dev=False, pty=True):
    """Build wheels for python dependencies.
    Examples:
        inv wheelhouse --dev
        inv wheelhouse --addons
        inv wheelhouse --release
    """
    # First build wheels for every addon that declares its own requirements.
    if release or addons:
        for directory in os.listdir(settings.ADDON_PATH):
            path = os.path.join(settings.ADDON_PATH, directory)
            if os.path.isdir(path):
                req_file = os.path.join(path, 'requirements.txt')
                if os.path.exists(req_file):
                    cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format(
                        WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH,
                    )
                    ctx.run(cmd, pty=pty)
    # Then build wheels for exactly one of the release / dev / base files.
    if release:
        req_file = os.path.join(HERE, 'requirements', 'release.txt')
    elif dev:
        req_file = os.path.join(HERE, 'requirements', 'dev.txt')
    else:
        req_file = os.path.join(HERE, 'requirements.txt')
    cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format(
        WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH,
    )
    ctx.run(cmd, pty=pty)
@task
def addon_requirements(ctx):
    """Install all addon requirements."""
    # Each addon directory may ship its own requirements.txt; install them all
    # under the shared constraints file.
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory)
        requirements_file = os.path.join(path, 'requirements.txt')
        if os.path.isdir(path) and os.path.isfile(requirements_file):
            print('Installing requirements for {0}'.format(directory))
            ctx.run(
                pip_install(requirements_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
    print('Finished installing addon requirements')
@task
def travis_addon_settings(ctx):
    """Activate each addon's Travis settings by copying local-travis.py to local.py."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        # The original probed for the file with a bare open() that was never
        # closed, leaking a file handle per addon; an isfile check is enough.
        if os.path.isdir(path) and os.path.isfile(os.path.join(path, 'local-travis.py')):
            ctx.run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
@task
def copy_addon_settings(ctx):
    """Seed each addon's local.py from local-dist.py when no local.py exists yet."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
            # The original probed for local-dist.py with a bare open() that was
            # never closed, leaking a file handle; an isfile check is enough.
            if os.path.isfile(os.path.join(path, 'local-dist.py')):
                ctx.run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
@task
def copy_settings(ctx, addons=False):
    """Seed website (and optionally addon) local settings from the -dist templates."""
    # Website settings
    if not os.path.isfile('website/settings/local.py'):
        print('Creating local.py file')
        ctx.run('cp website/settings/local-dist.py website/settings/local.py')
    # Addon settings
    if addons:
        copy_addon_settings(ctx)
@task(aliases=['bower'])
def bower_install(ctx):
    """Prune and install bower-managed front-end packages via the local bower binary."""
    print('Installing bower-managed packages')
    bower_bin = os.path.join(HERE, 'node_modules', '.bin', 'bower')
    # --allow-root lets this run inside docker containers running as root.
    ctx.run('{} prune --allow-root'.format(bower_bin), echo=True)
    ctx.run('{} install --allow-root'.format(bower_bin), echo=True)
@task
def docker_init(ctx):
    """Initial docker setup"""
    print('You will be asked for your sudo password to continue...')
    # On macOS, alias a loopback address so containers can reach host services.
    if platform.system() == 'Darwin':  # Mac OSX
        ctx.run('sudo ifconfig lo0 alias 192.168.168.167')
    else:
        print('Your system is not recognized, you will have to setup docker manually')
def ensure_docker_env_setup(ctx):
    """Ensure docker debug env vars and the loopback alias are configured.

    Idempotent: the DOCKER_ENV_SETUP sentinel variable records that setup
    already ran in this process environment.
    """
    # BUG FIX: the original used hasattr(os.environ, 'DOCKER_ENV_SETUP'),
    # which tests for an *attribute* on the mapping object and is therefore
    # always False — so setup (including docker_init) re-ran on every call.
    if os.environ.get('DOCKER_ENV_SETUP') == '1':
        return
    os.environ['WEB_REMOTE_DEBUG'] = '192.168.168.167:11000'
    os.environ['API_REMOTE_DEBUG'] = '192.168.168.167:12000'
    os.environ['WORKER_REMOTE_DEBUG'] = '192.168.168.167:13000'
    os.environ['DOCKER_ENV_SETUP'] = '1'
    docker_init(ctx)
@task
def docker_requirements(ctx):
    """Bring up the docker-compose services that install python requirements."""
    ensure_docker_env_setup(ctx)
    ctx.run('docker-compose up requirements requirements_mfr requirements_wb')
@task
def docker_appservices(ctx):
    """Bring up the supporting services (assets, fakecas, search, databases)."""
    ensure_docker_env_setup(ctx)
    ctx.run('docker-compose up assets fakecas elasticsearch tokumx postgres')
@task
def docker_osf(ctx):
    """Bring up the OSF application containers (mfr, wb, web, api)."""
    ensure_docker_env_setup(ctx)
    ctx.run('docker-compose up mfr wb web api')
@task
def clear_sessions(ctx, months=1, dry_run=False):
    """Delete sessions older than `months` months (no-op when dry_run is set)."""
    # Imported lazily so the app (and its backends) only initialize when the
    # task actually runs.
    from website.app import init_app
    init_app(routes=False, set_backends=True)
    from scripts import clear_sessions
    clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(ctx, name, finish=False, push=False):
    """Rename hotfix branch to hotfix/<next-patch-version> and optionally
    finish hotfix.

    name   - existing branch to rename
    finish - also run `git flow hotfix finish` for the new version
    push   - push master (with tags) and develop to origin
    """
    # Typo fix: message previously read "curent version".
    print('Checking out master to calculate current version')
    ctx.run('git checkout master')
    latest_version = latest_tag_info()['current_version']
    print('Current version is: {}'.format(latest_version))
    # Bump the patch component of the semver triple.
    major, minor, patch = latest_version.split('.')
    next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
    print('Bumping to next patch version: {}'.format(next_patch_version))
    print('Renaming branch...')
    new_branch_name = 'hotfix/{}'.format(next_patch_version)
    ctx.run('git checkout {}'.format(name), echo=True)
    ctx.run('git branch -m {}'.format(new_branch_name), echo=True)
    if finish:
        ctx.run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
    if push:
        ctx.run('git push --follow-tags origin master', echo=True)
        ctx.run('git push origin develop', echo=True)
@task
def feature(ctx, name, finish=False, push=False):
    """Rename the current branch to a feature branch and optionally finish it."""
    print('Renaming branch...')
    ctx.run('git branch -m feature/{}'.format(name), echo=True)
    if finish:
        ctx.run('git flow feature finish {}'.format(name), echo=True)
    if push:
        ctx.run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
    """Parse `git describe` output for the most recent tag.

    Returns a dict with keys `commit_sha`, `distance_to_latest_tag`,
    `current_version` (tag with any leading 'v' stripped), and `dirty`
    (present only when the working tree has local changes).

    Raises:
        subprocess.CalledProcessError: if `git describe` fails (e.g. no tags).
    """
    try:
        # git-describe doesn't update the git-index; an explicit
        # "git update-index --refresh" would, but isn't needed here.
        # Output format: <tag>-<distance>-g<sha>[-dirty], split on '-'.
        describe_out = subprocess.check_output([
            'git',
            'describe',
            '--dirty',
            '--tags',
            '--long',
            '--abbrev=40'
        ], stderr=subprocess.STDOUT
        ).decode().split('-')
    except subprocess.CalledProcessError:
        # The original re-raised `err` here, which left a logging fallback and
        # `return {}` unreachable; the dead code has been removed and a bare
        # `raise` preserves the original traceback.
        raise
    info = {}
    if describe_out[-1].strip() == 'dirty':
        info['dirty'] = True
        describe_out.pop()
    info['commit_sha'] = describe_out.pop().lstrip('g')
    info['distance_to_latest_tag'] = int(describe_out.pop())
    info['current_version'] = describe_out.pop().lstrip('v')
    assert 0 == len(describe_out)
    return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(ctx, domain, bits=2048):
    """Generate a passphrase-protected (des3) RSA private key for `domain`."""
    cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
    ctx.run(cmd)
@task
def generate_key_nopass(ctx, domain):
    """Strip the passphrase from `<domain>.key`, writing `<domain>.key.nopass`."""
    cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
        domain=domain
    )
    ctx.run(cmd)
@task
def generate_csr(ctx, domain):
    """Create a certificate signing request from the passphrase-free key."""
    cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
        domain=domain
    )
    ctx.run(cmd)
@task
def request_ssl_cert(ctx, domain):
    """Generate a key, a key with password removed, and a signing request for
    the specified domain.
    Usage:
    > invoke request_ssl_cert pizza.osf.io
    """
    generate_key(ctx, domain)
    generate_key_nopass(ctx, domain)
    generate_csr(ctx, domain)
@task
def bundle_certs(ctx, domain, cert_path):
    """Concatenate certificates from NameCheap in the correct order. Certificate
    files must be in the same directory.
    """
    # Order matters: leaf cert first, then intermediates, then the root.
    cert_files = [
        '{0}.crt'.format(domain),
        'COMODORSADomainValidationSecureServerCA.crt',
        'COMODORSAAddTrustCA.crt',
        'AddTrustExternalCARoot.crt',
    ]
    certs = ' '.join(
        os.path.join(cert_path, cert_file)
        for cert_file in cert_files
    )
    cmd = 'cat {certs} > {domain}.bundle.crt'.format(
        certs=certs,
        domain=domain,
    )
    ctx.run(cmd)
@task
def clean_assets(ctx):
    """Remove built JS files."""
    public_path = os.path.join(HERE, 'website', 'static', 'public')
    js_path = os.path.join(public_path, 'js')
    ctx.run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(ctx, clean=False, watch=False, dev=False, colors=False):
    """Build static assets with webpack."""
    if clean:
        clean_assets(ctx)
    # Choose the dev or prod yarn script, then append optional webpack flags.
    args = ['yarn run webpack-{}'.format('dev' if dev else 'prod')]
    args += ['--progress']
    if watch:
        args += ['--watch']
    if colors:
        args += ['--colors']
    command = ' '.join(args)
    ctx.run(command, echo=True)
@task()
def build_js_config_files(ctx):
    """Dump server-side settings (node categories) to JSON for the JS build."""
    from website import settings
    print('Building JS config files...')
    # NOTE(review): 'wb' + json.dump works on Python 2 (this file uses
    # `basestring` elsewhere); under Python 3 this would need mode 'w'.
    with open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'wb') as fp:
        json.dump(settings.NODE_CATEGORY_MAP, fp)
    print('...Done.')
@task()
def assets(ctx, dev=False, watch=False, colors=False):
    """Install and build static assets."""
    command = 'yarn install --frozen-lockfile'
    if not dev:
        command += ' --production'
    ctx.run(command, echo=True)
    bower_install(ctx)
    build_js_config_files(ctx)
    # Always set clean=False to prevent possible mistakes
    # on prod
    webpack(ctx, clean=False, watch=watch, dev=dev, colors=colors)
@task
def generate_self_signed(ctx, domain):
    """Generate self-signed SSL key and certificate.
    """
    # One-shot openssl call: 1-year validity, new 2048-bit key, no passphrase.
    cmd = (
        'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
        ' -keyout {0}.key -out {0}.crt'
    ).format(domain)
    ctx.run(cmd)
@task
def update_citation_styles(ctx):
    """Re-parse the citation style repository and report how many styles loaded."""
    from scripts import parse_citation_styles
    total = parse_citation_styles.main()
    print('Parsed {} styles'.format(total))
@task
def clean(ctx, verbose=False):
    """Delete compiled *.pyc files from the source tree."""
    ctx.run('find . -name "*.pyc" -delete', echo=True)
@task(default=True)
def usage(ctx):
    """Default task: list all available invoke tasks."""
    ctx.run('invoke --list')
### Maintenance Tasks ###
@task
def set_maintenance(ctx, message='', level=1, start=None, end=None):
    """Display maintenance notice across OSF applications (incl. preprints, registries, etc.)

    start - Start time for the maintenance period
    end - End time for the maintenance period
    NOTE: If no start or end values are provided, default to starting now
    and ending 24 hours from now.
    message - Message to display. If omitted, will be:
    "The site will undergo maintenance between <localized start time> and <localized end time>. Thank you
    for your patience."
    level - Severity level. Modifies the color of the displayed notice. Must be one of 1 (info), 2 (warning), 3 (danger).
    Examples:
        invoke set_maintenance --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00
        invoke set_maintenance --message 'The OSF is experiencing issues connecting to a 3rd party service' --level 2 --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00
    """
    # FIX: the docstring above was previously a bare string placed *after* the
    # imports, so it was a no-op expression and never exposed as the task's
    # help text; it now sits directly under the def.
    from website.app import setup_django
    setup_django()
    # Local import (deliberately shadows this task's name inside the body) so
    # Django setup only happens when the task actually runs.
    from website.maintenance import set_maintenance
    state = set_maintenance(message, level, start, end)
    print('Maintenance notice up {} to {}.'.format(state['start'], state['end']))
@task
def unset_maintenance(ctx):
    """Remove the maintenance notice from all OSF applications."""
    from website.app import setup_django
    setup_django()
    from website.maintenance import unset_maintenance
    print('Taking down maintenance notice...')
    unset_maintenance()
    print('...Done.')
| 32.262806 | 183 | 0.660224 |
35e1ba6164187e38c5cd0fe22c4900338e2b5897 | 6,098 | py | Python | tests/unit/test_exceptions.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | 1 | 2020-11-29T23:47:28.000Z | 2020-11-29T23:47:28.000Z | tests/unit/test_exceptions.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | null | null | null | tests/unit/test_exceptions.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
import pytest
from praw.exceptions import (
APIException,
ClientException,
DuplicateReplaceException,
InvalidFlairTemplateID,
InvalidImplicitAuth,
InvalidURL,
MediaPostFailed,
MissingRequiredAttributeException,
PRAWException,
RedditAPIException,
RedditErrorItem,
WebSocketException,
)
class TestPRAWException:
    """Tests for the base PRAWException class."""
    def test_inheritance(self):
        assert issubclass(PRAWException, Exception)
    def test_str(self):
        assert str(PRAWException()) == ""
        assert str(PRAWException("foo")) == "foo"
class TestRedditErrorItem:
    """Tests for RedditErrorItem equality, properties, and representations."""
    def test_equality(self):
        resp = ["BAD_SOMETHING", "invalid something", "some_field"]
        error = RedditErrorItem(*resp)
        error2 = RedditErrorItem(*resp)
        assert error == error2
        # Comparison against an unrelated type must be unequal, not raise.
        assert error != 0
    def test_property(self):
        error = RedditErrorItem("BAD_SOMETHING", "invalid something", "some_field")
        assert (
            error.error_message
            == "BAD_SOMETHING: 'invalid something' on field 'some_field'"
        )
    def test_str(self):
        error = RedditErrorItem("BAD_SOMETHING", "invalid something", "some_field")
        assert str(error) == "BAD_SOMETHING: 'invalid something' on field 'some_field'"
    def test_repr(self):
        error = RedditErrorItem("BAD_SOMETHING", "invalid something", "some_field")
        assert (
            repr(error) == "RedditErrorItem(error_type='BAD_SOMETHING', message="
            "'invalid something', field='some_field')"
        )
class TestAPIException:
    """Ensure the deprecated APIException alias still catches RedditAPIException."""
    def test_catch(self):
        exc = RedditAPIException([["test", "testing", "test"]])
        with pytest.raises(APIException):
            raise exc
class TestRedditAPIException:
    """Tests for RedditAPIException construction and its error-item container."""
    def test_inheritance(self):
        assert issubclass(RedditAPIException, PRAWException)
    def test_items(self):
        # Both raw 3-element lists and pre-built RedditErrorItems are accepted
        # and normalized to RedditErrorItem instances.
        container = RedditAPIException(
            [
                ["BAD_SOMETHING", "invalid something", "some_field"],
                RedditErrorItem("BAD_SOMETHING", "invalid something", "some_field"),
            ]
        )
        for exception in container.items:
            assert isinstance(exception, RedditErrorItem)
    @pytest.mark.filterwarnings("ignore", category=DeprecationWarning)
    def test_apiexception_value(self):
        # Legacy single-error construction styles still expose the old
        # error_type/message/field attributes (deprecated).
        exc = RedditAPIException("test", "testing", "test")
        assert exc.error_type == "test"
        exc2 = RedditAPIException(["test", "testing", "test"])
        assert exc2.message == "testing"
        exc3 = RedditAPIException([["test", "testing", "test"]])
        assert exc3.field == "test"
class TestClientException:
    """Tests for the ClientException base class."""
    def test_inheritance(self):
        assert issubclass(ClientException, PRAWException)
    def test_str(self):
        assert str(ClientException()) == ""
        assert str(ClientException("error message")) == "error message"
class TestDuplicateReplaceException:
    """Tests for DuplicateReplaceException's inheritance and fixed message."""
    def test_inheritance(self):
        assert issubclass(DuplicateReplaceException, ClientException)
    def test_message(self):
        assert (
            str(DuplicateReplaceException())
            == "A duplicate comment has been detected. Are you attempting to "
            "call ``replace_more_comments`` more than once?"
        )
class TestInvalidFlairTemplateID:
    """Tests for InvalidFlairTemplateID's inheritance and message formatting."""
    def test_inheritance(self):
        assert issubclass(InvalidFlairTemplateID, ClientException)
    def test_str(self):
        assert (
            str(InvalidFlairTemplateID("123"))
            == "The flair template id ``123`` is invalid. If you are "
            "trying to create a flair, please use the ``add`` method."
        )
class TestInvalidImplicitAuth:
    """Tests for InvalidImplicitAuth's inheritance and fixed message."""
    def test_inheritance(self):
        assert issubclass(InvalidImplicitAuth, ClientException)
    def test_message(self):
        assert (
            str(InvalidImplicitAuth())
            == "Implicit authorization can only be used with installed apps."
        )
class TestInvalidURL:
    """Tests for InvalidURL's default and custom message templates."""
    def test_inheritance(self):
        assert issubclass(InvalidURL, ClientException)
    def test_message(self):
        assert (
            str(InvalidURL("https://www.google.com"))
            == "Invalid URL: https://www.google.com"
        )
    def test_custom_message(self):
        # The custom template has the URL substituted into its "{}" slot.
        assert (
            str(InvalidURL("https://www.google.com", message="Test custom {}"))
            == "Test custom https://www.google.com"
        )
class TestMissingRequiredAttributeException:
    """Tests for MissingRequiredAttributeException's inheritance and message."""
    def test_inheritance(self):
        assert issubclass(MissingRequiredAttributeException, ClientException)
    def test_str(self):
        assert str(MissingRequiredAttributeException()) == ""
        assert (
            str(MissingRequiredAttributeException("error message")) == "error message"
        )
class TestWebSocketException:
    """Tests for WebSocketException, including the deprecated original_exception attribute."""
    def test_inheritance(self):
        assert issubclass(WebSocketException, ClientException)
    def test_str(self):
        assert str(WebSocketException("", None)) == ""
        assert str(WebSocketException("error message", None)) == "error message"
    @pytest.mark.filterwarnings("ignore", category=DeprecationWarning)
    def test_exception_attr(self):
        # original_exception is a deprecated property backed by
        # _original_exception; it supports get, set, and delete.
        exc = WebSocketException(None, None)
        assert exc.original_exception is None
        assert isinstance(WebSocketException(None, Exception()), Exception)
        assert (
            str(WebSocketException(None, Exception("test")).original_exception)
            == "test"
        )
        exc.original_exception = Exception()
        assert isinstance(exc.original_exception, Exception)
        del exc.original_exception
        assert "_original_exception" not in vars(exc)
class TestMediaPostFailed:
    """Tests for MediaPostFailed's inheritance and fixed message."""
    def test_inheritance(self):
        assert issubclass(MediaPostFailed, WebSocketException)
    def test_message(self):
        assert (
            str(MediaPostFailed())
            == "The attempted media upload action has failed. Possible causes"
            " include the corruption of media files. Check that the media "
            "file can be opened on your local machine."
        )
| 31.595855 | 87 | 0.649557 |
7343486db45fc5d96cb203ba1b739d532dcc2f7e | 401 | py | Python | manage.py | andela-oadeniran/bucket_list_app | a4eaf5a706c74cfee919e8a466681db14cbb71c8 | [
"MIT"
] | null | null | null | manage.py | andela-oadeniran/bucket_list_app | a4eaf5a706c74cfee919e8a466681db14cbb71c8 | [
"MIT"
] | null | null | null | manage.py | andela-oadeniran/bucket_list_app | a4eaf5a706c74cfee919e8a466681db14cbb71c8 | [
"MIT"
] | null | null | null | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from bucketlist_api import db
from bucketlist_api.app import app
app.config.from_object('config.DevelopmentConfig')
app.config.from_envvar('BUCKETLIST_SETTINGS', silent=True)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| 22.277778 | 58 | 0.795511 |
176e3ae6afcc7280127f36eb09acf6f2813046bd | 234 | py | Python | magy.py | abcsds/Logo | 1a9d69adc48e21df9fbf9f2a3d223cd223c8e9be | [
"MIT"
] | null | null | null | magy.py | abcsds/Logo | 1a9d69adc48e21df9fbf9f2a3d223cd223c8e9be | [
"MIT"
] | null | null | null | magy.py | abcsds/Logo | 1a9d69adc48e21df9fbf9f2a3d223cd223c8e9be | [
"MIT"
] | null | null | null | from turtle import *
def tree(size):
if size >= 20:
fd(size)
lt(137.5/2)
tree(size/1.618)
rt(137.5/2)
tree(size/1.618)
rt(137.5/2)
tree(size/1.618)
lt(137.5/2)
bk(size)
lt(90)
bk(300)
tree(300)
| 13 | 20 | 0.542735 |
0e8d69635454c7094857a8424d68f2bc5da7379b | 968 | py | Python | SearchEngine/SearchEngine/urls.py | wangzihan424/SearchEngine | 7f6968369089f78f24f5df0bad5d3e81bcec94e6 | [
"MIT"
] | 1 | 2017-12-28T01:40:58.000Z | 2017-12-28T01:40:58.000Z | SearchEngine/SearchEngine/urls.py | wangzihan424/SearchEngine | 7f6968369089f78f24f5df0bad5d3e81bcec94e6 | [
"MIT"
] | null | null | null | SearchEngine/SearchEngine/urls.py | wangzihan424/SearchEngine | 7f6968369089f78f24f5df0bad5d3e81bcec94e6 | [
"MIT"
] | null | null | null | """SearchEngine URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from main import views as main_views
# URL routing table: the site root and /index/ share the main index view.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^index/',main_views.index),
    url(r'^$',main_views.index),
    url(r'^suggest/',main_views.suggest),  # presumably query autocomplete — confirm in main.views
    url(r'^search/',main_views.search),
]
| 35.851852 | 79 | 0.700413 |
a618fdfe7990dbdbbe169c1bd74ba16a35d66443 | 3,520 | py | Python | scripts/polspice_obsolete/parsl_config.py | tilmantroester/KiDS-1000xtSZ | 190f193d5d2fc514bcbe96ea15d882ea59c7a1cc | [
"MIT"
] | 1 | 2021-09-24T16:02:32.000Z | 2021-09-24T16:02:32.000Z | scripts/polspice_obsolete/parsl_config.py | tilmantroester/KiDS-1000xtSZ | 190f193d5d2fc514bcbe96ea15d882ea59c7a1cc | [
"MIT"
] | null | null | null | scripts/polspice_obsolete/parsl_config.py | tilmantroester/KiDS-1000xtSZ | 190f193d5d2fc514bcbe96ea15d882ea59c7a1cc | [
"MIT"
] | null | null | null | import os
import logging
import parsl
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from libsubmit.providers import LocalProvider
from libsubmit.providers import SlurmProvider
from libsubmit.launchers import SrunLauncher
from libsubmit.channels import LocalChannel
def setup_parsl(machine, n_slots, n_thread=1, walltime="00:30:00", memory=16000, parsl_dir="./parsl", partition="all"):
    """Configure and load a parsl IPyParallel executor for the given machine.

    machine   -- "local" (LocalProvider) or "cuillin" (SlurmProvider); anything
                 else raises ValueError.
    n_slots   -- maximum number of provider blocks (workers).
    n_thread  -- cpus per Slurm task (cuillin only).
    walltime  -- Slurm walltime limit (cuillin only).
    memory    -- Slurm mem-per-cpu in MB (cuillin only).
    parsl_dir -- working directory for parsl scripts/engines/run info.
    partition -- Slurm partition name (cuillin only).

    Returns parsl_dir after creating the working directories.
    """
    # Drop any previously loaded parsl configuration before loading a new one.
    parsl.clear()
    if machine == "local":
        script_dir = os.path.join(parsl_dir, "parsl_scripts")
        run_dir = os.path.join(parsl_dir, "runinfo")
        local_ipp_config = Config(
            executors=[
                IPyParallelExecutor(
                    label="local_ipp",
                    provider=LocalProvider(
                        channel=LocalChannel(userhome=parsl_dir, script_dir=parsl_dir),
                        init_blocks=1,
                        max_blocks=n_slots,
                        script_dir=parsl_dir,
                    ),
                    engine_dir=parsl_dir,
                    working_dir=parsl_dir,
                )
            ],
            app_cache=False,
            run_dir=run_dir,
            lazy_errors=False,
        )
        parsl.load(local_ipp_config)
        parsl.set_stream_logger(level=logging.INFO)
    elif machine == "cuillin":
        script_dir = os.path.join(parsl_dir, "parsl_scripts")
        run_dir = os.path.join(parsl_dir, "runinfo")
        # Extra #SBATCH directives appended to the generated submit script.
        slurm_overrides = "#SBATCH --mem-per-cpu {memory}\n#SBATCH --cpus-per-task {n_thread}\n" \
                          "#SBATCH --constraint=datadisk".format(memory=memory, n_thread=n_thread)
        cuillin_ipp_config = Config(
            executors=[
                IPyParallelExecutor(
                    label="cuillin_ipp",
                    provider=SlurmProvider(
                        partition=partition,
                        channel=LocalChannel(userhome=parsl_dir, script_dir=parsl_dir),
                        launcher=SrunLauncher(),
                        walltime=walltime,
                        init_blocks=1,
                        max_blocks=n_slots,
                        script_dir=parsl_dir,
                        overrides=slurm_overrides,
                    ),
                    working_dir=parsl_dir,
                )
            ],
            app_cache=False,
            run_dir=run_dir,
        )
        parsl.load(cuillin_ipp_config)
        parsl.set_stream_logger(level=logging.INFO)
    else:
        raise ValueError(f"Machine '{machine}' not supported.")
    # Ensure the working directories exist (created after config load; parsl
    # tolerates this because it also creates its own directories on demand —
    # TODO confirm against the parsl version in use).
    os.makedirs(parsl_dir, exist_ok=True)
    os.makedirs(script_dir, exist_ok=True)
    os.makedirs(run_dir, exist_ok=True)
    return parsl_dir
| 47.567568 | 119 | 0.436648 |
55fa09c25cf95180f4705c0695e33bfdc15139c1 | 602 | py | Python | budgetApp/migrations/0001_initial.py | PresterJuan/budget-tool | 9965e15fc3a0199c6f4f568360887b47ec4716bb | [
"MIT"
] | null | null | null | budgetApp/migrations/0001_initial.py | PresterJuan/budget-tool | 9965e15fc3a0199c6f4f568360887b47ec4716bb | [
"MIT"
] | null | null | null | budgetApp/migrations/0001_initial.py | PresterJuan/budget-tool | 9965e15fc3a0199c6f4f568360887b47ec4716bb | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-04-23 02:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Stuff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('top_name', models.CharField(max_length=264, unique=True)),
('budgeted', models.FloatField()),
('actual', models.FloatField()),
],
),
]
| 25.083333 | 114 | 0.561462 |
2f64bbedf64d40b4146e853d9a01673baf0d8716 | 613 | py | Python | test/fun_test.py | afcarl/toolbox | c6c9263bab58981bbd7211a0c760d4d9d75e098f | [
"MIT"
] | 1 | 2019-04-22T16:55:09.000Z | 2019-04-22T16:55:09.000Z | test/fun_test.py | afcarl/toolbox | c6c9263bab58981bbd7211a0c760d4d9d75e098f | [
"MIT"
] | null | null | null | test/fun_test.py | afcarl/toolbox | c6c9263bab58981bbd7211a0c760d4d9d75e098f | [
"MIT"
] | null | null | null | import toolbox.fun as fun
def fun_coverage():
"""Execute all the function of the fun module"""
fun.flatten([range(10), range(5)])
fun.flattenLists([range(10), range(5)])
fun.clip(1.5, 0.0, 1.0)
fun.norm((0.0, 0.0), (1.0, 1.0))
fun.norm_sq((0.0, 0.0), (2.0, -1.0))
fun.gaussian_kernel(1.0, 2.0)
fun.roulette_wheel(range(10))
return True
tests = [fun_coverage]
if __name__ == "__main__":
print("\033[1m%s\033[0m" % (__file__,))
for t in tests:
print('%s %s' % ('\033[1;32mPASS\033[0m' if t() else
'\033[1;31mFAIL\033[0m', t.__doc__)) | 27.863636 | 61 | 0.5677 |
4b06e2494dfdae2fe6827d70615386aaf53805ac | 6,854 | py | Python | psq/queue_test.py | Tomesco/bookshelf-demo-project | 9d422f3aa04edbb3312d3e177caf699653ed6a73 | [
"Apache-2.0"
] | 210 | 2015-07-29T16:50:01.000Z | 2022-03-02T15:24:52.000Z | psq/queue_test.py | Tomesco/bookshelf-demo-project | 9d422f3aa04edbb3312d3e177caf699653ed6a73 | [
"Apache-2.0"
] | 60 | 2015-12-03T23:15:57.000Z | 2021-01-21T09:25:42.000Z | psq/queue_test.py | Tomesco/bookshelf-demo-project | 9d422f3aa04edbb3312d3e177caf699653ed6a73 | [
"Apache-2.0"
] | 47 | 2015-12-21T06:09:36.000Z | 2021-09-04T13:20:21.000Z | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from pickle import dumps
from google.cloud import pubsub_v1
import google.cloud.exceptions
import google.cloud.pubsub_v1.subscriber.message
import mock
from psq import current_queue
from psq.queue import Queue
from psq.task import Task
import pytest
def make_publisher_client():
    # Autospec'd mock so calls are validated against the real client signature.
    return mock.create_autospec(
        pubsub_v1.PublisherClient, instance=True)
def make_subscriber_client():
    # Autospec'd mock so calls are validated against the real client signature.
    return mock.create_autospec(
        pubsub_v1.SubscriberClient, instance=True)
class TestStorage(object):
    """Minimal in-memory task store used as a stand-in for real storage."""

    def __init__(self):
        # Maps task id -> task object.
        self._data = {}

    def get_task(self, task_id):
        """Return the stored task for *task_id*, or None if unknown."""
        return self._data.get(task_id)

    def put_task(self, task):
        """Store *task*, keyed by its ``id`` attribute."""
        self._data[task.id] = task
def dummy_queue_func():
    """Trivial picklable function used as queue work in the tests."""
    greeting = "Hello"
    return greeting
def test_constructor_creates_topic():
    publisher_client = make_publisher_client()
    subscriber_client = make_subscriber_client()
    # Simulate a missing topic so the Queue constructor must create it.
    publisher_client.get_topic.side_effect = (
        google.cloud.exceptions.NotFound(None, None))
    q = Queue(publisher_client, subscriber_client, 'test-project')
    publisher_client.create_topic.assert_called_once_with(q._get_topic_path())
def test_constructor_existing_topic():
    """No topic is created when one already exists."""
    publisher = make_publisher_client()
    subscriber = make_subscriber_client()
    Queue(publisher, subscriber, 'test-project')
    publisher.create_topic.assert_not_called()
def test_constructor_conflict():
    """A Conflict raised while creating the topic is tolerated (lost race)."""
    publisher = make_publisher_client()
    subscriber = make_subscriber_client()
    publisher.get_topic.side_effect = (
        google.cloud.exceptions.NotFound(None, None))
    publisher.create_topic.side_effect = (
        google.cloud.exceptions.Conflict(None, None))
    queue = Queue(publisher, subscriber, 'test-project')
    topic_path = queue._get_topic_path()
    publisher.get_topic.assert_called_once_with(topic_path)
    publisher.create_topic.assert_called_once_with(topic_path)
def make_queue(**kwargs):
    """Build a Queue wired to fresh mock Pub/Sub clients."""
    return Queue(
        make_publisher_client(), make_subscriber_client(),
        'test-project', **kwargs)
def test_get_or_create_subscription_creates_new():
    """The subscription is created when Pub/Sub reports it missing."""
    q = make_queue()
    q.subscriber_client.get_subscription.side_effect = (
        google.cloud.exceptions.NotFound(None, None))
    sub_path = q._get_or_create_subscription()
    q.subscriber_client.get_subscription.assert_called_once_with(sub_path)
    q.subscriber_client.create_subscription.assert_called_once_with(
        sub_path, topic=q._get_topic_path())
def test_get_or_create_subscription_existing():
    """An existing subscription is reused, not recreated."""
    q = make_queue()
    sub_path = q._get_or_create_subscription()
    q.subscriber_client.get_subscription.assert_called_once_with(sub_path)
    q.subscriber_client.create_subscription.assert_not_called()
def test_get_or_create_subscription_conflict():
    """A Conflict during subscription creation is tolerated (lost race)."""
    q = make_queue()
    q.subscriber_client.get_subscription.side_effect = (
        google.cloud.exceptions.NotFound(None, None))
    q.subscriber_client.create_subscription.side_effect = (
        google.cloud.exceptions.Conflict(None, None))
    sub_path = q._get_or_create_subscription()
    q.subscriber_client.get_subscription.assert_called_once_with(sub_path)
    q.subscriber_client.create_subscription.assert_called_once_with(
        sub_path, topic=q._get_topic_path())
def test_queue():
    """enqueue() publishes a Pub/Sub message and persists the task."""
    storage = TestStorage()
    q = make_queue(storage=storage)
    result = q.enqueue(sum, 1, 2, arg='c')
    assert q.publisher_client.publish.called
    stored = storage.get_task(result.task_id)
    assert stored.f == sum
    assert stored.args == (1, 2)
    assert stored.kwargs == {'arg': 'c'}
def test_listen():
    """listen() subscribes and wraps the user callback with task decoding."""
    q = make_queue()
    callback = mock.Mock()
    future = q.listen(callback)
    # Should create the subscription
    assert q.subscription
    # Should invoke the underlying pub/sub listen
    q.subscriber_client.subscribe.assert_called_once_with(
        q.subscription, callback=mock.ANY)
    assert future == q.subscriber_client.subscribe.return_value
    # Grab the wrapped callback and make sure invoking it decodes the task
    # (pickled in message.data), acks the message, and passes the task on.
    wrapped_callback = q.subscriber_client.subscribe.call_args[1]['callback']
    t = Task('1', sum, (1, 2), {'arg': 'c'})
    message = mock.create_autospec(
        google.cloud.pubsub_v1.subscriber.message.Message, instance=True)
    message.data = dumps(t)
    wrapped_callback(message)
    message.ack.assert_called_once_with()
    callback.assert_called_once_with(mock.ANY)
    # The task is re-created by unpickling, so compare by id, not identity.
    invoked_task = callback.call_args[0][0]
    assert invoked_task.id == t.id
def test_listen_existing_subscription():
    """listen() must not recreate a subscription that is already set."""
    q = make_queue()
    q.subscription = mock.sentinel.subscription
    q.listen(mock.sentinel.callback)
    assert not q.subscriber_client.create_subscription.called
def test__pubsub_message_callback_bad_value():
    """Undecodable payloads are acked and dropped without calling back."""
    callback = mock.Mock()
    message = mock.create_autospec(
        google.cloud.pubsub_v1.subscriber.message.Message, instance=True)
    message.data = b'bad'
    Queue._pubsub_message_callback(callback, message)
    assert message.ack.called
    assert not callback.called
def test_context():
    """queue_context() exposes the queue and runs any extra context manager."""
    q = make_queue()
    # Inside the context the proxy `current_queue` must resolve to this queue.
    with q.queue_context():
        assert current_queue == q
    # Test additional context manager: when `extra_context` is set on the
    # queue it must be entered alongside the queue context.
    spy = mock.Mock()
    @contextmanager
    def extra_context():
        spy()
        yield
    q.extra_context = extra_context
    with q.queue_context():
        assert spy.called
def test_cleanup():
    """cleanup() must be callable without raising."""
    make_queue().cleanup()
def test_synchronous_success():
    """A synchronous queue executes the task inline and exposes its result."""
    q = make_queue(storage=TestStorage(), asynchronous=False)
    assert q.enqueue(sum, [1, 2]).result() == 3
def test_synchronous_fail():
    """A failing synchronous task re-raises its exception from result()."""
    q = make_queue(storage=TestStorage(), asynchronous=False)
    r = q.enqueue(sum, "2")
    with pytest.raises(TypeError):
        r.result()
def test_string_function():
    """A dotted-path string is resolved to the target function at run time."""
    q = make_queue(storage=TestStorage(), asynchronous=False)
    outcome = q.enqueue('psq.queue_test.dummy_queue_func')
    assert outcome.result() == "Hello"
| 27.748988 | 79 | 0.730376 |
6423fea75204277b89d991fdf5d8fcecd31f17ed | 7,149 | py | Python | models/model_builder.py | Yuxin33/unmask-1 | 1c51c9c47f0d9e778d779df48364a46ef8921f77 | [
"MIT"
] | 7 | 2021-02-22T11:16:28.000Z | 2021-11-12T01:33:08.000Z | models/model_builder.py | Yuxin33/unmask-1 | 1c51c9c47f0d9e778d779df48364a46ef8921f77 | [
"MIT"
] | null | null | null | models/model_builder.py | Yuxin33/unmask-1 | 1c51c9c47f0d9e778d779df48364a46ef8921f77 | [
"MIT"
] | 3 | 2020-11-16T05:10:11.000Z | 2021-12-07T03:35:10.000Z | from __future__ import print_function
import torch
import torch.utils.data as data
import torchvision
from torchvision import transforms
import numpy as np
import torch.nn as nn
# from models.vgg import vgg16, vgg19
# from models.resnet import resnet50
from torchvision.models import resnet50, vgg16, densenet161, resnet101, densenet121
# https://gist.github.com/Fuchai/12f2321e6c8fa53058f5eb23aeddb6ab
class GenHelper(data.Dataset):
    """Dataset view that re-indexes a parent dataset through a mapping.

    Item *i* of this view is item ``mapping[i]`` of the parent ("mother")
    dataset; the view reports ``length`` items.
    """

    def __init__(self, mother, length, mapping):
        self.mother = mother
        self.length = length
        # mapping[i] is the index into the parent dataset for view index i.
        self.mapping = mapping

    def __getitem__(self, index):
        return self.mother[self.mapping[index]]

    def __len__(self):
        return self.length
def train_valid_split(ds, train_split=0.9, random_seed=None):
    """Randomly split a data.Dataset into (train, valid) GenHelper views.

    The training view holds ``int(len(ds) * train_split)`` items; the
    validation view holds the rest. Pass ``random_seed`` for a
    reproducible shuffle (seeds numpy's global RNG).
    """
    if random_seed is not None:
        np.random.seed(random_seed)

    total = len(ds)
    train_size = int(total * train_split)
    valid_size = total - train_size

    order = list(range(total))
    np.random.shuffle(order)

    train_view = GenHelper(ds, total - valid_size, order[:train_size])
    valid_view = GenHelper(ds, valid_size, order[train_size:])
    return train_view, valid_view
class Model_Builder:
    """Build a pretrained torchvision classifier plus the data loaders for
    the configured dataset ('cifar10', 'cifar100' or 'unmask').

    The classifier head is swapped for an ``output``-way linear head sized
    from the dataset / class-set options in ``args``.
    """

    def __init__(self, model_type, dataset, model_path, args):
        self.args = args
        self.model_type = model_type
        self.dataset = dataset
        self.model_path = model_path
        self.data_dir = args['data_dir']
        # Never updated anywhere in this class; get_bounds() reports these
        # initial values as-is. (np.Inf alias was removed in NumPy 2.0.)
        self.min_value = np.inf
        self.max_value = -np.inf
        if 'cifar10' in dataset:
            # NOTE(review): 'cifar10' is a substring of 'cifar100', so a
            # cifar100 run also takes this branch and gets a 10-way head —
            # confirm whether that is intended.
            output = 10
        elif 'unmask' in dataset:
            if 'cs3' in args['class_set']:
                output = 3
            elif 'cs5' in args['class_set']:
                output = 5
        # Replace the pretrained classifier head with a fresh `output`-way head.
        if model_type == 'vgg16':
            self.model = vgg16(pretrained=True)
            self.model.classifier = nn.Sequential(
                nn.Linear(512 * 7 * 7, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(True),
                nn.Dropout(),
                nn.Linear(4096, output))
        elif model_type == 'densenet121':
            self.model = densenet121(pretrained=True)
            self.model.classifier = nn.Linear(1024, output)
        elif model_type == 'resnet50':
            self.model = resnet50(pretrained=True)
            self.model.fc = nn.Linear(2048, output)
        elif model_type == 'resnet101':
            self.model = resnet101(pretrained=True)
            self.model.fc = nn.Linear(2048, output)
        self.model.cuda()
        device = args['device']
        self.model = self.model.to(device)
        if dataset == 'cifar10':
            self.train_loader, self.val_loader, self.test_loader = self.get_cifar10()
        elif dataset == 'cifar100':
            # NOTE: cifar100 provides no validation loader (two-tuple only).
            self.train_loader, self.test_loader = self.get_cifar100()
        elif dataset == 'unmask':
            self.train_loader, self.val_loader, self.test_loader = self.get_unmask()

    def get_cifar10(self):
        """Return (train, valid, test) DataLoaders for CIFAR-10."""
        transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        trainset = torchvision.datasets.CIFAR10(root='./data/cifar10/train', train=True, download=True, transform=transform)
        trainset, validset = train_valid_split(trainset)
        train_loader = torch.utils.data.DataLoader(trainset, batch_size=self.args['batch_size'], shuffle=True, num_workers=2)
        valid_loader = torch.utils.data.DataLoader(validset, batch_size=self.args['batch_size'], shuffle=True, num_workers=2)
        testset = torchvision.datasets.CIFAR10(root='./data/cifar10/test', train=False, download=True, transform=transform)
        test_loader = torch.utils.data.DataLoader(testset, batch_size=self.args['batch_size'], shuffle=False, num_workers=2)
        return train_loader, valid_loader, test_loader

    def get_cifar100(self):
        """Return (train, test) DataLoaders for CIFAR-100 with augmentation."""
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.ToTensor(),
            transforms.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343),
                                 (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))
        ])
        cifar100_training = torchvision.datasets.CIFAR100(root='./data/cifar100/', train=True, download=True, transform=transform_train)
        cifar100_training_loader = data.DataLoader(cifar100_training, shuffle=True, num_workers=2, batch_size=self.args['batch_size'])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343),
                                 (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))
        ])
        cifar100_test = torchvision.datasets.CIFAR100(root='./data/cifar100/', train=False, download=True, transform=transform_test)
        cifar100_test_loader = data.DataLoader(cifar100_test, shuffle=True, num_workers=2, batch_size=self.args['batch_size'])
        return cifar100_training_loader, cifar100_test_loader

    def get_unmask(self):
        """Return (train, val, test) ImageFolder DataLoaders for UnMask."""
        transform = transforms.Compose(
            [
                transforms.Resize(150),
                transforms.CenterCrop(150),
                transforms.ToTensor(),
            ])
        trainset = torchvision.datasets.ImageFolder(root='./data/unmask/{}/train/'.format(self.args['class_set']), transform=transform)
        train_loader = data.DataLoader(trainset, batch_size=self.args['batch_size'], shuffle=True, num_workers=0)
        valset = torchvision.datasets.ImageFolder(root='./data/unmask/{}/val/'.format(self.args['class_set']), transform=transform)
        val_loader = data.DataLoader(valset, batch_size=self.args['batch_size'], shuffle=True, num_workers=0)
        testset = torchvision.datasets.ImageFolder(root='./data/unmask/{}/test'.format(self.args['class_set']), transform=transform)
        test_loader = data.DataLoader(testset, batch_size=self.args['batch_size'], shuffle=True, num_workers=0)
        return train_loader, val_loader, test_loader

    def get_bounds(self):
        """Return (min_value, max_value); these are never updated from data."""
        return self.min_value, self.max_value

    def get_model(self):
        """Return the underlying torch model."""
        return self.model

    def refresh_model_builder(self):
        """Rebuild the model and loaders from the stored configuration.

        Bug fix: the original call omitted ``model_path``, shifting ``args``
        into that positional slot and crashing on ``args['data_dir']``.
        """
        self.__init__(self.model_type, self.dataset, self.model_path, self.args)

    def get_loaders(self):
        """Return the (train, val, test) loaders built at construction."""
        return self.train_loader, self.val_loader, self.test_loader
| 39.938547 | 137 | 0.643307 |
cde7af01bb1e5cae439adac06a0a91081f1704a5 | 1,141 | py | Python | Python Codes/CombineCSV.py | wcmears/python-codes | 22a787a3fd29ddea0d02dc86e150f8316cc8db9d | [
"Apache-2.0"
] | null | null | null | Python Codes/CombineCSV.py | wcmears/python-codes | 22a787a3fd29ddea0d02dc86e150f8316cc8db9d | [
"Apache-2.0"
] | null | null | null | Python Codes/CombineCSV.py | wcmears/python-codes | 22a787a3fd29ddea0d02dc86e150f8316cc8db9d | [
"Apache-2.0"
] | null | null | null | import csv
import os
import glob
import pandas as pd
import numpy as np
# Interactive helper: concatenate every .csv in a user-chosen directory into
# one file, keeping a single header row (pandas treats each file's first row
# as a header, so no duplicate header rows end up in the output).
print("\\CSV Combine and Remove Duplicate Headers, Written by W. Mears.\n")
directory = input(r"Enter the directory name (Relative or full pathnames accepted. Ex: C:\Users\yourname\path): ")
os.chdir(directory)  # change directory so csv gets saved to input
name = input("What would you like to name your combined file? Ex. name.csv : ")
extension = 'csv'  # set type of file
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
# Bug fix: the original seeded the result from all_filenames[1] and looped
# from index 2, silently dropping the first CSV in the directory. Read every
# file once and concatenate in a single pass instead.
frames = [pd.read_csv(filename) for filename in all_filenames]
combinedInfo = pd.concat(frames, ignore_index=True)
combinedInfo.to_csv(name, index=False, encoding='utf-8-sig')  # write combined csv to input directory
print("Combined file successfuly saved to input directory")
| 60.052632 | 115 | 0.750219 |
34a8a09664e4d1572215d28155f11c5447679d97 | 3,786 | py | Python | karma.py | dashezup/Telegram_Karma_Bot | 5b8a4035167aac8a5ceca539664d407bd49ce8d9 | [
"MIT"
] | null | null | null | karma.py | dashezup/Telegram_Karma_Bot | 5b8a4035167aac8a5ceca539664d407bd49ce8d9 | [
"MIT"
] | null | null | null | karma.py | dashezup/Telegram_Karma_Bot | 5b8a4035167aac8a5ceca539664d407bd49ce8d9 | [
"MIT"
] | 1 | 2022-02-17T14:47:25.000Z | 2022-02-17T14:47:25.000Z | import json
import glob
from os import path
from pyrogram import Client, filters
from config import bot_token, owner_id
# Pyrogram bot client; ":memory:" keeps the session in memory so no session
# file is written to disk.
app = Client(
    ":memory:",
    bot_token=bot_token,
    api_id=6,
    api_hash="eb06d4abfb49dc3eeb1aeb98ae0f581e",
)
# Message patterns that trigger a vote. Bug fix: the case-insensitive flag
# (?i) must lead the pattern — a global inline flag in mid-pattern is a
# DeprecationWarning on Python 3.6-3.10 and a hard error from 3.11 on.
# Matching behavior is unchanged ((?i) applied to the whole pattern anyway).
regex_upvote = r"(?i)^(\+|\+\+|\+1|thx|tnx|ty|thank you|thanx|thanks|pro|cool|good|👍)$"
regex_downvote = r"^(\-|\-\-|\-1|👎)$"
@app.on_message(filters.command(["start"]))
async def start(_, message):
    """Reply to /start with a short introduction message."""
    await message.reply_text(
        "Hey, I'm A Karma Bot, You Can Upvote Or Downvote Someone Using Me, Join @TheHamkerChat For Support!"
    )
# NOTE: the handler name shadows the builtin help(); harmless here since the
# function is only invoked by pyrogram's dispatcher.
@app.on_message(filters.command(["help"]))
async def help(_, message):
    """Reply to /help with the list of supported commands."""
    await message.reply_text(
        """+ To Upvote A Message.
- To Downvote A Message.
/karma To Check Karma Points Of This Group."""
    )
@app.on_message(filters.regex(regex_upvote))
async def upvote(_, message):
    """Increment the karma of the replied-to user by one.

    Karma is persisted per chat in a <chat_id>.json file mapping
    user-id strings to integer scores.
    """
    if not message.reply_to_message:
        await message.reply_text("Reply To A Message To Upvote.")
        return
    # Self-votes are rejected.
    if message.reply_to_message.from_user.id == message.from_user.id:
        await message.reply_text(
            "Public masturbation is not allowed."
        )
        return
    chat_id = message.chat.id
    user_id = message.reply_to_message.from_user.id
    user_mention = message.reply_to_message.from_user.mention
    filename = f"{chat_id}.json"
    if not path.exists(filename):
        # Seed the per-chat store so json.load below always succeeds.
        sample_bot = {"1527962675": 1}
        with open(filename, "w") as f:
            f.write(json.dumps(sample_bot))
    with open(filename) as f2:
        members = json.load(f2)
    # First-time upvote starts the user at 1; otherwise increment.
    if not f"{user_id}" in members:
        members[f"{user_id}"] = 1
    else:
        members[f"{user_id}"] += 1
    with open(filename, "w") as f3:
        f3.write(json.dumps(members))
    await message.reply_text(
        f'Incremented Karma of {user_mention} By 1 \nTotal Points: {members[f"{user_id}"]}'
    )
@app.on_message(filters.regex(regex_downvote))
async def downvote(_, message):
    """Decrement the karma of the replied-to user by one.

    Karma is persisted per chat in a <chat_id>.json file mapping
    user-id strings to integer scores.
    """
    if not message.reply_to_message:
        await message.reply_text("Reply To A Message To Downvote.")
        return
    # Self-votes are rejected.
    if message.reply_to_message.from_user.id == message.from_user.id:
        await message.reply_text(
            "Public masturbation is not allowed."
        )
        return
    chat_id = message.chat.id
    user_id = message.reply_to_message.from_user.id
    user_mention = message.reply_to_message.from_user.mention
    filename = f"{chat_id}.json"
    if not path.exists(filename):
        # Seed the per-chat store so json.load below always succeeds.
        sample_bot = {"1527962675": 1}
        with open(filename, "w") as f:
            f.write(json.dumps(sample_bot))
    with open(filename) as f2:
        members = json.load(f2)
    if not f"{user_id}" in members:
        # Bug fix: a first-time downvote must start the user at -1 — the
        # original set it to +1, awarding karma for being downvoted.
        members[f"{user_id}"] = -1
    else:
        members[f"{user_id}"] -= 1
    with open(filename, "w") as f3:
        f3.write(json.dumps(members))
    await message.reply_text(
        f'Decremented Karma Of {user_mention} By 1 \nTotal Points: {members[f"{user_id}"]}'
    )
@app.on_message(filters.command(["karma"]))
async def karma(_, message):
    """Reply to /karma with every member's score for this chat.

    NOTE(review): when /karma is sent as a reply, the guard below makes the
    handler do nothing at all — confirm whether that is intended.
    """
    if not message.reply_to_message:
        chat_id = message.chat.id
        filename = f"{chat_id}.json"
        with open(filename) as f2:
            members = json.load(f2)
        output = ""
        m = 0
        # One get_users call (and a full values() copy) per member — slow for
        # large chats, but kept as-is.
        for i in members.keys():
            print(i, m)
            output += f"`{(await app.get_users(i)).username}: {list(members.values())[m]}`\n"
            m += 1
        await message.reply_text(output)
# Owner-only command: send every per-chat karma store to the owner's PM.
@app.on_message(filters.command(["backup"]) & filters.user(owner_id))
async def backup(_, message):
    """Send all <chat_id>.json karma files to the bot owner."""
    m = await message.reply_text("Sending..")
    files = glob.glob("*.json")
    for i in files:
        await app.send_document(owner_id, i)
    await m.edit("Backup Sent In Your PM")
# Start long-polling; blocks until the bot is stopped.
app.run()
| 30.288 | 109 | 0.634443 |
d9658def53a7dbfe8aee5cc0746d4728dc586dde | 14,273 | py | Python | Datasets/svg.py | Dianezzy/YOLaT-VectorGraphicsRecognition | ae21ad5850a49048f639d9b283ded927c3b367f7 | [
"MIT"
] | 44 | 2021-12-24T02:49:10.000Z | 2022-03-30T06:50:26.000Z | Datasets/svg.py | Dianezzy/YOLaT-VectorGraphicsRecognition | ae21ad5850a49048f639d9b283ded927c3b367f7 | [
"MIT"
] | 2 | 2021-12-30T07:25:37.000Z | 2022-03-22T11:29:48.000Z | Datasets/svg.py | Dianezzy/YOLaT-VectorGraphicsRecognition | ae21ad5850a49048f639d9b283ded927c3b367f7 | [
"MIT"
] | 7 | 2021-12-24T02:30:16.000Z | 2022-03-22T10:59:29.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import os
import numpy as np
from xml.dom.minidom import parse, Node, parseString
from torch_geometric.data import Data
from Datasets.svg_parser import SVGParser, SVGGraphBuilderBezier
from sklearn.metrics.pairwise import euclidean_distances
import networkx as nx
#from a2c import a2c
class SESYDFloorPlan(torch.utils.data.Dataset):
    """SESYD floor-plan SVGs turned into bezier graphs with per-node
    bounding-box / class targets for symbol detection.

    Each item is a torch_geometric ``Data`` holding node features, several
    edge sets (shape / control / spatial) and ground-truth boxes/labels.
    """

    def __init__(self, root, opt, partition = 'train', data_aug = False):
        # `root` contains <partition>_list.txt with one SVG path per line.
        super(SESYDFloorPlan, self).__init__()
        svg_list = open(os.path.join(root, partition + '_list.txt')).readlines()
        svg_list = [os.path.join(root, line.strip()) for line in svg_list]
        self.graph_builder = SVGGraphBuilderBezier()
        #print(svg_list)
        # Distance threshold for building spatial (proximity) edges.
        self.pos_edge_th = opt.pos_edge_th
        # Enables random rotation (see random_transfer) at __getitem__ time.
        self.data_aug = data_aug
        self.svg_list = svg_list
        # Symbol label -> class index (16 fine-grained classes).
        self.class_dict = {
            'armchair':0,
            'bed':1,
            'door1':2,
            'door2':3,
            'sink1':4,
            'sink2':5,
            'sink3':6,
            'sink4':7,
            'sofa1':8,
            'sofa2':9,
            'table1':10,
            'table2':11,
            'table3':12,
            'tub':13,
            'window1':14,
            'window2':15
        }
        '''
        self.class_dict = {
            'armchair':0,
            'bed':1,
            'door1':2,
            'door2':2,
            'sink1':3,
            'sink2':3,
            'sink3':3,
            'sink4':3,
            'sofa1':4,
            'sofa2':4,
            'table1':5,
            'table2':5,
            'table3':5,
            'tub':6,
            'window1':7,
            'window2':7
        }
        '''
        #self.anchors = self.get_anchor()
        '''
        self.n_objects = 0
        for idx in range(len(self.svg_list)):
            filepath = self.svg_list[idx]
            print(filepath)
            p = SVGParser(filepath)
            width, height = p.get_image_size()
            #graph_dict = self.graph_builder.buildGraph(p.get_all_shape())
            gt_bbox, gt_labels = self._get_bbox(filepath, width, height)
            self.n_objects += gt_bbox.shape[0]
        print(self.n_objects)
        '''
        # Hard-coded total object count (the counting loop above is disabled).
        self.n_objects = 13238

    def __len__(self):
        return len(self.svg_list)

    def get_anchor(self):
        """Debug utility: print per-class box-size statistics, then exit."""
        bboxes = [[] for i in range(len(list(self.class_dict.keys())))]
        for filepath in self.svg_list:
            p = SVGParser(filepath)
            width, height = p.get_image_size()
            gt_bbox, gt_labels = self._get_bbox(filepath, width, height)
            # Width/height of each ground-truth box, grouped by class label.
            whs = gt_bbox[:, 2:] - gt_bbox[:, 0:2]
            for wh, l in zip(whs, gt_labels):
                print(l)
                bboxes[l].append(wh)
        bboxes = np.array(bboxes)
        for wh in bboxes:
            mean_box = np.median(wh, 0)
            print(mean_box, np.mean(wh, 0), np.max(wh, 0), np.min(wh, 0))
        print(bboxes.shape)
        raise SystemExit

    def _get_bbox(self, path, width, height):
        """Read ground-truth boxes/labels from the XML next to an SVG.

        Coordinates are normalized by the image width/height. Returns
        (bbox, labels) as numpy arrays; bbox rows are (x0, y0, x1, y1).
        """
        dom = parse(path.replace('.svg', '.xml'))
        root = dom.documentElement
        nodes = []
        for tagname in ['a', 'o']:
            nodes += root.getElementsByTagName(tagname)
        bbox = []
        labels = []
        for node in nodes:
            for n in node.childNodes:
                if n.nodeType != Node.ELEMENT_NODE:
                    continue
                x0 = float(n.getAttribute('x0')) / width
                y0 = float(n.getAttribute('y0')) / height
                x1 = float(n.getAttribute('x1')) / width
                y1 = float(n.getAttribute('y1')) / height
                label = n.getAttribute('label')
                bbox.append((x0, y0, x1, y1))
                labels.append(self.class_dict[label])
        return np.array(bbox), np.array(labels)

    def gen_y(self, graph_dict, bbox, labels, width, height):
        """Assign each graph node a ground-truth box, class and object id.

        Non-control nodes get the object whose box contains them (nearest
        top-left corner breaks ties); control nodes inherit the assignment
        of a non-control neighbour via the control edges. Exits if a node
        lies outside every box.
        """
        pos = graph_dict['pos']['spatial']
        is_control = graph_dict['attr']['is_control']
        #print(pos.shape, bbox.shape, labels.shape)
        #print(np.max(pos[:, 0]), np.max(pos[:, 1]))
        th = 1e-3
        gt_bb = []
        gt_cls = []
        gt_object = []
        for node_idx, p in enumerate(pos):
            if is_control[node_idx]:
                # Placeholder; overwritten below from a non-control neighbour.
                gt_bb.append((0, 0, 0, 0))
                gt_cls.append((0))
                gt_object.append((0))
                continue
            diff_0 = p[None, :] - bbox[:, 0:2]
            diff_1 = p[None, :] - bbox[:, 2:]
            # Point-in-box test with tolerance th on every side.
            in_object = (diff_0[:, 0] >= -th) & (diff_0[:, 1] >= -th) & (diff_1[:, 0] <= th) & (diff_1[:, 1] <= th)
            object_index = np.where(in_object)[0]
            if len(object_index) > 1:
                #print(object_index)
                #print('node', p[0] * width, p[1] * height, 'is inside more than one object')
                candidates = bbox[object_index]
                s = euclidean_distances(p[None, :], candidates[:, 0:2])[0]
                #print(np.argsort(s))
                object_index = object_index[np.argsort(s)]
                #print(candidates, s, object_index)
            elif len(object_index) == 0:
                #print(diff_0 * [width, height], diff_1* [width, height])
                #print(object_index)
                print('node', p[0] * width, p[1] * height, 'outside all object')
                #for i, line in enumerate(bbox[:, 0:2] * [width, height]):
                #    print(i, line)
                raise SystemExit
            cls = labels[object_index[0]]
            bb = bbox[object_index[0]]
            '''
            h = bb[3] - bb[1]
            w = bb[2] - bb[0]
            offset_x = bb[0] - p[0]
            offset_y = bb[1] - p[1]
            gt_bb.append((offset_x, offset_y, w, h))
            '''
            gt_bb.append(bb)
            gt_cls.append(cls)
            gt_object.append(object_index[0])
        #assign label to control
        # Map each control node to its non-control neighbours via control edges.
        control_neighboor = {}
        for e in graph_dict['edge']['control']:
            #print(is_control[e[0]], is_control[e[1]])
            if not is_control[e[0]] and is_control[e[1]]:
                c_node = e[1]
                node = e[0]
            elif not is_control[e[1]] and is_control[e[0]]:
                c_node = e[0]
                node = e[1]
            else:
                continue
            if c_node not in control_neighboor:
                control_neighboor[c_node] = []
            control_neighboor[c_node].append(node)
        #print(graph_dict['edge']['control'])
        #print(control_neighboor)
        #print(gt_bb, gt_cls)
        for node_idx, p in enumerate(pos):
            if is_control[node_idx]:
                #print(control_neighboor[node_idx][0])
                gt_bb[node_idx] = gt_bb[control_neighboor[node_idx][0]]
                gt_cls[node_idx] = gt_cls[control_neighboor[node_idx][0]]
                gt_object[node_idx] = gt_object[control_neighboor[node_idx][0]]
        #raise SystemExit
        #print(gt_bb, gt_cls)
        return np.array(gt_bb), np.array(gt_cls), np.array(gt_object)

    def __transform__(self, pos, scale, angle, translate):
        """Rotate points by `angle` around (0.5, 0.5) in place.

        The scale and translate branches are commented out, so only the
        rotation is applied; `scale`/`translate` are currently unused.
        """
        scale_m = np.eye(2)
        scale_m[0, 0] = scale
        scale_m[1, 1] = scale
        rot_m = np.eye(2)
        rot_m[0, 0:2] = [np.cos(angle), np.sin(angle)]
        rot_m[1, 0:2] = [-np.sin(angle), np.cos(angle)]
        #print(pos.shape, scale_m[0:2].shape)
        #pos = np.matmul(pos, scale_m[0:2])
        #print(pos.shape)
        center = np.array((0.5, 0.5))[None, :]
        pos -= center
        pos = np.matmul(pos, rot_m[0:2])
        pos += center
        #pos += np.array(translate)[None, :]
        return pos

    def __transform_bbox__(self, bbox, scale, angle, translate):
        """Transform all four corners of each box and return the new
        axis-aligned bounding rectangles (x_min, y_min, x_max, y_max)."""
        p0 = bbox[:, 0:2]
        p2 = bbox[:, 2:]
        p1 = np.concatenate([p2[:, 0][:, None], p0[:, 1][:, None]], axis = 1)
        p3 = np.concatenate([p0[:, 0][:, None], p2[:, 1][:, None]], axis = 1)
        p0 = self.__transform__(p0, scale, angle, translate)
        p1 = self.__transform__(p1, scale, angle, translate)
        p2 = self.__transform__(p2, scale, angle, translate)
        p3 = self.__transform__(p3, scale, angle, translate)
        def bound_rect(p0, p1, p2, p3):
            # Per-box min/max over the four transformed corners.
            x = np.concatenate((p0[:, 0][:, None], p1[:, 0][:, None], p2[:, 0][:, None], p3[:, 0][:, None]), axis = 1)
            y = np.concatenate((p0[:, 1][:, None], p1[:, 1][:, None], p2[:, 1][:, None], p3[:, 1][:, None]), axis = 1)
            x_min = x.min(1, keepdims = True)
            x_max = x.max(1, keepdims = True)
            y_min = y.min(1, keepdims = True)
            y_max = y.max(1, keepdims = True)
            return np.concatenate([x_min, y_min, x_max, y_max], axis = 1)
        return bound_rect(p0, p1, p2, p3)

    def random_transfer(self, pos, bbox, gt_bbox):
        """Apply one random transform to node positions and both box sets.

        Random scale/translation values are drawn but effectively unused,
        since __transform__ only applies the rotation.
        """
        scale = np.random.random() * 0.1 + 0.9
        angle = np.random.random() * np.pi * 2
        translate = [0, 0]
        translate[0] = np.random.random() * 0.2 - 0.1
        translate[1] = np.random.random() * 0.2 - 0.1
        pos = self.__transform__(pos, scale, angle, translate)
        bbox = self.__transform_bbox__(bbox, scale, angle, translate)
        gt_bbox = self.__transform_bbox__(gt_bbox, scale, angle, translate)
        return pos, bbox, gt_bbox

    def __getitem__(self, idx):
        """Parse one SVG and return a torch_geometric Data sample."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        filepath = self.svg_list[idx]
        #filepath = '/home/xinyangjiang/Datasets/SESYD/FloorPlans/floorplans16-06/file_97.svg'
        #print(filepath)
        p = SVGParser(filepath)
        width, height = p.get_image_size()
        graph_dict = self.graph_builder.buildGraph(p.get_all_shape())
        gt_bbox, gt_labels = self._get_bbox(filepath, width, height)
        bbox, labels, gt_object = self.gen_y(graph_dict, gt_bbox, gt_labels, width, height)
        if self.data_aug:
            graph_dict['pos']['spatial'], bbox, gt_bbox = self.random_transfer(graph_dict['pos']['spatial'], bbox, gt_bbox)
        # Node features: per-node color attributes concatenated with position.
        feats = np.concatenate((
            graph_dict['attr']['color'],
            #graph_dict['attr']['stroke_width'],
            graph_dict['pos']['spatial']),
            axis = 1)
        #feats = graph_dict['pos']['spatial']
        pos = graph_dict['pos']['spatial']
        is_control = graph_dict['attr']['is_control']
        edge = graph_dict['edge']['shape']
        edge_control = graph_dict['edge']['control']
        # Proximity edges between non-control nodes, thresholded by pos_edge_th.
        edge_pos, e_weight_pos = self.graph_builder.buildPosEdge(pos, is_control, th = self.pos_edge_th)
        e_attr = graph_dict['edge_attr']['shape']
        # Dead debug branch kept for reference (never executes).
        if False:
            top = 2000
            left = 80
            bottom = 2700
            right = 620
            A = np.zeros((pos.shape[0], pos.shape[0]))
            for e in edge:
                print(e)
                p0 = pos[e[0]]
                p1 = pos[e[1]]
                #print(p0, p1)
                p0[0] *= width
                p0[1] *= height
                p1[0] *= width
                p1[1] *= height
                print(p0, p1)
                #raise SystemExit
                #if p0[0] > left and p0[0] < right and p1[1] > top and p1[1] < bottom and p0[0] > left and p0[0] < right and p1[1] > top and p1[1] < bottom:
                #    print('foo')
                #    A[e[0], e[1]] = 1
            G = nx.from_numpy_array(A)
            #print(G.edges)
            raise SystemExit
        # Convert everything to torch tensors with the dtypes the model expects.
        feats = torch.tensor(feats, dtype=torch.float32)
        pos = torch.tensor(pos, dtype=torch.float32)
        edge = torch.tensor(edge, dtype=torch.long)
        edge_pos = torch.tensor(edge_pos, dtype=torch.long)
        edge_control = torch.tensor(edge_control, dtype=torch.long)
        is_control = torch.tensor(is_control, dtype=torch.bool)
        bbox = torch.tensor(bbox, dtype=torch.float32)
        labels = torch.tensor(labels, dtype=torch.long)
        gt_bbox = torch.tensor(gt_bbox, dtype=torch.float32)
        gt_labels = torch.tensor(gt_labels, dtype=torch.long)
        gt_object = torch.tensor(gt_object, dtype=torch.long)
        e_weight = torch.ones(edge.size(0))
        e_weight_control = torch.ones(edge_control.size(0))
        e_weight_pos = torch.tensor(e_weight_pos, dtype=torch.float32)
        e_attr = torch.tensor(e_attr, dtype=torch.float32)
        e_attr_pos = torch.zeros((edge_pos.size(0)), 4, dtype=torch.float32)
        #print('bbox', bbox.size())
        #print('labels', labels.size())
        #raise SystemExit
        data = Data(x = feats, pos = pos)
        data.edge = edge
        data.edge_control = edge_control
        data.edge_pos = edge_pos
        data.is_control = is_control
        data.bbox = bbox
        data.labels = labels
        data.gt_bbox = gt_bbox
        data.gt_labels = gt_labels
        data.gt_object = gt_object
        data.filepath = filepath
        data.width = width
        data.height = height
        data.e_weight = e_weight
        data.e_weight_control = e_weight_control
        data.e_weight_pos = e_weight_pos
        data.e_attr = e_attr
        data.e_attr_pos = e_attr_pos
        return data
if __name__ == '__main__':
    # Smoke test: parse every training SVG and build its graph, printing
    # each path as it goes. Dataset paths are hard-coded to the author's box.
    svg_list = open('/home/xinyangjiang/Datasets/SESYD/FloorPlans/train_list.txt').readlines()
    svg_list = ['/home/xinyangjiang/Datasets/SESYD/FloorPlans/' + line.strip() for line in svg_list]
    builder = SVGGraphBuilderBezier()
    for line in svg_list:
        print(line)
        #line = '/home/xinyangjiang/Datasets/SESYD/FloorPlans/floorplans16-01/file_56.svg'
        p = SVGParser(line)
        builder.buildGraph(p.get_all_shape())
    #train_dataset = SESYDFloorPlan(opt.data_dir, pre_transform=T.NormalizeScale())
    #train_loader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    #for batch in train_loader:
    #    pass
    #paths, attributes, svg_attributes = svg2paths2('/home/xinyangjiang/Datasets/SESYD/FloorPlans/floorplans16-05/file_47.svg')
    #print(paths, attributes, svg_attributes)
| 37.072727 | 156 | 0.533875 |
97e52610c70782f28a7590ddff308374178f4f4a | 1,076 | py | Python | src/contextual_bandits/Bibtex/parse_data.py | renan-cunha/Multi-Armed-Bandits | 054fc26627cb136a94b5394a5aab7ab3b2509704 | [
"MIT"
] | null | null | null | src/contextual_bandits/Bibtex/parse_data.py | renan-cunha/Multi-Armed-Bandits | 054fc26627cb136a94b5394a5aab7ab3b2509704 | [
"MIT"
] | 1 | 2020-05-05T20:26:49.000Z | 2020-05-08T14:27:40.000Z | src/contextual_bandits/Bibtex/parse_data.py | renan-cunha/Bandits | 054fc26627cb136a94b5394a5aab7ab3b2509704 | [
"MIT"
] | null | null | null | """I took this script from David Cortes tutorial
https://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/online_contextual_bandits.ipynb"""
import pandas as pd, numpy as np, re
from sklearn.preprocessing import MultiLabelBinarizer
def parse_data(file_name):
    """Parse a Bibtex-style multi-label dataset dump.

    Data lines look like ``<label,label,...> <idx:val idx:val ...>``; lines
    that do not start with a digit carry features only (empty label set).
    The first line of the file is a header and is skipped.

    Returns (features, y): a dense feature matrix and a binary label
    indicator matrix from MultiLabelBinarizer.
    """
    features = list()
    labels = list()
    with open(file_name, 'rt') as f:
        f.readline()  # skip the header line
        for l in f:
            if bool(re.search(r"^[0-9]", l)):
                g = re.search(r"^(([0-9]{1,2},?)+)\s(.*)$", l)
                labels.append([int(i) for i in g.group(1).split(",")])
                # NOTE(security): eval() on file content builds the sparse
                # {index: value} dict — only run this on trusted files.
                features.append(eval("{" + re.sub(r"\s", ",", g.group(3)) + "}"))
            else:
                l = l.strip()
                labels.append([])
                features.append(eval("{" + re.sub(r"\s", ",", l) + "}"))
    # Bug fix: DataFrame.as_matrix() was removed in pandas 1.0;
    # to_numpy() is the supported equivalent.
    features = pd.DataFrame.from_dict(features).fillna(0).to_numpy()
    mlb = MultiLabelBinarizer()
    y = mlb.fit_transform(labels)
    return features, y
# Convert the raw Bibtex text dump into dense numpy arrays on disk.
X, y = parse_data("Bibtex_data.txt")
np.save("bibtex_x.npy", X)
np.save("bibtex_y.npy", y)
| 35.866667 | 121 | 0.575279 |
815920af4f74da63d13a42c7074b6bab1b57ab94 | 1,033 | py | Python | main.py | calacuda/network-notifier | 546c86775dfa61700f8035e24157d2e7ca729c0a | [
"MIT"
] | null | null | null | main.py | calacuda/network-notifier | 546c86775dfa61700f8035e24157d2e7ca729c0a | [
"MIT"
] | null | null | null | main.py | calacuda/network-notifier | 546c86775dfa61700f8035e24157d2e7ca729c0a | [
"MIT"
] | null | null | null | """
main.py
for running a periodic scan to see if any new devices have connected to the
network.
By: Calacuda | MIT Licence
"""
import scan
import notify
import time
import json
def get_database():
    """Load and return the known-device database from database.json."""
    with open("database.json", "r") as fh:
        return json.load(fh)
def periodic_scan(wait_time):
    """
    Scan the network forever; when any new device connects, send an email
    to every address in 'alert_emails.txt'.

    wait_time = the number of minutes between scans.
    """
    # Probe once up front that init.py has created the database (EAFP,
    # with a context manager instead of a manual open/close pair).
    try:
        with open("database.json", "r"):
            pass
    except FileNotFoundError:
        print("please run the init.py script to create the database. then come back")
        exit()
    while True:
        database = get_database()
        new_devices = scan.get_hosts(database)
        if new_devices:
            # names = "\n > ".join(new_devices)
            notify.send_message(new_devices)
        time.sleep(wait_time * 60)
if __name__ == "__main__":
periodic_scan(20)
| 22.456522 | 85 | 0.644724 |
6b79353404348e9bd91c66a825e2cd80b7690969 | 13,000 | py | Python | tests/test_composition_integration.py | c4dt/zksk | 647a0d969634098ba56f944c0d057263f3780a98 | [
"MIT"
] | null | null | null | tests/test_composition_integration.py | c4dt/zksk | 647a0d969634098ba56f944c0d057263f3780a98 | [
"MIT"
] | null | null | null | tests/test_composition_integration.py | c4dt/zksk | 647a0d969634098ba56f944c0d057263f3780a98 | [
"MIT"
] | null | null | null | import random
import pytest
from petlib.bn import Bn
from petlib.ec import EcGroup
from zksk import DLRep, Secret
from zksk.exceptions import (
InvalidExpression,
InvalidSecretsError,
ValidationError,
GroupMismatchError,
)
from zksk.composition import AndProofStmt, OrProofStmt
from zksk.expr import wsum_secrets
from zksk.utils import make_generators
@pytest.fixture
def params(group):
    """Build two DLRep statements sharing secret x0, plus the secret values.

    p1 commits to (x0, x1, x2) over 3 generators; p2 commits to
    (x0, x3, x4, x5) over 4 generators. Returns (p1, p2, secrets_dict).
    """
    n1 = 3
    n2 = 4
    generators1 = make_generators(n1)
    generators2 = make_generators(n2)
    x0 = Secret()
    x1 = Secret()
    x2 = Secret()
    x3 = Secret()
    x4 = Secret()
    x5 = Secret()
    secrets = [x0, x1, x2, x3, x4, x5]
    secrets_dict = dict(
        [
            (x0, Bn(1)),
            (x1, Bn(2)),
            (x2, Bn(5)),
            (x3, Bn(100)),
            (x4, Bn(43)),
            (x5, Bn(10)),
        ]
    )
    # Commitment for p1: weighted sum of the first three secret values.
    sum_1 = group.wsum(
        [secrets_dict[x0], secrets_dict[x1], secrets_dict[x2]], generators1
    )
    # Commitment for p2: x0 shared with p1, plus x3..x5.
    secrets_2 = [secrets_dict[x0]]
    for i in range(3, 6):
        secrets_2.append(secrets_dict[secrets[i]])
    sum_2 = group.wsum(secrets_2, generators2)
    p1 = DLRep(sum_1, wsum_secrets([x0, x1, x2], generators1))
    p2 = DLRep(sum_2, wsum_secrets([x0, x3, x4, x5], generators2))
    return p1, p2, secrets_dict
def verify(verifier, prover):
    """Run one interactive sigma-protocol round and return the verdict."""
    com = prover.commit()
    chal = verifier.send_challenge(com)
    resp = prover.compute_response(chal)
    return verifier.verify(resp)
def verify_proof(proof, secrets):
    """Instantiate a prover/verifier pair for ``proof`` and run a full round."""
    prover = proof.get_prover(secrets)
    verifier = proof.get_verifier()
    commitment = prover.commit()
    challenge = verifier.send_challenge(commitment)
    response = prover.compute_response(challenge)
    return verifier.verify(response)
def test_and_proof_same_environment(params):
    """An AND of two statements sharing a secret verifies interactively."""
    p1, p2, secrets_dict = params
    and_proof = AndProofStmt(p1, p2)
    prover = and_proof.get_prover(secrets_dict)
    verifier = and_proof.get_verifier()
    assert verify(verifier, prover)
def test_and_proof_different_environments(params):
    """An AND composition verifies when built in a fresh environment."""
    # The original created two unused `Secret()` objects here; removed.
    p1, p2, secrets_dict = params
    and_proof = AndProofStmt(p1, p2)
    prover = and_proof.get_prover(secrets_dict)
    verifier = and_proof.get_verifier()
    assert verify(verifier, prover)
def test_and_proof_partially_defined_secrets():
    """Secret values can be fixed at construction or supplied at prove()."""
    generators = make_generators(2)
    x = Secret(value=4)
    x2 = Secret()
    p1 = DLRep(4 * generators[0], x * generators[0])
    p2 = DLRep(3 * generators[1], x2 * generators[1])
    andp = p1 & p2
    # Only x2 is supplied here; x already carries its value.
    tr = andp.prove({x2: 3})
    assert andp.verify(tr)
def test_and_proof_fails_when_bases_belong_to_different_groups(group):
    """
    An alien EcPt is inserted in the generators: the shared secret ``x``
    would link bases from two distinct EC groups, which must be rejected.
    """
    g1 = group.generator()
    other_group = EcGroup(706)
    assert group != other_group
    g2 = other_group.generator()
    x = Secret(value=Bn(42))
    y1 = group.wsum([x.value], [g1])
    y2 = other_group.wsum([x.value], [g2])
    p1 = DLRep(y1, wsum_secrets([x], [g1]))
    p2 = DLRep(y2, wsum_secrets([x], [g2]))
    and_proof = AndProofStmt(p1, p2)
    prover = and_proof.get_prover()
    verifier = and_proof.get_verifier()
    # An exception should be raised because a shared secret is linked to two different groups
    with pytest.raises(GroupMismatchError):
        verify(verifier, prover)
def test_and_proof_fails_when_secret_is_wrong(params, group):
    """Corrupting one secret value makes the AND proof fail verification."""
    p1, p2, secrets_dict = params
    and_proof = AndProofStmt(p1, p2)
    sec = secrets_dict.copy()
    u = list(sec.keys())
    # Overwrite the first secret with a random scalar.
    sec[u[0]] = group.order().random()
    prover = and_proof.get_prover(sec)
    verifier = and_proof.get_verifier()
    assert not verify(verifier, prover)
def test_multiple_and_proofs(params):
    """An AND of many (repeated) statements still verifies."""
    p1, p2, secrets_dict = params
    and_proof = AndProofStmt(p1, p2, p2, p1, p1, p1, p2)
    prover = and_proof.get_prover(secrets_dict)
    verifier = and_proof.get_verifier()
    assert verify(verifier, prover)
def test_compose_and_proofs_1(params):
    """A nested AndProofStmt((p1 & p2), p1) verifies correctly."""
    # The original also built an unused `p3 = AndProofStmt(p1, p2)`; removed.
    p1, p2, secrets_dict = params
    p4 = AndProofStmt(AndProofStmt(p1, p2), p1)
    prover = p4.get_prover(secrets_dict)
    verifier = p4.get_verifier()
    assert verify(verifier, prover)
def test_compose_and_proofs_2(params):
    """Deeply nested AND compositions verify correctly."""
    p1, p2, secrets_dict = params
    p3 = AndProofStmt(p1, p2)
    p = AndProofStmt(AndProofStmt(p1, AndProofStmt(p3, AndProofStmt(p1, p2))), p2)
    prover = p.get_prover(secrets_dict)
    verifier = p.get_verifier()
    assert verify(verifier, prover)
def test_and_proof_simulation_1(group):
    """A simulated AND transcript must not pass NIZK verification."""
    n = 3
    secret_values = [Bn(i) for i in range(n)]
    secrets = [Secret() for _ in range(n)]
    generators = make_generators(n, group)
    lhs = group.wsum(secret_values, generators)
    subproof1 = DLRep(lhs, wsum_secrets(secrets, generators))
    subproof2 = DLRep(lhs, wsum_secrets(secrets, generators))
    andp = AndProofStmt(subproof1, subproof2)
    andv = andp.get_verifier()
    tr = andp.simulate_proof()
    # Give the simulated transcript the genuine statement hash so only the
    # simulation itself makes verification fail.
    tr.stmt_hash = andp.prehash_statement().digest()
    assert not andv.verify_nizk(tr)
def test_and_proof_simulation_2(group):
    """A simulated AND transcript is self-consistent but not a valid proof."""
    n = 3
    secret_values = [Bn(i) for i in range(n)]
    secrets = [Secret() for _ in range(n)]
    generators = make_generators(n, group)
    lhs = group.wsum(secret_values, generators)
    subproof1 = DLRep(lhs, wsum_secrets(secrets, generators))
    subproof2 = DLRep(lhs, wsum_secrets(secrets, generators))
    andp = AndProofStmt(subproof1, subproof2)
    tr = andp.simulate()
    assert andp.verify_simulation_consistency(tr)
    assert not andp.verify(tr)
def test_and_proof_non_interactive(params):
    """A non-interactive AND proof bound to a message verifies."""
    p1, p2, secrets = params
    p = AndProofStmt(p1, p2)
    message = "whatever"
    tr = p.prove(secrets, message=message)
    assert p.verify(tr, message=message)
def test_and_proof_non_interactive_fails_when_wrong_secrets(params, group):
    """A NIZK AND proof built from a corrupted secret must not verify."""
    p1, p2, secrets = params
    andp = AndProofStmt(p1, p2)
    bad_secrets = secrets.copy()
    u = list(bad_secrets.keys())
    bad_secrets[u[0]] = group.order().random()
    message = "whatever"
    tr = andp.prove(bad_secrets, message=message)
    assert not andp.verify(tr, message=message)
def test_and_proof_infix_operator(params):
    """The ``&`` operator builds a working AND composition."""
    p1, p2, secrets_dict = params
    and_proof = p1 & p2 & p1
    prover = and_proof.get_prover(secrets_dict)
    verifier = and_proof.get_verifier()
    assert verify(verifier, prover)
def test_and_proof_with_complex_expression(group):
    """AND composition over multi-term DLRep expressions verifies."""
    g = group.generator()
    g1 = 2 * g
    g2 = 5 * g
    g3 = 10 * g
    x1 = Secret()
    x2 = Secret()
    x3 = Secret()
    # The two conjuncts share x2.
    proof = DLRep(10 * g1 + 15 * g2, x1 * g1 + x2 * g2) & DLRep(
        15 * g1 + 35 * g3, x2 * g1 + x3 * g3
    )
    prover = proof.get_prover({x1: 10, x2: 15, x3: 35})
    verifier = proof.get_verifier()
    assert verify(verifier, prover)
def test_or_proof(params):
    """An OR of several (repeated) statements verifies interactively."""
    p1, p2, secrets = params
    orproof = OrProofStmt(p1, p2, p1, p2, p1, p2)
    prov = orproof.get_prover(secrets)
    verif = orproof.get_verifier()
    com = prov.commit()
    chal = verif.send_challenge(com)
    resp = prov.compute_response(chal)
    # Here we see that some responses have an identical first element.
    # The only one with a different first element is the non-simulated one.
    assert verif.verify(resp)
def test_or_proof_manual(params):
    """
    The OR prover must never pick a subproof marked as simulated: randomly
    flag subproofs as simulated, then check that the prover's chosen leg
    (``orproof.chosen_idx``) is always a non-simulated one.
    """
    p1, p2, secrets = params
    orproof = OrProofStmt(p1, p2, p1, p2, p1, p2)
    subproofs = orproof.subproofs
    rep = 0
    chosen = []
    while rep < 10:
        # Mark a random subset of the subproofs as simulated.
        sims = []
        for proof in subproofs:
            is_simulated = random.choice([True, False])
            sims.append(is_simulated)
            proof.set_simulated(is_simulated)
        if all(sims):
            # At least one subproof must be provable for real; redraw.
            continue
        for _ in range(30):
            # Building a prover selects a leg; it must be a non-simulated one.
            orproof.get_prover(secrets)
            chosen.append(not sims[orproof.chosen_idx])
            rep += 1
    assert all(chosen)
def test_and_or_proof_composition(params):
    """An AND whose first child is an OR composition verifies."""
    p1, p2, secrets = params
    g1 = 7 * p1.bases[0]
    g2 = 8 * p1.bases[0]
    xb = Secret(name="xb")
    xa = Secret(name="xa")
    p0 = DLRep(7 * g1 + 18 * g2, xb * g1 + xa * g2)
    secrets[xb] = 7
    secrets[xa] = 18
    orproof = OrProofStmt(p1, p2)
    andp = AndProofStmt(orproof, p0)
    andp = AndProofStmt(andp, DLRep(15 * p1.bases[0], Secret(value=15) * p1.bases[0]))
    prover = andp.get_prover(secrets)
    verifier = andp.get_verifier()
    assert verify(verifier, prover)
def test_or_and_proof_composition(params):
    """An OR whose second child is an AND composition verifies."""
    p1, p2, secrets = params
    andp = AndProofStmt(p1, p2)
    g1 = 7 * p1.bases[0]
    g2 = 8 * p1.bases[0]
    xb = Secret(name="xb")
    xa = Secret(name="xa")
    p0 = DLRep(7 * g1 + 18 * g2, xb * g1 + xa * g2)
    secrets[xa] = 7
    secrets[Secret(name="xc")] = 18
    orproof = OrProofStmt(p0, andp)
    prover = orproof.get_prover(secrets)
    verifier = orproof.get_verifier()
    assert verify(verifier, prover)
def test_or_or_proof_composition(params):
    """An OR nested inside another OR verifies."""
    p1, p2, secrets = params
    first_or = OrProofStmt(p1, p2)
    g1 = 7 * p1.bases[0]
    g2 = 8 * p1.bases[0]
    xb = Secret(name="xb")
    xa = Secret(name="xa")
    p0 = DLRep(7 * g1 + 18 * g2, xb * g1 + xa * g2)
    secrets[xa] = 7
    secrets[Secret()] = 18
    orproof = OrProofStmt(p0, first_or)
    prover = orproof.get_prover(secrets)
    verifier = orproof.get_verifier()
    assert verify(verifier, prover)
def test_or_proof_simulation(params):
    """A simulated OR transcript is self-consistent but must not verify."""
    p1, p2, secrets = params
    first_or = OrProofStmt(p1, p2)
    tr = first_or.simulate()
    assert first_or.verify_simulation_consistency(tr)
    assert not first_or.verify(tr)
def test_multiple_or_proofs(group, params):
    """ORs nested inside ORs verify end to end."""
    p1, p2, secrets = params
    g = group.generator()
    x10 = Secret()
    secrets.update({x10: 13})
    orproof = OrProofStmt(p1, OrProofStmt(p2, DLRep(13 * g, x10 * g)))
    assert verify_proof(orproof, secrets)
def test_multiple_or_proofs_composition(group, params):
    """An OR over OR sub-compositions verifies end to end."""
    p1, p2, secrets = params
    g = group.generator()
    x10 = Secret()
    secrets.update({x10: 13})
    orp1 = OrProofStmt(p2, p1)
    orp2 = OrProofStmt(p1, DLRep(13 * g, x10 * g))
    orproof = OrProofStmt(orp1, p2, orp2)
    assert verify_proof(orproof, secrets)
def test_or_proof_infix_operator(params):
    """The ``|`` operator builds a working OR composition."""
    p1, p2, secrets = params
    orproof = p1 | p2
    assert verify_proof(orproof, secrets)
def test_multiple_or_proof_infix_operator(group, params):
    """Chained ``|`` operators build a working OR composition."""
    p1, p2, secrets = params
    g = group.generator()
    x10 = Secret()
    secrets.update({x10: 13})
    orproof = p1 | p2 | DLRep(13 * g, x10 * g)
    assert verify_proof(orproof, secrets)
def test_or_non_interactive(params):
    """A non-interactive OR proof bound to a message verifies."""
    p1, p2, secrets = params
    p = OrProofStmt(p1, p2)
    message = "whatever"
    tr = p.prove(secrets, message=message)
    assert p.verify(tr, message=message)
def test_or_non_interactive_fails_on_wrong_secrets(group, params):
    """A NIZK OR proof built from corrupted secrets must not verify."""
    p1, p2, secrets = params
    p = OrProofStmt(p1, p2)
    bad_secrets = secrets.copy()
    u = list(bad_secrets.keys())
    # Corrupt the first four secrets so neither disjunct is provable.
    bad_secrets[u[0]] = group.order().random()
    bad_secrets[u[1]] = group.order().random()
    bad_secrets[u[2]] = group.order().random()
    bad_secrets[u[3]] = group.order().random()
    message = "whatever"
    tr = p.prove(bad_secrets, message=message)
    assert not p.verify(tr, message=message)
def test_malicious_and_proofs():
    """A prover lying about one subproof's secret values must be caught."""
    x0 = Secret()
    x2 = Secret()
    x1 = Secret()
    generators = make_generators(3)
    g1 = generators[0]
    g2 = generators[1]
    g3 = generators[2]
    secret_dict = {x0: 3, x2: 50, x1: 12}
    mal_secret_dict = {x0: 3, x2: 51}
    andp = AndProofStmt(
        DLRep(12 * g1 + 50 * g2, x1 * g1 + x2 * g2),
        DLRep(3 * g3 + 51 * g2, x0 * g1 + x2 * g2),
    )
    prov = andp.get_prover(secret_dict)
    # Maliciously swap inconsistent secret values into the second subprover.
    prov.subs[1].secret_values = mal_secret_dict
    verif = andp.get_verifier()
    com = prov.commit()
    chal = verif.send_challenge(com)
    resp = prov.compute_response(chal)
    with pytest.raises(ValidationError):
        verif.verify(resp)
# Secret used both inside and outside of or clause
def test_invalid_or_composition():
    """A secret shared between an OR clause and an outer AND is rejected."""
    r = Secret(10)
    g1, g2, g3 = make_generators(3)
    st1 = DLRep(10 * g1, r * g1)
    st21 = DLRep(10 * g2, r * g2)
    st22 = DLRep(12 * g3, r * g3)
    st22.set_simulated()
    st2 = st21 | st22
    st = st1 & st2
    with pytest.raises(InvalidSecretsError):
        st.prove()
# Secret used both inside two different or clauses
def test_invalid_or_composition_inside_two_or():
    """Reusing one secret across two distinct OR clauses must be rejected."""
    r = Secret(10)
    g1, g2, g3, g4 = make_generators(4)
    st11 = DLRep(r.value * g1, r * g1)
    st12 = DLRep(2 * g2, r * g2)
    st12.set_simulated()
    st1 = st11 | st12
    st21 = DLRep(7 * g3, r * g3)
    # Bug fix: the original wrote ``st21.simluation = True`` — a typo that
    # silently created an unused attribute.  Mirror test_invalid_or_composition
    # and mark the statement as simulated through the real API.
    st21.set_simulated()
    st22 = DLRep(r.value * g4, r * g4)
    st2 = st21 | st22
    st = st1 & st2
    with pytest.raises(InvalidSecretsError):
        st.prove()
| 27.659574 | 94 | 0.648615 |
8cb3639391511b6dc23918f04ec8af21bd304d29 | 66 | py | Python | testAppium/unit/testcase/start_activity/__init__.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | testAppium/unit/testcase/start_activity/__init__.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | testAppium/unit/testcase/start_activity/__init__.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | # coding = utf-8
from .start_activity import AA_StartAPPActivity
| 16.5 | 47 | 0.80303 |
99bc473da557c565cdcc65a41ef42202b797597b | 11,907 | py | Python | gem_metrics/config.py | ndaheim/GEM-metrics | cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede | [
"MIT"
] | 30 | 2021-02-06T04:58:14.000Z | 2022-03-04T11:26:14.000Z | gem_metrics/config.py | ndaheim/GEM-metrics | cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede | [
"MIT"
] | 70 | 2021-01-12T17:55:15.000Z | 2022-03-30T17:37:02.000Z | gem_metrics/config.py | ndaheim/GEM-metrics | cfa7a3223d94a7f7ef5fda1d2928e2510b0cbede | [
"MIT"
] | 14 | 2021-01-30T20:55:17.000Z | 2022-03-24T02:31:21.000Z |
'''
This is the main list of splits that are supported. The naming schema follows
{DATASET_NAME}_{SPLIT_NAME}.
For validation sets, both `val` and `validation` are allowed, but the
corresponding filename should use `validation` - this was introduced to maintain
backwards compatibility.
Should the dataset not be found at the corresponding URL, the results will be
empty.
In this file, we first construct the _SUPPORTED DATASETS with all the
GEM-supported datasets. If you want to use a non-supported dataset, simply
add it to the dictionary _after_ the automatic URL addition code below.
'''
from gem_metrics import data
# Mapping "{dataset}_{split}" -> {"language": ISO code, "task": task type}.
# Download URLs are attached automatically further below.
_SUPPORTED_DATASETS = {
    "common_gen_val": {"language": "en", "task": "data2text"},
    "common_gen_test": {"language": "en", "task": "data2text"},
    "common_gen_challenge_test_scramble": {"language": "en", "task": "data2text"},
    "common_gen_challenge_train_sample": {"language": "en", "task": "data2text"},
    "common_gen_challenge_validation_sample": {"language": "en", "task": "data2text"},
    "cs_restaurants_val": {"language": "cs", "task": "data2text"},
    "cs_restaurants_test": {"language": "cs", "task": "data2text"},
    "cs_restaurants_challenge_test_scramble": {"language": "cs", "task": "data2text"},
    "cs_restaurants_challenge_train_sample": {"language": "cs", "task": "data2text"},
    "cs_restaurants_challenge_validation_sample": {"language": "cs", "task": "data2text"},
    # "dart_val": {"language": "en", "task": "data2text"},
    # "dart_test": {"language": "en", "task": "data2text"},
    "e2e_nlg_val": {"language": "en", "task": "data2text"},
    "e2e_nlg_test": {"language": "en", "task": "data2text"},
    "e2e_nlg_challenge_test_scramble": {"language": "en", "task": "data2text"},
    "e2e_nlg_challenge_train_sample": {"language": "en", "task": "data2text"},
    "e2e_nlg_challenge_validation_sample": {"language": "en", "task": "data2text"},
    "mlsum_de_val": {"language": "de", "task": "summarization"},
    "mlsum_de_test": {"language": "de", "task": "summarization"},
    "mlsum_de_challenge_test_covid": {"language": "de", "task": "summarization"},
    "mlsum_de_challenge_train_sample": {"language": "de", "task": "summarization"},
    "mlsum_de_challenge_validation_sample": {"language": "de", "task": "summarization"},
    "mlsum_es_val": {"language": "es", "task": "summarization"},
    "mlsum_es_test": {"language": "es", "task": "summarization"},
    "mlsum_es_challenge_test_covid": {"language": "es", "task": "summarization"},
    "mlsum_es_challenge_train_sample": {"language": "es", "task": "summarization"},
    "mlsum_es_challenge_validation_sample": {"language": "es", "task": "summarization"},
    "schema_guided_dialog_val": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_test": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_challenge_test_backtranslation": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_challenge_test_bfp02": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_challenge_test_bfp05": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_challenge_test_nopunc": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_challenge_test_scramble": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_challenge_train_sample": {"language": "en", "task": "data2text"},
    "schema_guided_dialog_challenge_validation_sample": {"language": "en", "task": "data2text"},
    "totto_val": {"language": "en", "task": "data2text"},
    "totto_test": {"language": "en", "task": "data2text"},
    "totto_challenge_test_scramble": {"language": "en", "task": "data2text"},
    "totto_challenge_train_sample": {"language": "en", "task": "data2text"},
    "totto_challenge_validation_sample": {"language": "en", "task": "data2text"},
    "web_nlg_en_val": {"language": "en", "task": "data2text"},
    "web_nlg_en_test": {"language": "en", "task": "data2text"},
    "web_nlg_en_challenge_test_scramble": {"language": "en", "task": "data2text"},
    "web_nlg_en_challenge_test_numbers": {"language": "en", "task": "data2text"},
    "web_nlg_en_challenge_train_sample": {"language": "en", "task": "data2text"},
    "web_nlg_en_challenge_validation_sample": {"language": "en", "task": "data2text"},
    "web_nlg_ru_val": {"language": "ru", "task": "data2text"},
    "web_nlg_ru_test": {"language": "ru", "task": "data2text"},
    "web_nlg_ru_challenge_test_scramble": {"language": "ru", "task": "data2text"},
    "web_nlg_ru_challenge_train_sample": {"language": "ru", "task": "data2text"},
    "web_nlg_ru_challenge_validation_sample": {"language": "ru", "task": "data2text"},
    # "wiki_auto_asset_turk_val": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_test_asset": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_test_turk": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_asset_backtranslation": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_asset_bfp02": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_asset_bfp05": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_asset_nopunc": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_turk_backtranslation": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_turk_bfp02": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_turk_bfp05": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_test_turk_nopunc": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_train_sample": {"language": "en", "task": "text_simplification"},
    "wiki_auto_asset_turk_challenge_validation_sample": {"language": "en", "task": "text_simplification"},
    "wiki_lingua_spanish_es_val": {"language": "en", "task": "summarization"},
    "wiki_lingua_spanish_es_test": {"language": "en", "task": "summarization"},
    "wiki_lingua_russian_ru_val": {"language": "en", "task": "summarization"},
    "wiki_lingua_russian_ru_test": {"language": "en", "task": "summarization"},
    "wiki_lingua_turkish_tr_val": {"language": "en", "task": "summarization"},
    "wiki_lingua_turkish_tr_test": {"language": "en", "task": "summarization"},
    "wiki_lingua_vietnamese_vi_val": {"language": "en", "task": "summarization"},
    "wiki_lingua_vietnamese_vi_test": {"language": "en", "task": "summarization"},
    "xsum_val": {"language": "en", "task": "summarization"},
    "xsum_test": {"language": "en", "task": "summarization"},
    "xsum_challenge_test_backtranslation": {"language": "en", "task": "summarization"},
    "xsum_challenge_test_bfp_02": {"language": "en", "task": "summarization"},
    "xsum_challenge_test_bfp_05": {"language": "en", "task": "summarization"},
    "xsum_challenge_test_nopunc": {"language": "en", "task": "summarization"},
    "xsum_challenge_test_covid": {"language": "en", "task": "summarization"},
    "xsum_challenge_train_sample": {"language": "en", "task": "summarization"},
    "xsum_challenge_validation_sample": {"language": "en", "task": "summarization"},
}
# Also add "*_validation" compatibility.
# DO NOT MODIFY THIS PART.
# Each "*_val" entry gets an alias "*_validation" that shares the SAME
# settings dict (so the URL attached below is visible under both names).
_VAL_COMPATIBILITY_DICT = {}
for key, value in _SUPPORTED_DATASETS.items():
    if key.endswith("_val"):
        _VAL_COMPATIBILITY_DICT[key.replace("_val", "_validation")] = value
_SUPPORTED_DATASETS.update(_VAL_COMPATIBILITY_DICT)
# Now automatically add download links.
for dataset_name, settings in _SUPPORTED_DATASETS.items():
    # For both val and validation named datasets, the download links to validation.
    sanitized_name = (
        dataset_name.replace("_val", "_validation")
        if dataset_name.endswith("_val")
        else dataset_name
    )
    settings["url"] = (
        "https://github.com/GEM-benchmark/GEM-metrics/releases/download/data/"
        f"{sanitized_name}.json"
    )

# If you want to add a custom dataset / url, you can add it here.
# Just ensure that your entry has `language`, `task`, and `url` set.
# HERE
# Access functions used by the main scripts.
def get_all_datasets():
    """Return the names of all supported dataset splits."""
    return list(_SUPPORTED_DATASETS)
def get_language_for_dataset(dataset_name):
    """Return the language code for ``dataset_name`` (defaults to ``en``)."""
    config = _SUPPORTED_DATASETS.get(dataset_name, {'language': 'en'})
    return config['language']
def get_task_type_for_dataset(dataset_name):
    """Return the task type for ``dataset_name`` (defaults to ``text2text``)."""
    config = _SUPPORTED_DATASETS.get(dataset_name, {'task': 'text2text'})
    return config['task']
def get_url_for_dataset(dataset_name):
    """Return the download URL for ``dataset_name`` ("" if unknown)."""
    config = _SUPPORTED_DATASETS.get(dataset_name, {'url': ""})
    return config['url']
'''
EVALUATION SUITE SETTINGS
We support two types of challenge sets:
1) Transformations
2) Subpopulations
Transformed datasets will be evaluated in relation to the parent_datapoints from
which they are derived. This feature is added here - simply add the name of the
challenge set and the parent set.
Subpopulations are partitions of test set of particular interest. If you have a
file for subpopulations, add them to the list below.
'''
# Maps each transformed challenge set to the parent dataset it is derived
# from; transformed sets are evaluated relative to their parent's datapoints.
# (The old comment "URLs to download standard references from" described a
# different table and was misleading.)
_TRANSFORMATION_PARENT_DATASETS = {
    "cs_restaurants_challenge_test_scramble": "cs_restaurants_test",
    "web_nlg_ru_challenge_test_scramble": "web_nlg_ru_test",
    "schema_guided_dialog_challenge_test_backtranslation": "schema_guided_dialog_test",
    "schema_guided_dialog_challenge_test_bfp02": "schema_guided_dialog_test",
    "schema_guided_dialog_challenge_test_bfp05": "schema_guided_dialog_test",
    "schema_guided_dialog_challenge_test_nopunc": "schema_guided_dialog_test",
    "schema_guided_dialog_challenge_test_scramble": "schema_guided_dialog_test",
    "xsum_challenge_test_backtranslation": "xsum_test",
    "xsum_challenge_test_bfp_02": "xsum_test",
    "xsum_challenge_test_bfp_05": "xsum_test",
    "xsum_challenge_test_nopunc": "xsum_test",
    "e2e_nlg_challenge_test_scramble": "e2e_nlg_test",
    "web_nlg_en_challenge_test_scramble": "web_nlg_en_test",
    "web_nlg_en_challenge_test_numbers": "web_nlg_en_test",
    "wiki_auto_asset_turk_challenge_test_asset_backtranslation": "wiki_auto_asset_turk_test_asset",
    "wiki_auto_asset_turk_challenge_test_asset_bfp02": "wiki_auto_asset_turk_test_asset",
    "wiki_auto_asset_turk_challenge_test_asset_bfp05": "wiki_auto_asset_turk_test_asset",
    "wiki_auto_asset_turk_challenge_test_asset_nopunc": "wiki_auto_asset_turk_test_asset",
    "wiki_auto_asset_turk_challenge_test_turk_backtranslation": "wiki_auto_asset_turk_test_turk",
    "wiki_auto_asset_turk_challenge_test_turk_bfp02": "wiki_auto_asset_turk_test_turk",
    "wiki_auto_asset_turk_challenge_test_turk_bfp05": "wiki_auto_asset_turk_test_turk",
    "wiki_auto_asset_turk_challenge_test_turk_nopunc": "wiki_auto_asset_turk_test_turk",
}
def get_all_transformation_sets():
    """Return the names of all transformation challenge sets."""
    return list(_TRANSFORMATION_PARENT_DATASETS)
def get_parent_dataset_for_transformation(dataset_name):
    """Return the parent of a transformation set, or None if unknown."""
    return _TRANSFORMATION_PARENT_DATASETS.get(dataset_name)
# Test splits for which subpopulation ("contrast set") files are published.
_SUBPOPULATION_BASE = [
    "cs_restaurants_test",
    "e2e_nlg_test",
    "schema_guided_dialog_test",
    "totto_test",
    "xsum_test",
    "web_nlg_en_test",
    "web_nlg_ru_test",
    "wiki_auto_asset_turk_test_asset",
    "wiki_auto_asset_turk_test_turk",
]
# Maps each of those splits to the download URL of its contrast-set file.
_SUBPOPULATION_DATASETS = {
    dataset_name: f"https://github.com/GEM-benchmark/GEM-metrics/releases/download/data/{dataset_name}_contrast_sets.json"
    for dataset_name in _SUBPOPULATION_BASE
}
def get_all_subpopulation_sets():
    """Return the names of all splits that have subpopulation files."""
    return list(_SUBPOPULATION_DATASETS)
def get_url_for_subpopulation(dataset_name):
    """Return the contrast-set URL for ``dataset_name``, or None if unknown."""
    return _SUBPOPULATION_DATASETS.get(dataset_name)
| 55.125 | 122 | 0.727975 |
34cf365564519d1fc7e885887eed7aba4e7edf1e | 3,319 | py | Python | guardian/managers.py | peopledoc/django-guardian | 459827c2329975113cbf0d11f4fd476b5689a055 | [
"BSD-2-Clause"
] | 5 | 2015-12-05T15:39:51.000Z | 2020-09-16T20:14:29.000Z | guardian/managers.py | peopledoc/django-guardian | 459827c2329975113cbf0d11f4fd476b5689a055 | [
"BSD-2-Clause"
] | null | null | null | guardian/managers.py | peopledoc/django-guardian | 459827c2329975113cbf0d11f4fd476b5689a055 | [
"BSD-2-Clause"
] | 2 | 2019-11-23T17:47:46.000Z | 2022-01-14T11:05:21.000Z | from django.db import models
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from guardian.exceptions import ObjectNotPersisted
class UserObjectPermissionManager(models.Manager):
    """Manager for per-object permissions granted to individual users."""

    def assign(self, perm, user, obj):
        """
        Assigns permission with given ``perm`` for an instance ``obj`` and
        ``user``.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        content_type = ContentType.objects.get_for_model(obj)
        permission = Permission.objects.get(content_type=content_type,
            codename=perm)
        obj_perm, _created = self.get_or_create(
            content_type=content_type,
            permission=permission,
            object_pk=obj.pk,
            user=user,
        )
        return obj_perm

    def remove_perm(self, perm, user, obj):
        """
        Removes permission ``perm`` for an instance ``obj`` and given ``user``.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        content_type = ContentType.objects.get_for_model(obj)
        self.filter(
            permission__codename=perm,
            user=user,
            object_pk=obj.pk,
            content_type=content_type,
        ).delete()

    def get_for_object(self, user, obj):
        """Returns queryset of this user's permissions on ``obj``."""
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        content_type = ContentType.objects.get_for_model(obj)
        return self.filter(content_type=content_type, user=user)
class GroupObjectPermissionManager(models.Manager):
    """Manager for per-object permissions granted to groups."""

    def assign(self, perm, group, obj):
        """
        Assigns permission with given ``perm`` for an instance ``obj`` and
        ``group``.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        content_type = ContentType.objects.get_for_model(obj)
        permission = Permission.objects.get(content_type=content_type,
            codename=perm)
        obj_perm, _created = self.get_or_create(
            content_type=content_type,
            permission=permission,
            object_pk=obj.pk,
            group=group,
        )
        return obj_perm

    def remove_perm(self, perm, group, obj):
        """
        Removes permission ``perm`` for an instance ``obj`` and given ``group``.
        """
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        content_type = ContentType.objects.get_for_model(obj)
        self.filter(
            permission__codename=perm,
            group=group,
            object_pk=obj.pk,
            content_type=content_type,
        ).delete()

    def get_for_object(self, group, obj):
        """Returns queryset of this group's permissions on ``obj``."""
        if getattr(obj, 'pk', None) is None:
            raise ObjectNotPersisted("Object %s needs to be persisted first"
                % obj)
        content_type = ContentType.objects.get_for_model(obj)
        return self.filter(content_type=content_type, group=group)
8e2ba1065983562cecb919962b9da5665ebecb98 | 4,546 | py | Python | src/m9b_summing_again.py | DonkeyK1ng/03-AccumulatorsAndFunctionsWithParameters | e9a0dfb431e8bb8a408fee0732a124b24efa44a6 | [
"MIT"
] | null | null | null | src/m9b_summing_again.py | DonkeyK1ng/03-AccumulatorsAndFunctionsWithParameters | e9a0dfb431e8bb8a408fee0732a124b24efa44a6 | [
"MIT"
] | null | null | null | src/m9b_summing_again.py | DonkeyK1ng/03-AccumulatorsAndFunctionsWithParameters | e9a0dfb431e8bb8a408fee0732a124b24efa44a6 | [
"MIT"
] | null | null | null | """
This module lets you practice the ACCUMULATOR pattern
in its simplest classic forms:
SUMMING: total = total + number
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and Yuanning Zuo.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
    """ Calls the TEST functions in this module. """
    # Each run_test_X prints expected-vs-actual results for function X.
    run_test_sum_powers()
    run_test_sum_powers_in_range()
def run_test_sum_powers():
    """ Tests the sum_powers function, printing expected vs actual values. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_powers function:')
    print('--------------------------------------------------')

    # Test 1: 1**3 + 2**3 = 9.  (The original printed an incorrect
    # expected value of 8.)
    expected = 9
    answer = sum_powers(2, 3)
    print('Expected:', expected, ' Actual:', answer)

    # Test 2: documented example from the sum_powers docstring.
    expected = 144.45655
    answer = sum_powers(100, 0.1)
    print('Expected:', expected, ' Actual:', answer)

    # Test 3: documented example; the original's 3.80 was too imprecise
    # to compare against.
    expected = 3.80826
    answer = sum_powers(5, -0.3)
    print('Expected:', expected, ' Actual:', answer)
def sum_powers(n, p):
    """
    What comes in: A non-negative integer n
      and a number p.
    What goes out: The sum 1**p + 2**p + 3**p + ... + n**p
      for the given numbers n and p. The latter may be any number
      (possibly a floating point number, and possibly negative).
    Side effects: None.
    Examples:
      -- sum_powers(5, -0.3) returns about 3.80826
      -- sum_powers(100, 0.1) returns about 144.45655
    """
    # Classic SUMMING accumulator pattern over k = 1..n inclusive.
    # (range(1, n + 1) replaces the original range(n) with (k + 1)**p,
    # and the leftover commented-out debug print is removed.)
    total = 0
    for k in range(1, n + 1):
        total = total + k ** p
    return total
# ------------------------------------------------------------------
# Done: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
#
# No fair running the code of sum_powers to GENERATE
# test cases; that would defeat the purpose of TESTING!
# ------------------------------------------------------------------
def run_test_sum_powers_in_range():
    """ Tests the sum_powers_in_range function, printing expected vs actual. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_powers_in_range function:')
    print('--------------------------------------------------')

    # Test 1: documented example from the sum_powers_in_range docstring
    # (the original's 142.384 was truncated).
    expected = 142.384776
    answer = sum_powers_in_range(3, 100, 0.1)
    print('Expected:', expected, ' Actual:', answer)

    # Test 2: 2 + 3 = 5.
    expected = 5
    answer = sum_powers_in_range(2, 3, 1)
    print('Expected:', expected, ' Actual:', answer)

    # Test 3: 4 + 5 + 6 + 7 + 8 + 9 = 39.
    expected = 39
    answer = sum_powers_in_range(4, 9, 1)
    print('Expected:', expected, ' Actual:', answer)
def sum_powers_in_range(m, n, p):
    """
    What comes in: Non-negative integers m and n, with n >= m,
      and a number p.
    What goes out: the sum
         m**p + (m+1)**p + (m+2)**p + ... + n**p
      for the given numbers m, n and p. The latter may be any number
      (possibly a floating point number, and possibly negative).
    Side effects: None.
    Example:
      -- sum_powers_in_range(3, 100, 0.1) returns about 142.384776
    """
    # SUMMING accumulator over k = m..n inclusive.  The stray debug
    # ``print(k)`` that ran every iteration contradicted the documented
    # "Side effects: None" contract and has been removed.
    total = 0
    for k in range(m, n + 1):
        total = total + k ** p
    return total
# ------------------------------------------------------------------
# Done: 5. Implement and test this function.
# Note that you should write its TEST function first (above).
#
# No fair running the code of sum_powers_in_range to GENERATE
# test cases; that would defeat the purpose of TESTING!
# ------------------------------------------------------------------
# ----------------------------------------------------------------------
# Calls main to start the ball rolling — but only when run as a script,
# so importing this module no longer triggers the tests.
# ----------------------------------------------------------------------
if __name__ == "__main__":
    main()
| 36.368 | 79 | 0.50088 |
a2fe2c49e0bb2d2596a15fcf559eb112de2b2e49 | 6,056 | py | Python | troposphere/opensearchservice.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | [
"BSD-2-Clause"
] | null | null | null | troposphere/opensearchservice.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | [
"BSD-2-Clause"
] | null | null | null | troposphere/opensearchservice.py | filipepmo/troposphere | b1590f58ed8cc86ba18a19ed93fc9380d6f7306b | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 51.0.0
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, integer
from .validators.opensearchservice import validate_search_service_engine_version
class MasterUserOptions(AWSProperty):
    """
    `MasterUserOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-masteruseroptions.html>`__
    """

    # Each entry maps a CloudFormation property name to (expected type, required?).
    props: PropsDictType = {
        "MasterUserARN": (str, False),
        "MasterUserName": (str, False),
        "MasterUserPassword": (str, False),
    }
class AdvancedSecurityOptionsInput(AWSProperty):
"""
`AdvancedSecurityOptionsInput <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-advancedsecurityoptionsinput.html>`__
"""
props: PropsDictType = {
"Enabled": (boolean, False),
"InternalUserDatabaseEnabled": (boolean, False),
"MasterUserOptions": (MasterUserOptions, False),
}
class ZoneAwarenessConfig(AWSProperty):
"""
`ZoneAwarenessConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-zoneawarenessconfig.html>`__
"""
props: PropsDictType = {
"AvailabilityZoneCount": (integer, False),
}
class ClusterConfig(AWSProperty):
"""
`ClusterConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-clusterconfig.html>`__
"""
props: PropsDictType = {
"DedicatedMasterCount": (integer, False),
"DedicatedMasterEnabled": (boolean, False),
"DedicatedMasterType": (str, False),
"InstanceCount": (integer, False),
"InstanceType": (str, False),
"WarmCount": (integer, False),
"WarmEnabled": (boolean, False),
"WarmType": (str, False),
"ZoneAwarenessConfig": (ZoneAwarenessConfig, False),
"ZoneAwarenessEnabled": (boolean, False),
}
class CognitoOptions(AWSProperty):
"""
`CognitoOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-cognitooptions.html>`__
"""
props: PropsDictType = {
"Enabled": (boolean, False),
"IdentityPoolId": (str, False),
"RoleArn": (str, False),
"UserPoolId": (str, False),
}
class DomainEndpointOptions(AWSProperty):
"""
`DomainEndpointOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-domainendpointoptions.html>`__
"""
props: PropsDictType = {
"CustomEndpoint": (str, False),
"CustomEndpointCertificateArn": (str, False),
"CustomEndpointEnabled": (boolean, False),
"EnforceHTTPS": (boolean, False),
"TLSSecurityPolicy": (str, False),
}
class EBSOptions(AWSProperty):
"""
`EBSOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-ebsoptions.html>`__
"""
props: PropsDictType = {
"EBSEnabled": (boolean, False),
"Iops": (integer, False),
"VolumeSize": (integer, False),
"VolumeType": (str, False),
}
class EncryptionAtRestOptions(AWSProperty):
"""
`EncryptionAtRestOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-encryptionatrestoptions.html>`__
"""
props: PropsDictType = {
"Enabled": (boolean, False),
"KmsKeyId": (str, False),
}
class LogPublishingOption(AWSProperty):
"""
`LogPublishingOption <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-logpublishingoption.html>`__
"""
props: PropsDictType = {
"CloudWatchLogsLogGroupArn": (str, False),
"Enabled": (boolean, False),
}
class NodeToNodeEncryptionOptions(AWSProperty):
"""
`NodeToNodeEncryptionOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-nodetonodeencryptionoptions.html>`__
"""
props: PropsDictType = {
"Enabled": (boolean, False),
}
class SnapshotOptions(AWSProperty):
"""
`SnapshotOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-snapshotoptions.html>`__
"""
props: PropsDictType = {
"AutomatedSnapshotStartHour": (integer, False),
}
class VPCOptions(AWSProperty):
"""
`VPCOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-opensearchservice-domain-vpcoptions.html>`__
"""
props: PropsDictType = {
"SecurityGroupIds": ([str], False),
"SubnetIds": ([str], False),
}
class Domain(AWSObject):
"""
`Domain <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-opensearchservice-domain.html>`__
"""
resource_type = "AWS::OpenSearchService::Domain"
props: PropsDictType = {
"AccessPolicies": (dict, False),
"AdvancedOptions": (dict, False),
"AdvancedSecurityOptions": (AdvancedSecurityOptionsInput, False),
"ClusterConfig": (ClusterConfig, False),
"CognitoOptions": (CognitoOptions, False),
"DomainEndpointOptions": (DomainEndpointOptions, False),
"DomainName": (str, False),
"EBSOptions": (EBSOptions, False),
"EncryptionAtRestOptions": (EncryptionAtRestOptions, False),
"EngineVersion": (validate_search_service_engine_version, False),
"LogPublishingOptions": (dict, False),
"NodeToNodeEncryptionOptions": (NodeToNodeEncryptionOptions, False),
"SnapshotOptions": (SnapshotOptions, False),
"Tags": (Tags, False),
"VPCOptions": (VPCOptions, False),
}
| 32.735135 | 174 | 0.683124 |
8ce7419d84586d71646164441a214fd7d9640e07 | 467 | py | Python | main/apache/wsgi.py | nmota/public-contracts | b809df82147e5e4fa746416c0f9d51db2c6db05a | [
"BSD-3-Clause"
] | 25 | 2015-03-05T00:15:11.000Z | 2021-04-04T18:50:43.000Z | main/apache/wsgi.py | nmota/public-contracts | b809df82147e5e4fa746416c0f9d51db2c6db05a | [
"BSD-3-Clause"
] | 36 | 2015-03-21T17:04:54.000Z | 2017-07-06T10:35:51.000Z | main/apache/wsgi.py | nmota/public-contracts | b809df82147e5e4fa746416c0f9d51db2c6db05a | [
"BSD-3-Clause"
] | 7 | 2015-03-24T16:18:02.000Z | 2019-05-29T11:51:01.000Z | import os
import sys
#path to directory of the .wgsi file ('[directory]/')
wsgi_dir = os.path.abspath(os.path.dirname(__file__))
# path to project root directory (osqa '/')
project_dir = os.path.dirname(wsgi_dir)
# add project directory to system's Path
sys.path.append(project_dir)
sys.path.append(project_dir+"/main")
os.environ['DJANGO_SETTINGS_MODULE'] = 'main.settings'
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 27.470588 | 54 | 0.770878 |
4a938cd2dc9aaf6a4dfffb39767d72a869ef2441 | 14,918 | py | Python | SimulationCode/ErgodicHarvestingLib/ergodic.py | MacIver-Lab/Ergodic-Information-Harvesting | 6b06033852d511c682f1a38d84d6c3e0d735659b | [
"MIT"
] | 4 | 2018-06-29T18:41:14.000Z | 2021-03-16T06:08:46.000Z | SimulationCode/ErgodicHarvestingLib/ergodic.py | MacIver-Lab/Ergodic-Information-Harvesting | 6b06033852d511c682f1a38d84d6c3e0d735659b | [
"MIT"
] | 10 | 2018-05-10T21:42:20.000Z | 2019-03-10T20:48:46.000Z | SimulationCode/ErgodicHarvestingLib/ergodic.py | MacIver-Lab/Ergodic-Information-Harvesting | 6b06033852d511c682f1a38d84d6c3e0d735659b | [
"MIT"
] | 3 | 2019-02-18T02:32:06.000Z | 2021-04-07T02:55:06.000Z | # -*- coding: utf-8 -*-
import numpy as np
from scipy.integrate import trapz, quad, solve_ivp
from scipy.interpolate import interp1d
from ErgodicHarvestingLib.utils import matmult
class ProjectionBasedOpt(object):
def __init__(self, nx, nu, R, time, Quinit):
"""
Class to represent an optimization problem for a system with dynamic constraints.
:nx dimension of state
:nu dimension of the control
"""
self.nx = nx # Dimension of State X
self.nu = nu # Dimension of Control U
self.mass = 1.0 # Mass of the Dynamics Model
self.Q = np.eye(self.nx)
self.R = R * np.eye(self.nu)
self.Quinit = Quinit # weight for the initial control
self.P1 = 1.0
self.Qn = 1.0
self.Rn = 1.0
self.Qk = 1.0
self.Rk = 1.0
self.time = time
odeDeltaT = time[1] - time[0]
self.odeParam = {
"method": "RK23",
"t_span": (time[0], time[-1]),
"rtol": 1e-4,
"atol": 1e-7,
"t_eval": time,
"min_step": odeDeltaT,
"first_step": odeDeltaT,
"max_step": odeDeltaT,
}
self.odeIntegrator = lambda fun, y0: solve_ivp(
fun, y0=y0, **self.odeParam
).y.flatten()
def peqns(self, t, pp, Al, Bl, Rn, Qn):
if t > self.time[-1] or t < self.time[0]:
return 0
pp = np.array(pp).flatten()
pp = pp.reshape(self.nx, self.nx)
matdiffeq = (
matmult(pp, Al(t)) + matmult(Al(t), pp) - matmult(pp, Bl(t), Bl(t), pp) + Qn
)
return matdiffeq.flatten()
def reqns(self, t, rr, Al, Bl, a, b, Psol, Rn, Qn):
if t > self.time[-1] or t < self.time[0]:
return np.array([0.0])
t = self.time[-1] - t
matdiffeq = (
matmult((Al(t) - matmult(Bl(t), Bl(t), Psol(t))), rr)
+ a(t)
- matmult(Psol(t), Bl(t), b(t))
)
return matdiffeq.flatten()
def veqns(self, zz, Al, Bl, a, b, Psol, Rsol, Rn, Qn):
vmatdiffeq = matmult(-Bl, Psol, zz) - matmult(Bl, Rsol) - b
return vmatdiffeq
def zeqns(self, t, zz, Al, Bl, a, b, Psol, Rsol, Rn, Qn):
if t > self.time[-1] or t < self.time[0]:
return 0
vmateq = self.veqns(zz, Al(t), Bl(t).T, a(t), b(t), Psol(t), Rsol(t), Rn, Qn)
matdiffeq = matmult(Al(t), zz) + matmult(Bl(t), vmateq)
return matdiffeq.flatten()
def Ksol(self, X, U):
time = self.time
P1 = np.array([1.0])
soln = self.odeIntegrator(
lambda t, y: self.peqns(
t, y, self.A_interp, self.B_interp, self.Rk, self.Qk
),
P1,
)
# Convert the list to a numpy array.
psoln = np.array(soln).reshape(len(soln), 1)
K = np.empty((time.shape[0], self.nx))
for tindex, t in np.ndenumerate(time):
K[tindex, :] = matmult(self.B_current[tindex, 0], psoln[tindex])
self.K = K
return K
def Psol(self, X, U, time):
P1 = np.array([1.0])
soln = self.odeIntegrator(
lambda t, y: self.peqns(
t, y, self.A_interp, self.B_interp, self.Rn, self.Qn
),
P1,
)
soln = np.array(soln).reshape(len(soln), 1)
return soln
def Rsol(self, X, U, P_interp, time):
rinit2 = np.array([0])
Qn = 1.0
Rn = 1.0
soln = self.odeIntegrator(
lambda t, y: self.reqns(
t,
y,
self.A_interp,
self.B_interp,
self.a_interp,
self.b_interp,
P_interp,
Rn,
Qn,
),
rinit2,
)
soln = np.array(soln)
soln = np.flip(soln, 0).reshape(len(soln), 1)
return soln
# pointwise dynamics linearizations
def fofx_pointwise(self, X, U):
return U
def fofx(self, t, X, U):
if t > self.time[-1] or t < self.time[0]:
return 0
return U(t)
def dfdx_pointwise(self, x, u):
return np.array([0])
def dfdx(self):
time = self.time
dfdxl = np.empty([time.shape[0], self.nx])
for tindex, _ in np.ndenumerate(time):
dfdxl[tindex, :] = np.array([0])
self.A_current = dfdxl
return dfdxl
def dfdu_pointwise(self, x, u):
return np.array([1])
def dfdu(self):
time = self.time
dfdul = np.empty([time.shape[0], self.nx])
for tindex, _ in np.ndenumerate(time):
dfdul[tindex, :] = np.array([1])
self.B_current = dfdul
return dfdul
def cost_pointwise(self, x, u):
R = self.R
return 0.5 * matmult(u, R, u)
def cost(self, X, U):
cost = np.empty(self.time.shape[0])
for tindex, _ in np.ndenumerate(self.time):
cost[tindex] = self.cost_pointwise(X[tindex], U[tindex])
return trapz(cost, self.time) # Integrate over time
def eval_cost(self):
# return the evaluated cost function
return self.cost(self.X_current, self.U_current)
def dldu_pointwise(self, x, u):
# return the pointwise linearized cost WRT state
return matmult(self.R, u)
def dldx_pointwise(self, x, u):
# return pointwise linearized cost WRT input
return np.array([0.0])
def dldx(self):
# evaluate linearized cost WRT state
X = self.X_current
U = self.U_current
time = self.time
dldxl = np.empty((time.shape[0], self.nx))
for tindex, _ in np.ndenumerate(time):
dldxl[tindex, :] = self.dldx_pointwise(X[tindex], U[tindex])
self.a_current = dldxl
return self.a_current
def dldu(self):
# evaluate linearized cost WRT input
X = self.X_current
U = self.U_current
time = self.time
dldul = np.empty((time.shape[0], 1))
for tindex, _ in np.ndenumerate(time):
dldul[tindex, :] = self.dldu_pointwise(X[tindex], U[tindex])
dldul[0, :] += self.uinit * self.Quinit # initial control
self.b_current = dldul
return dldul
def dcost(self, descdir):
# evaluate directional derivative
dX = descdir[0]
dU = descdir[1]
time = self.time
dc = np.empty(time.shape[0])
for tindex, _ in np.ndenumerate(time):
dc[tindex] = matmult(self.a_current[tindex], dX[tindex]) + matmult(
self.b_current[tindex], dU[tindex]
)
intdcost = trapz(dc, time)
return intdcost
def descentdirection(self):
# solve for the descent direction by
X = self.X_current
U = self.U_current
time = self.time
Ps = self.Psol(X, U, time)
self.P_current = Ps
P_interp = interp1d(time, Ps.T)
Rs = self.Rsol(X, U, P_interp, time).flatten()
self.R_current = Rs
r_interp = interp1d(time, Rs.T)
zinit = -matmult(P_interp(0) ** -1, r_interp(0))
soln = self.odeIntegrator(
lambda t, y: self.zeqns(
t,
y,
self.A_interp,
self.B_interp,
self.a_interp,
self.b_interp,
P_interp,
r_interp,
self.Rn,
self.Qn,
),
zinit,
)
# Convert the list to a numpy array.
zsoln = np.array(soln)
zsoln = zsoln.reshape(time.shape[0], 1)
vsoln = np.empty(U.shape)
for tindex, t in np.ndenumerate(time):
vsoln[tindex] = self.veqns(
zsoln[tindex],
self.A_current[tindex],
self.B_current[tindex],
self.a_current[tindex],
self.b_current[tindex],
Ps[tindex],
Rs[tindex],
self.Rn,
self.Qn,
)
return [zsoln, vsoln]
def simulate(self, X0, U):
time = self.time
U_interp = interp1d(time, U.T)
# Solve ODE
soln = self.odeIntegrator(lambda t, y: self.fofx(t, y, U_interp), X0)
# Convert the list to a numpy array.
xsoln = np.array(soln).reshape(len(soln), 1)
return xsoln
def proj(self, t, X, K, mu, alpha):
if type(X) is float:
X = np.array(X)
if t > self.time[-1] or t < self.time[0]:
return 0
uloc = mu(t) + matmult(K(t), (alpha(t).T - X.T))
return uloc
def projcontrol(self, X, K, mu, alpha):
uloc = mu + matmult(K, (alpha.T - X.T))
return uloc
def project(self, X0, traj):
time = self.time
alpha = traj[0]
mu = traj[1]
# solve for riccatti gain
Ks = self.Ksol(alpha, mu)
K_interp = interp1d(time, Ks.T)
mu_interp = interp1d(time, mu.T)
alpha_interp = interp1d(time, alpha.T)
# Solve ODE
soln = self.odeIntegrator(
lambda t, y: self.proj(t, y, K_interp, mu_interp, alpha_interp), X0
)
# Convert the list to a numpy array.
xsoln = np.array(soln).reshape(len(soln), 1)
usoln = np.empty(mu.shape)
for tindex, _ in np.ndenumerate(time):
usoln[tindex, :] = self.projcontrol(
xsoln[tindex], Ks[tindex], mu[tindex], alpha[tindex]
)
return np.array([xsoln, usoln])
def update_traj(self, X, U):
self.X_current = X
self.U_current = U
self.dfdx()
self.dfdu()
self.dldx()
self.dldu()
self.A_interp = interp1d(self.time, self.A_current.T)
self.B_interp = interp1d(self.time, self.B_current.T)
self.a_interp = interp1d(self.time, self.a_current.T)
self.b_interp = interp1d(self.time, self.b_current.T)
class ErgodicOpt(ProjectionBasedOpt):
def __init__(self, nx, nu, ergParam, uinit):
super().__init__(
nx, nu, R=ergParam.wControl, time=ergParam.time, Quinit=ergParam.wInitCtrl
)
self.barrcost = ergParam.wBarrCost
self.ergcost = ergParam.wErgCost
self.Nfourier = ergParam.nFourier
self.uinit = uinit
self.dimw = 1 # workspace dimension
self.wlimit = 1.0
self.tRes = ergParam.tRes # Time Resolution
self.res = ergParam.res # EID spatial resolution
self.eidTime = ergParam.eidTime
self.xlist = np.linspace(0.0, 1.0, self.tRes)
# set up a grid over the frequency
klist = np.arange(self.Nfourier)
# do some ergodic stuff
s = (float(self.dimw) + 1.0) / 2.0
self.Lambdak = 1.0 / (1.0 + klist ** 2) ** s
self.klist = klist / self.wlimit * np.pi
self.hk = np.zeros(self.Nfourier).flatten()
for index in range(self.Nfourier):
integ = quad(lambda x: (np.cos(x * self.klist[index])) ** 2, 0.0, 1.0)
self.hk[index] = np.sqrt(integ[0])
def normalize_pdf(self):
# function to normalize a pdf
self.pdf /= np.sum(self.pdf) / np.product(self.pdf.shape)
def set_pdf(self, pdf):
# input pdf
pdfInterp = interp1d(self.eidTime, pdf)
self.pdf = pdfInterp(self.xlist)
self.normalize_pdf()
self.calculate_uk(self.pdf)
pass
def calculate_ergodicity(self):
# evaluate the ergodic metric (ck, uk, need to be calculated already)
self.erg = np.sum(self.Lambdak * (self.ck - self.uk) ** 2)
return self.erg
def barrier(self, xk):
barr_cost = np.zeros(xk.shape[0])
xk = xk.flatten()
too_big = xk[np.where(xk > self.wlimit)]
barr_cost[np.where(xk > self.wlimit)] = np.square(too_big - self.wlimit)
too_small = xk[np.where(xk < 0)]
barr_cost[np.where(xk < 0)] += np.square(too_small)
barr_cost = trapz(barr_cost, self.time)
return barr_cost
def Dbarrier(self, xk):
xk = xk.flatten()
dbarr_cost = np.zeros(xk.shape).reshape(xk.size, 1)
too_big = xk[np.where(xk > self.wlimit)]
dbarr_cost[np.where(xk > self.wlimit), 0] = 2.0 * (too_big - self.wlimit)
too_small = xk[np.where(xk < 0)]
dbarr_cost[np.where(xk < 0), 0] = 2.0 * too_small
return dbarr_cost
def calculate_uk(self, pdf):
# calculate Fourier coefficients of the distribution
self.uk = np.zeros(self.Nfourier).flatten()
for index in range(len(self.uk)):
uk_interior = pdf / self.hk[index]
basis_part = np.cos(self.klist[index] * self.xlist)
uk_interior *= self.wlimit / self.res * basis_part
self.uk[index] = np.sum(uk_interior)
def ckeval(self):
X = self.X_current
time = self.time
T = time[-1]
# change coordinates from configuration to ergodic workspace
W = X.flatten()
self.ck = np.zeros(self.Nfourier).flatten()
for index in range(len(self.ck)):
ck_interior = 1.0 / (self.hk[index] * T)
basis_part = np.cos(self.klist[index] * W)
ck_interior = ck_interior * basis_part
self.ck[index] = trapz(ck_interior, time)
def akeval(self):
X = self.X_current
time = self.time
T = time[-1]
xlist = X.flatten()
outerchain = 2.0 * self.Lambdak * (self.ck - self.uk) / (self.hk * T)
ak = []
for index in range(self.Nfourier):
# these are chain rule terms, get added
term = outerchain[index]
basis_part = -self.klist[index] * np.sin(self.klist[index] * xlist)
term *= basis_part
ak.append(term)
summed_ak = np.sum(np.array(ak), axis=0)
self.ak = np.array(summed_ak).reshape(summed_ak.size, 1)
return self.ak
def evalcost(self):
cost = self.cost(self.X_current, self.U_current)
barr_cost = self.barrcost * self.barrier(self.X_current)
erg_cost = self.ergcost * self.calculate_ergodicity()
J = barr_cost + erg_cost + cost
return J
def dldx(self):
X = self.X_current
self.a_current = self.ergcost * self.ak + self.barrcost * self.Dbarrier(X)
return self.a_current
def update_traj(self, X, U):
self.X_current = X
self.U_current = U
self.ckeval()
self.akeval()
self.dfdx()
self.dfdu()
self.dldx()
self.dldu()
self.A_interp = interp1d(self.time, self.A_current.T)
self.B_interp = interp1d(self.time, self.B_current.T)
self.a_interp = interp1d(self.time, self.a_current.T)
self.b_interp = interp1d(self.time, self.b_current.T)
| 33.004425 | 89 | 0.538946 |
846018c5cfa73ee055f5d358666e2cc3b1c9ef14 | 89,646 | py | Python | sympy/core/tests/test_wester.py | lelegan/sympy | ce020cf4ae1e741a99da74172c930de5affe583f | [
"BSD-3-Clause"
] | 1 | 2020-05-20T08:20:50.000Z | 2020-05-20T08:20:50.000Z | sympy/core/tests/test_wester.py | lelegan/sympy | ce020cf4ae1e741a99da74172c930de5affe583f | [
"BSD-3-Clause"
] | null | null | null | sympy/core/tests/test_wester.py | lelegan/sympy | ce020cf4ae1e741a99da74172c930de5affe583f | [
"BSD-3-Clause"
] | null | null | null | """ Tests from Michael Wester's 1999 paper "Review of CAS mathematical
capabilities".
http://www.math.unm.edu/~wester/cas/book/Wester.pdf
See also http://math.unm.edu/~wester/cas_review.html for detailed output of
each tested system.
"""
from sympy import (Rational, symbols, factorial, sqrt, log, exp, oo, zoo,
product, binomial, rf, pi, gamma, igcd, factorint, radsimp, combsimp,
npartitions, totient, primerange, factor, simplify, gcd, resultant, expand,
I, trigsimp, tan, sin, cos, cot, diff, nan, limit, EulerGamma, polygamma,
bernoulli, hyper, hyperexpand, besselj, asin, assoc_legendre, Function, re,
im, DiracDelta, chebyshevt, legendre_poly, polylog, series, O,
atan, sinh, cosh, tanh, floor, ceiling, solve, asinh, acot, csc, sec,
LambertW, N, apart, sqrtdenest, factorial2, powdenest, Mul, S, mpmath, ZZ,
Poly, expand_func, E, Q, And, Or, Ne, Eq, Le, Lt,
ask, refine, AlgebraicNumber,
elliptic_e, elliptic_f, powsimp, hessian, wronskian, fibonacci, sign,
Lambda, Piecewise, Subs, residue, Derivative, logcombine)
from sympy.functions.combinatorial.numbers import stirling
from sympy.functions.special.zeta_functions import zeta
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.utilities.pytest import XFAIL, slow
from sympy.utilities.iterables import partitions
from sympy.mpmath import mpi, mpc
from sympy.matrices import Matrix, GramSchmidt, eye
from sympy.matrices.expressions.blockmatrix import BlockMatrix, block_collapse
from sympy.matrices.expressions import MatrixSymbol, ZeroMatrix
from sympy.galgebra.ga import MV
from sympy.physics.quantum import Commutator
from sympy.assumptions import assuming
from sympy.polys.rings import vring
from sympy.polys.fields import vfield
from sympy.polys.solvers import solve_lin_sys
from sympy.concrete import Sum
from sympy.concrete.products import Product
from sympy.integrals import integrate
from sympy.integrals.transforms import laplace_transform,\
inverse_laplace_transform, LaplaceTransform, fourier_transform,\
mellin_transform
from sympy.functions.special.error_functions import erf
from sympy.functions.special.delta_functions import Heaviside
from sympy.solvers.recurr import rsolve
from sympy.solvers.ode import dsolve
from sympy.core.relational import Equality
R = Rational
x, y, z = symbols('x y z')
i, j, k, l, m, n = symbols('i j k l m n', integer=True)
f = Function('f')
g = Function('g')
# A. Boolean Logic and Quantifier Elimination
# Not implemented.
# B. Set Theory
# Not implemented.
# C. Numbers
def test_C1():
assert (factorial(50) ==
30414093201713378043612608166064768844377641568960512000000000000)
def test_C2():
assert (factorint(factorial(50)) == {2: 47, 3: 22, 5: 12, 7: 8,
11: 4, 13: 3, 17: 2, 19: 2, 23: 2, 29: 1, 31: 1, 37: 1,
41: 1, 43: 1, 47: 1})
def test_C3():
assert (factorial2(10), factorial2(9)) == (3840, 945)
# Base conversions; not really implemented by sympy
# Whatever. Take credit!
def test_C4():
assert 0xABC == 2748
def test_C5():
assert 123 == int('234', 7)
def test_C6():
assert int('677', 8) == int('1BF', 16) == 447
def test_C7():
assert log(32768, 8) == 5
def test_C8():
# Modular multiplicative inverse. Would be nice if divmod could do this.
assert ZZ.invert(5, 7) == 3
assert ZZ.invert(5, 6) == 5
def test_C9():
assert igcd(igcd(1776, 1554), 5698) == 74
def test_C10():
x = 0
for n in range(2, 11):
x += R(1, n)
assert x == R(4861, 2520)
def test_C11():
assert R(1, 7) == S('0.[142857]')
def test_C12():
assert R(7, 11) * R(22, 7) == 2
def test_C13():
test = R(10, 7) * (1 + R(29, 1000)) ** R(1, 3)
good = 3 ** R(1, 3)
assert test == good
def test_C14():
assert sqrtdenest(sqrt(2*sqrt(3) + 4)) == 1 + sqrt(3)
def test_C15():
test = sqrtdenest(sqrt(14 + 3*sqrt(3 + 2*sqrt(5 - 12*sqrt(3 - 2*sqrt(2))))))
good = sqrt(2) + 3
assert test == good
def test_C16():
test = sqrtdenest(sqrt(10 + 2*sqrt(6) + 2*sqrt(10) + 2*sqrt(15)))
good = sqrt(2) + sqrt(3) + sqrt(5)
assert test == good
def test_C17():
test = radsimp((sqrt(3) + sqrt(2)) / (sqrt(3) - sqrt(2)))
good = 5 + 2*sqrt(6)
assert test == good
def test_C18():
assert simplify((sqrt(-2 + sqrt(-5)) * sqrt(-2 - sqrt(-5))).expand(complex=True)) == 3
@XFAIL
def test_C19():
assert radsimp(simplify((90 + 35*sqrt(7)) ** R(1, 3))) == 3 + sqrt(7)
@XFAIL
def test_C20():
inside = (135 + 78*sqrt(3))
test = simplify((inside**R(2, 3) + 3) * sqrt(3) / inside**R(1, 3))
assert test == 12
@XFAIL
def test_C21():
assert simplify((41 + 29*sqrt(2)) ** R(1, 5)) == 1 + sqrt(2)
@XFAIL
def test_C22():
test = simplify(((6 - 4*sqrt(2))*log(3 - 2*sqrt(2)) + (3 - 2*sqrt(2))*log(17
- 12*sqrt(2)) + 32 - 24*sqrt(2)) / (48*sqrt(2) - 72))
good = sqrt(2)/3 - log(sqrt(2) - 1)/3
assert test == good
def test_C23():
assert 2 * oo - 3 == oo
@XFAIL
def test_C24():
raise NotImplementedError("2**aleph_null == aleph_1")
# D. Numerical Analysis
def test_D1():
assert 0.0 / sqrt(2) == 0.0
def test_D2():
assert str(exp(-1000000).evalf()) == '3.29683147808856e-434295'
def test_D3():
assert exp(pi*sqrt(163)).evalf(50).num.ae(262537412640768744)
def test_D4():
assert floor(R(-5, 3)) == -2
assert ceiling(R(-5, 3)) == -1
@XFAIL
def test_D5():
raise NotImplementedError("cubic_spline([1, 2, 4, 5], [1, 4, 2, 3], x)(3) == 27/8")
@XFAIL
def test_D6():
raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to FORTRAN")
@XFAIL
def test_D7():
raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to C")
@XFAIL
def test_D8():
# One way is to cheat by converting the sum to a string,
# and replacing the '[' and ']' with ''.
# E.g., horner(S(str(_).replace('[','').replace(']','')))
raise NotImplementedError("apply Horner's rule to sum(a[i]*x**i, (i,1,5))")
@XFAIL
def test_D9():
raise NotImplementedError("translate D8 to FORTRAN")
@XFAIL
def test_D10():
raise NotImplementedError("translate D8 to C")
@XFAIL
def test_D11():
#Is there a way to use count_ops?
raise NotImplementedError("flops(sum(product(f[i][k], (i,1,k)), (k,1,n)))")
@XFAIL
def test_D12():
assert (mpi(-4, 2) * x + mpi(1, 3)) ** 2 == mpi(-8, 16)*x**2 + mpi(-24, 12)*x + mpi(1, 9)
@XFAIL
def test_D13():
raise NotImplementedError("discretize a PDE: diff(f(x,t),t) == diff(diff(f(x,t),x),x)")
# E. Statistics
# See scipy; all of this is numerical.
# F. Combinatorial Theory.
def test_F1():
assert rf(x, 3) == x*(1 + x)*(2 + x)
def test_F2():
assert expand_func(binomial(n, 3)) == n*(n - 1)*(n - 2)/6
@XFAIL
def test_F3():
assert combsimp(2**n * factorial(n) * factorial2(2*n - 1)) == factorial(2*n)
@XFAIL
def test_F4():
assert combsimp((2**n * factorial(n) * product(2*k - 1, (k, 1, n)))) == factorial(2*n)
@XFAIL
def test_F5():
assert gamma(n + R(1, 2)) / sqrt(pi) / factorial(n) == factorial(2*n)/2**(2*n)/factorial(n)**2
def test_F6():
partTest = [p.copy() for p in partitions(4)]
partDesired = [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2:1}, {1: 4}]
assert partTest == partDesired
def test_F7():
assert npartitions(4) == 5
def test_F8():
assert stirling(5, 2, signed=True) == -50 # if signed, then kind=1
def test_F9():
assert totient(1776) == 576
# G. Number Theory
def test_G1():
assert list(primerange(999983, 1000004)) == [999983, 1000003]
@XFAIL
def test_G2():
raise NotImplementedError("find the primitive root of 191 == 19")
@XFAIL
def test_G3():
raise NotImplementedError("(a+b)**p mod p == a**p + b**p mod p; p prime")
# ... G20 Modular equations and continued fractions are not implemented.
# H. Algebra
def test_H1():
assert simplify(2*2**n) == simplify(2**(n + 1))
assert powdenest(2*2**n) == simplify(2**(n + 1))
def test_H2():
assert powsimp(4 * 2**n) == 2**(n + 2)
def test_H3():
assert (-1)**(n*(n + 1)) == 1
def test_H4():
expr = factor(6*x - 10)
assert type(expr) is Mul
assert expr.args[0] == 2
assert expr.args[1] == 3*x - 5
p1 = 64*x**34 - 21*x**47 - 126*x**8 - 46*x**5 - 16*x**60 - 81
p2 = 72*x**60 - 25*x**25 - 19*x**23 - 22*x**39 - 83*x**52 + 54*x**10 + 81
q = 34*x**19 - 25*x**16 + 70*x**7 + 20*x**3 - 91*x - 86
def test_H5():
assert gcd(p1, p2, x) == 1
def test_H6():
assert gcd(expand(p1 * q), expand(p2 * q)) == q
def test_H7():
p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
assert gcd(p1, p2, x, y, z) == 1
def test_H8():
p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
q = 11*x**12*y**7*z**13 - 23*x**2*y**8*z**10 + 47*x**17*y**5*z**8
assert gcd(p1 * q, p2 * q, x, y, z) == q
def test_H9():
p1 = 2*x**(n + 4) - x**(n + 2)
p2 = 4*x**(n + 1) + 3*x**n
assert gcd(p1, p2) == x**n
def test_H10():
p1 = 3*x**4 + 3*x**3 + x**2 - x - 2
p2 = x**3 - 3*x**2 + x + 5
assert resultant(p1, p2, x) == 0
def test_H11():
assert resultant(p1 * q, p2 * q, x) == 0
def test_H12():
num = x**2 - 4
den = x**2 + 4*x + 4
assert simplify(num/den) == (x - 2)/(x + 2)
@XFAIL
def test_H13():
assert simplify((exp(x) - 1) / (exp(x/2) + 1)) == exp(x/2) - 1
def test_H14():
p = (x + 1) ** 20
ep = expand(p)
assert ep == (1 + 20*x + 190*x**2 + 1140*x**3 + 4845*x**4 + 15504*x**5
+ 38760*x**6 + 77520*x**7 + 125970*x**8 + 167960*x**9 + 184756*x**10
+ 167960*x**11 + 125970*x**12 + 77520*x**13 + 38760*x**14 + 15504*x**15
+ 4845*x**16 + 1140*x**17 + 190*x**18 + 20*x**19 + x**20)
dep = diff(ep, x)
assert dep == (20 + 380*x + 3420*x**2 + 19380*x**3 + 77520*x**4
+ 232560*x**5 + 542640*x**6 + 1007760*x**7 + 1511640*x**8 + 1847560*x**9
+ 1847560*x**10 + 1511640*x**11 + 1007760*x**12 + 542640*x**13
+ 232560*x**14 + 77520*x**15 + 19380*x**16 + 3420*x**17 + 380*x**18
+ 20*x**19)
assert factor(dep) == 20*(1 + x)**19
def test_H15():
assert simplify((Mul(*[x - r for r in solve(x**3 + x**2 - 7)]))) == x**3 + x**2 - 7
def test_H16():
assert factor(x**100 - 1) == ((x - 1)*(x + 1)*(x**2 + 1)*(x**4 - x**3
+ x**2 - x + 1)*(x**4 + x**3 + x**2 + x + 1)*(x**8 - x**6 + x**4
- x**2 + 1)*(x**20 - x**15 + x**10 - x**5 + 1)*(x**20 + x**15 + x**10
+ x**5 + 1)*(x**40 - x**30 + x**20 - x**10 + 1))
@slow
def test_H17():
assert simplify(factor(expand(p1 * p2)) - p1*p2) == 0
@XFAIL
def test_H18():
# Factor over complex rationals.
test = factor(4*x**4 + 8*x**3 + 77*x**2 + 18*x + 53)
good = (2*x + 3*I)*(2*x - 3*I)*(x + 1 - 4*I)(x + 1 + 4*I)
assert test == good
def test_H19():
a = symbols('a')
# The idea is to let a**2 == 2, then solve 1/(a-1). Answer is a+1")
assert Poly(a - 1).invert(Poly(a**2 - 2)) == a + 1
@XFAIL
def test_H20():
raise NotImplementedError("let a**2==2; (x**3 + (a-2)*x**2 - "
+ "(2*a+3)*x - 3*a) / (x**2-2) = (x**2 - 2*x - 3) / (x-a)")
@XFAIL
def test_H21():
raise NotImplementedError("evaluate (b+c)**4 assuming b**3==2, c**2==3. \
Answer is 2*b + 8*c + 18*b**2 + 12*b*c + 9")
def test_H22():
assert factor(x**4 - 3*x**2 + 1, modulus=5) == (x - 2)**2 * (x + 2)**2
def test_H23():
f = x**11 + x + 1
g = (x**2 + x + 1) * (x**9 - x**8 + x**6 - x**5 + x**3 - x**2 + 1)
assert factor(f, modulus=65537) == g
def test_H24():
phi = AlgebraicNumber(S.GoldenRatio.expand(func=True), alias='phi')
assert factor(x**4 - 3*x**2 + 1, extension=phi) == \
(x - phi)*(x + 1 - phi)*(x - 1 + phi)*(x + phi)
@slow
def test_H25():
e = (x - 2*y**2 + 3*z**3) ** 20
assert factor(expand(e)) == e
@slow
def test_H26():
g = expand((sin(x) - 2*cos(y)**2 + 3*tan(z)**3)**20)
assert factor(g, expand=False) == (-sin(x) + 2*cos(y)**2 - 3*tan(z)**3)**20
@slow
def test_H27():
f = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
g = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
h = -2*z*y**7 \
*(6*x**9*y**9*z**3 + 10*x**7*z**6 + 17*y*x**5*z**12 + 40*y**7) \
*(3*x**22 + 47*x**17*y**5*z**8 - 6*x**15*y**9*z**2 - 24*x*y**19*z**8 - 5)
assert factor(expand(f*g)) == h
@XFAIL
def test_H28():
raise NotImplementedError("expand ((1 - c**2)**5 * (1 - s**2)**5 * "
+ "(c**2 + s**2)**10) with c**2 + s**2 = 1. Answer is c**10*s**10.")
@XFAIL
def test_H29():
assert factor(4*x**2 - 21*x*y + 20*y**2, modulus=3) == (x + y)*(x - y)
def test_H30():
test = factor(x**3 + y**3, extension=sqrt(-3))
answer = (x + y)*(x + y*(-R(1, 2) - sqrt(3)/2*I))*(x + y*(-R(1, 2) + sqrt(3)/2*I))
assert answer == test
def test_H31():
f = (x**2 + 2*x + 3)/(x**3 + 4*x**2 + 5*x + 2)
g = 2 / (x + 1)**2 - 2 / (x + 1) + 3 / (x + 2)
assert apart(f) == g
@XFAIL
def test_H32(): # issue 3459
raise NotImplementedError("[A*B*C - (A*B*C)**(-1)]*A*C*B (product \
of a non-commuting product and its inverse)")
def test_H33():
A, B, C = symbols('A, B, C', commutatative=False)
assert (Commutator(A, Commutator(B, C))
+ Commutator(B, Commutator(C, A))
+ Commutator(C, Commutator(A, B))).doit().expand() == 0
# I. Trigonometry
@XFAIL
def test_I1():
assert tan(7*pi/10) == -sqrt(1 + 2/sqrt(5))
@XFAIL
def test_I2():
assert sqrt((1 + cos(6))/2) == -cos(3)
def test_I3():
assert cos(n*pi) + sin((4*n - 1)*pi/2) == (-1)**n - 1
def test_I4():
assert refine(cos(pi*cos(n*pi)) + sin(pi/2*cos(n*pi)), Q.integer(n)) == (-1)**n - 1
@XFAIL
def test_I5():
assert sin((n**5/5 + n**4/2 + n**3/3 - n/30) * pi) == 0
@XFAIL
def test_I6():
raise NotImplementedError("assuming -3*pi<x<-5*pi/2, abs(cos(x)) == -cos(x), abs(sin(x)) == -sin(x)")
@XFAIL
def test_I7():
assert cos(3*x)/cos(x) == cos(x)**2 - 3*sin(x)**2
@XFAIL
def test_I8():
assert cos(3*x)/cos(x) == 2*cos(2*x) - 1
@XFAIL
def test_I9():
# Supposed to do this with rewrite rules.
assert cos(3*x)/cos(x) == cos(x)**2 - 3*sin(x)**2
def test_I10():
assert trigsimp((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1)) == nan
#@XFAIL
#def test_I11():
# assert limit((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1), x, 0) != 0
@XFAIL
def test_I12():
try:
# This should fail or return nan or something.
diff((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1), x)
except:
assert True
else:
assert False, "taking the derivative with a fraction equivalent to 0/0 should fail"
# J. Special functions.
def test_J1():
    # Bernoulli number B_16 = -3617/510.
    expected = R(-3617, 510)
    assert bernoulli(16) == expected
def test_J2():
assert diff(elliptic_e(x, y**2), y) == (elliptic_e(x, y**2) - elliptic_f(x, y**2))/y
@XFAIL
def test_J3():
raise NotImplementedError("Jacobi elliptic functions: diff(dn(u,k), u) == -k**2*sn(u,k)*cn(u,k)")
def test_J4():
    # gamma(-1/2) == -2*sqrt(pi).
    value = gamma(R(-1, 2))
    assert value == -2*sqrt(pi)
def test_J5():
assert polygamma(0, R(1, 3)) == -EulerGamma - pi/2*sqrt(R(1, 3)) - R(3, 2)*log(3)
def test_J6():
assert mpmath.besselj(2, 1 + 1j).ae(mpc('0.04157988694396212', '0.24739764151330632'))
def test_J7():
assert simplify(besselj(R(-5,2), pi/2)) == 12/(pi**2)
def test_J8():
    # Half-integer Bessel functions reduce to elementary functions:
    # J_{3/2}(z) == sqrt(2/(pi*z)) * (sin(z)/z - cos(z)).
    bessel = besselj(R(3,2), z)
    elementary = (sin(z)/z - cos(z))/sqrt(pi*z/2)
    assert simplify(expand_func(bessel) - elementary) == 0
def test_J9():
assert besselj(0, z).diff(z) == - besselj(1, z)
def test_J10():
mu, nu = symbols('mu, nu', integer=True)
assert assoc_legendre(nu, mu, 0) == 2**mu*sqrt(pi)/gamma((nu - mu)/2 + 1)/gamma((-nu - mu + 1)/2)
def test_J11():
    # Associated Legendre function P_3^1(x).
    lhs = simplify(assoc_legendre(3, 1, x))
    rhs = simplify(-R(3, 2)*sqrt(1 - x**2)*(5*x**2 - 1))
    assert lhs == rhs
@slow
def test_J12():
assert simplify(chebyshevt(1008, x) - 2*x*chebyshevt(1007, x) + chebyshevt(1006, x)) == 0
def test_J13():
a = symbols('a', integer=True, negative=False)
assert chebyshevt(a, -1) == (-1)**a
def test_J14():
p = hyper([S(1)/2, S(1)/2], [S(3)/2], z**2)
assert hyperexpand(p) == asin(z)/z
@XFAIL
def test_J15():
raise NotImplementedError("F((n+2)/2,-(n-2)/2,R(3,2),sin(z)**2) == sin(n*z)/(n*sin(z)*cos(z)); F(.) is hypergeometric function")
@XFAIL
def test_J16():
raise NotImplementedError("diff(zeta(x), x) @ x=0 == -log(2*pi)/2")
@XFAIL
def test_J17():
assert deltaintegrate(f((x + 2)/5)*DiracDelta((x - 2)/3) - g(x)*diff(DiracDelta(x - 1), x), (x, 0, 3))
@XFAIL
def test_J18():
raise NotImplementedError("define an antisymmetric function")
# K. The Complex Domain
def test_K1():
    # Real and imaginary parts of z1 + I*z2 for complex z1, z2.
    z1, z2 = symbols('z1, z2', complex=True)
    w = z1 + I*z2
    assert re(w) == -im(z2) + re(z1)
    assert im(w) == im(z1) + re(z2)
@XFAIL # abs(...).n() does evaluate to 1.00000...
def test_K2():
assert abs(3 - sqrt(7) + I*sqrt(6*sqrt(7) - 15)) == 1
@XFAIL
def test_K3():
a, b = symbols('a, b', real=True)
assert simplify(abs(1/(a + I/a + I*b))) == 1/sqrt(a**2 + (I/a + b)**2)
def test_K4():
assert log(3 + 4*I).expand(complex=True) == log(5) + I*atan(R(4, 3))
def test_K5():
x, y = symbols('x, y', real=True)
assert tan(x + I*y).expand(complex=True) == sin(x)*cos(x) / (cos(x)**2 +
sinh(y)**2) + I*sinh(y)*cosh(y) / (cos(x)**2 + sinh(y)**2)
def test_K6():
assert sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z)) == sqrt(x*y)/sqrt(x)
assert sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z)) != sqrt(y)
def test_K7():
    # With y known to be real and non-negative the radical collapses
    # to sqrt(y) (contrast with K6, where nothing is known about y).
    y = symbols('y', real=True, negative=False)
    assert simplify(sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z))) == sqrt(y)
@XFAIL
def test_K8():
z = symbols('z', complex=True)
assert simplify(sqrt(1/z) - 1/sqrt(z)) != 0 # Passes
z = symbols('z', complex=True, negative=False)
assert simplify(sqrt(1/z) - 1/sqrt(z)) == 0 # Fails
def test_K9():
z = symbols('z', real=True, positive=True)
assert simplify(sqrt(1/z) - 1/sqrt(z)) == 0
def test_K10():
z = symbols('z', real=True, negative=True)
assert simplify(sqrt(1/z) + 1/sqrt(z)) == 0
# This goes up to K25
# L. Determining Zero Equivalence
def test_L1():
    # sqrt(997) and (997**3)**(1/6) denote the same algebraic number.
    difference = sqrt(997) - (997**3)**R(1, 6)
    assert difference == 0
def test_L2():
    # Same as L1 but with a larger prime.
    difference = sqrt(999983) - (999983**3)**R(1, 6)
    assert difference == 0
def test_L3():
    # 2**(1/3) + 4**(1/3) is a root of x**3 - 6*x - 6.
    root = 2**R(1, 3) + 4**R(1, 3)
    assert simplify(root**3 - 6*root - 6) == 0
def test_L4():
assert trigsimp(cos(x)**3 + cos(x)*sin(x)**2 - cos(x)) == 0
@XFAIL
def test_L5():
assert log(tan(R(1, 2)*x + pi/4)) - asinh(tan(x)) == 0
def test_L6():
assert (log(tan(x/2 + pi/4)) - asinh(tan(x))).diff(x).subs({x: 0}) == 0
@XFAIL
def test_L7():
assert simplify(log((2*sqrt(x) + 1)/(sqrt(4*x + 4*sqrt(x) + 1)))) == 0
@XFAIL
def test_L8():
assert simplify((4*x + 4*sqrt(x) + 1)**(sqrt(x)/(2*sqrt(x) + 1)) \
*(2*sqrt(x) + 1)**(1/(2*sqrt(x) + 1)) - 2*sqrt(x) - 1) == 0
@XFAIL
def test_L9():
z = symbols('z', complex=True)
assert simplify(2**(1 - z)*gamma(z)*zeta(z)*cos(z*pi/2) - pi**2*zeta(1 - z)) == 0
# M. Equations
@XFAIL
def test_M1():
assert Equality(x, 2)/2 + Equality(1, 1) == Equality(x/2 + 1, 2)
def test_M2():
# The roots of this equation should all be real. Note that this doesn't test
# that they are correct.
sol = solve(3*x**3 - 18*x**2 + 33*x - 19, x)
assert all(expand(x, complex=True).is_real for x in sol)
@XFAIL
def test_M5():
assert solve(x**6 - 9*x**4 - 4*x**3 + 27*x**2 - 36*x - 23, x) == [2**(1/3) + sqrt(3), 2**(1/3) - sqrt(3), +sqrt(3) - 1/2**(2/3) + I*sqrt(3)/2**(2/3), +sqrt(3) - 1/2**(2/3) - I*sqrt(3)/2**(2/3), -sqrt(3) - 1/2**(2/3) + I*sqrt(3)/2**(2/3), -sqrt(3) - 1/2**(2/3) - I*sqrt(3)/2**(2/3)]
def test_M6():
assert set(solve(x**7 - 1, x)) == set([cos(n*2*pi/7) + I*sin(n*2*pi/7) for n in range(0, 7)])
# The paper asks for exp terms, but sin's and cos's may be acceptable
def test_M7():
assert set(solve(x**8 - 8*x**7 + 34*x**6 - 92*x**5 + 175*x**4 - 236*x**3 +
226*x**2 - 140*x + 46, x)) == set([
1 + sqrt(2)*I*sqrt(sqrt(-3 + 4*sqrt(3)) + 3)/2,
1 + sqrt(2)*sqrt(-3 + sqrt(-3 + 4*sqrt(3)))/2,
1 - sqrt(2)*sqrt(-3 + I*sqrt(3 + 4*sqrt(3)))/2,
1 - sqrt(2)*I*sqrt(sqrt(-3 + 4*sqrt(3)) + 3)/2,
1 + sqrt(2)*sqrt(-3 - I*sqrt(3 + 4*sqrt(3)))/2,
1 + sqrt(2)*sqrt(-3 + I*sqrt(3 + 4*sqrt(3)))/2,
1 - sqrt(2)*sqrt(-3 - I*sqrt(3 + 4*sqrt(3)))/2,
1 - sqrt(2)*sqrt(-3 + sqrt(-3 + 4*sqrt(3)))/2,
])
@XFAIL # There are an infinite number of solutions.
def test_M8():
z = symbols('z', complex=True)
assert set(solve(exp(2*x) + 2*exp(x) + 1 - z, x)) == \
set([log(1 + z - 2*sqrt(z))/2, log(1 + z + 2*sqrt(z))/2])
# This one could be simplified better (the 1/2 could be pulled into the log
# as a sqrt, and the function inside the log can be factored as a square,
# giving [log(sqrt(z) - 1), log(sqrt(z) + 1)]). Also, there should be an
# infinite number of solutions.
# x = {log(sqrt(z) - 1), log(sqrt(z) + 1) + i pi} [+ n 2 pi i, + n 2 pi i]
# where n is an arbitrary integer. See url of detailed output above.
@XFAIL
def test_M9():
x = symbols('x', complex=True)
raise NotImplementedError("solve(exp(2-x**2)-exp(-x),x) has complex solutions.")
def test_M10():
assert solve(exp(x) - x, x) == [-LambertW(-1)]
@XFAIL
def test_M11():
assert solve(x**x - x, x) == [-1, 1]
def test_M12():
# TODO: x = [-1, 2*(+/-asinh(1)*I + n*pi}, 3*(pi/6 + n*pi/3)]
assert solve((x + 1)*(sin(x)**2 + 1)**2*cos(3*x)**3, x) == [
-1, pi/6, pi/2,
- I*log(1 + sqrt(2)), I*log(1 + sqrt(2)),
pi - I*log(1 + sqrt(2)), pi + I*log(1 + sqrt(2)),
]
def test_M13():
assert solve(sin(x) - cos(x), x) == [-3*pi/4, pi/4]
def test_M14():
assert solve(tan(x) - 1, x) == [pi/4]
def test_M15():
assert solve(sin(x) - S.Half) == [pi/6, 5*pi/6]
def test_M16():
assert solve(sin(x) - tan(x), x) == [0, 2*pi]
@XFAIL
def test_M17():
assert solve(asin(x) - atan(x),x) == [0]
@XFAIL
def test_M18():
assert solve(acos(x) - atan(x), x) == [sqrt((sqrt(5) - 1)/2)]
def test_M19():
assert solve((x - 2)/x**R(1, 3), x) == [2]
def test_M20():
assert solve(sqrt(x**2 + 1) - x + 2, x) == []
def test_M21():
assert solve(x + sqrt(x) - 2) == [1]
def test_M22():
assert solve(2*sqrt(x) + 3*x**R(1, 4) - 2) == [R(1, 16)]
def test_M23():
x = symbols('x', complex=True)
assert solve(x - 1/sqrt(1 + x**2)) == [
simplify(-I*sqrt((sqrt(5) + 1)/2)),
simplify( sqrt((sqrt(5) - 1)/2)),
]
def test_M24():
solution = solve(1 - binomial(m, 2)*2**k, k)
answer = log(2/(m*(m - 1)), 2)
assert solution[0].expand() == answer.expand()
def test_M25():
a, b, c, d = symbols(':d', positive=True)
x = symbols('x')
assert solve(a*b**x - c*d**x, x)[0].expand() == (log(c/a)/log(b/d)).expand()
def test_M26():
assert solve(sqrt(log(x)) - log(sqrt(x))) == [1, exp(4)]
@XFAIL
def test_M27():
x = symbols('x', real=True)
b = symbols('b', real=True)
with assuming(Q.is_true(sin(cos(1/E**2) + 1) + b > 0)):
solve(log(acos(asin(x**R(2,3) - b) - 1)) + 2, x) == [-b - sin(1 + cos(1/e**2))**R(3/2), b + sin(1 + cos(1/e**2))**R(3/2)]
@XFAIL
def test_M28():
assert solve(5*x + exp((x - 5)/2) - 8*x**3, x, assume=Q.real(x)) == [-0.784966, -0.016291, 0.802557]
def test_M29():
assert solve(abs(x - 1) - 2) == [-1, 3]
@XFAIL
def test_M30():
assert solve(abs(2*x + 5) - abs(x - 2),x, assume=Q.real(x)) == [-1, -7]
@XFAIL
def test_M31():
assert solve(1 - abs(x) - max(-x - 2, x - 2),x, assume=Q.real(x)) == [-3/2, 3/2]
@XFAIL
def test_M32():
assert solve(max(2 - x**2, x)- max(-x, (x**3)/9), assume=Q.real(x)) == [-1, 3]
@XFAIL
def test_M33():
# Second answer can be written in another form. The second answer is the root of x**3 + 9*x**2 - 18 = 0 in the interval (-2, -1).
assert solve(max(2 - x**2, x) - x**3/9, assume=Q.real(x)) == [-3, -1.554894, 3]
@XFAIL
def test_M34():
z = symbols('z', complex=True)
assert solve((1 + I) * z + (2 - I) * conjugate(z) + 3*I, z) == [2 + 3*I]
def test_M35():
x, y = symbols('x y', real=True)
assert solve((3*x - 2*y - I*y + 3*I).as_real_imag()) == {y: 3, x: 2}
@XFAIL
def test_M36():
assert solve(f**2 + f - 2, x) == [Eq(f(x), 1), Eq(f(x), -2)]
def test_M37():
assert solve([x + y + z - 6, 2*x + y + 2*z - 10, x + 3*y + z - 10 ]) == {x: -z + 4, y: 2}
@slow
def test_M38():
    # Solve a large sparse linear system for the unknowns k1..k49 over the
    # rational function field QQ(a, b, c).
    # NOTE(review): "variabes" is a typo for "variables"; it is harmless
    # since the local name is used consistently.
    variabes = vring("k1:50", vfield("a,b,c", ZZ).to_domain())
    system = [
        -b*k8/a + c*k8/a, -b*k11/a + c*k11/a, -b*k10/a + c*k10/a + k2, -k3 - b*k9/a + c*k9/a,
        -b*k14/a + c*k14/a, -b*k15/a + c*k15/a, -b*k18/a + c*k18/a - k2, -b*k17/a + c*k17/a,
        -b*k16/a + c*k16/a + k4, -b*k13/a + c*k13/a - b*k21/a + c*k21/a + b*k5/a - c*k5/a,
        b*k44/a - c*k44/a, -b*k45/a + c*k45/a, -b*k20/a + c*k20/a, -b*k44/a + c*k44/a,
        b*k46/a - c*k46/a, b**2*k47/a**2 - 2*b*c*k47/a**2 + c**2*k47/a**2, k3, -k4,
        -b*k12/a + c*k12/a - a*k6/b + c*k6/b, -b*k19/a + c*k19/a + a*k7/c - b*k7/c,
        b*k45/a - c*k45/a, -b*k46/a + c*k46/a, -k48 + c*k48/a + c*k48/b - c**2*k48/(a*b),
        -k49 + b*k49/a + b*k49/c - b**2*k49/(a*c), a*k1/b - c*k1/b, a*k4/b - c*k4/b,
        a*k3/b - c*k3/b + k9, -k10 + a*k2/b - c*k2/b, a*k7/b - c*k7/b, -k9, k11,
        b*k12/a - c*k12/a + a*k6/b - c*k6/b, a*k15/b - c*k15/b, k10 + a*k18/b - c*k18/b,
        -k11 + a*k17/b - c*k17/b, a*k16/b - c*k16/b, -a*k13/b + c*k13/b + a*k21/b - c*k21/b + a*k5/b - c*k5/b,
        -a*k44/b + c*k44/b, a*k45/b - c*k45/b, a*k14/c - b*k14/c + a*k20/b - c*k20/b,
        a*k44/b - c*k44/b, -a*k46/b + c*k46/b, -k47 + c*k47/a + c*k47/b - c**2*k47/(a*b),
        a*k19/b - c*k19/b, -a*k45/b + c*k45/b, a*k46/b - c*k46/b, a**2*k48/b**2 - 2*a*c*k48/b**2 + c**2*k48/b**2,
        -k49 + a*k49/b + a*k49/c - a**2*k49/(b*c), k16, -k17, -a*k1/c + b*k1/c,
        -k16 - a*k4/c + b*k4/c, -a*k3/c + b*k3/c, k18 - a*k2/c + b*k2/c, b*k19/a - c*k19/a - a*k7/c + b*k7/c,
        -a*k6/c + b*k6/c, -a*k8/c + b*k8/c, -a*k11/c + b*k11/c + k17, -a*k10/c + b*k10/c - k18,
        -a*k9/c + b*k9/c, -a*k14/c + b*k14/c - a*k20/b + c*k20/b, -a*k13/c + b*k13/c + a*k21/c - b*k21/c - a*k5/c + b*k5/c,
        a*k44/c - b*k44/c, -a*k45/c + b*k45/c, -a*k44/c + b*k44/c, a*k46/c - b*k46/c,
        -k47 + b*k47/a + b*k47/c - b**2*k47/(a*c), -a*k12/c + b*k12/c, a*k45/c - b*k45/c,
        -a*k46/c + b*k46/c, -k48 + a*k48/b + a*k48/c - a**2*k48/(b*c),
        a**2*k49/c**2 - 2*a*b*k49/c**2 + b**2*k49/c**2, k8, k11, -k15, k10 - k18,
        -k17, k9, -k16, -k29, k14 - k32, -k21 + k23 - k31, -k24 - k30, -k35, k44,
        -k45, k36, k13 - k23 + k39, -k20 + k38, k25 + k37, b*k26/a - c*k26/a - k34 + k42,
        -2*k44, k45, k46, b*k47/a - c*k47/a, k41, k44, -k46, -b*k47/a + c*k47/a,
        k12 + k24, -k19 - k25, -a*k27/b + c*k27/b - k33, k45, -k46, -a*k48/b + c*k48/b,
        a*k28/c - b*k28/c + k40, -k45, k46, a*k48/b - c*k48/b, a*k49/c - b*k49/c,
        -a*k49/c + b*k49/c, -k1, -k4, -k3, k15, k18 - k2, k17, k16, k22, k25 - k7,
        k24 + k30, k21 + k23 - k31, k28, -k44, k45, -k30 - k6, k20 + k32, k27 + b*k33/a - c*k33/a,
        k44, -k46, -b*k47/a + c*k47/a, -k36, k31 - k39 - k5, -k32 - k38, k19 - k37,
        k26 - a*k34/b + c*k34/b - k42, k44, -2*k45, k46, a*k48/b - c*k48/b,
        a*k35/c - b*k35/c - k41, -k44, k46, b*k47/a - c*k47/a, -a*k49/c + b*k49/c,
        -k40, k45, -k46, -a*k48/b + c*k48/b, a*k49/c - b*k49/c, k1, k4, k3, -k8,
        -k11, -k10 + k2, -k9, k37 + k7, -k14 - k38, -k22, -k25 - k37, -k24 + k6,
        -k13 - k23 + k39, -k28 + b*k40/a - c*k40/a, k44, -k45, -k27, -k44, k46,
        b*k47/a - c*k47/a, k29, k32 + k38, k31 - k39 + k5, -k12 + k30, k35 - a*k41/b + c*k41/b,
        -k44, k45, -k26 + k34 + a*k42/c - b*k42/c, k44, k45, -2*k46, -b*k47/a + c*k47/a,
        -a*k48/b + c*k48/b, a*k49/c - b*k49/c, k33, -k45, k46, a*k48/b - c*k48/b,
        -a*k49/c + b*k49/c
    ]
    # Almost every unknown is forced to zero; the only free parameters are
    # k39 and k42, with k23/k31 tied to k39 and k26/k34 tied to k42.
    solution = {
        k49: 0, k48: 0, k47: 0, k46: 0, k45: 0, k44: 0, k41: 0, k40: 0,
        k38: 0, k37: 0, k36: 0, k35: 0, k33: 0, k32: 0, k30: 0, k29: 0,
        k28: 0, k27: 0, k25: 0, k24: 0, k22: 0, k21: 0, k20: 0, k19: 0,
        k18: 0, k17: 0, k16: 0, k15: 0, k14: 0, k13: 0, k12: 0, k11: 0,
        k10: 0, k9: 0, k8: 0, k7: 0, k6: 0, k5: 0, k4: 0, k3: 0,
        k2: 0, k1: 0,
        k34: b/c*k42, k31: k39, k26: a/c*k42, k23: k39
    }
    assert solve_lin_sys(system, variabes) == solution
def test_M39():
x, y, z = symbols('x y z', complex=True)
assert solve([x**2*y + 3*y*z - 4, -3*x**2*z + 2*y**2 + 1, 2*y*z**2 - z**2 - 1 ]) ==\
[{y: 1, z: 1, x: -1}, {y: 1, z: 1, x: 1},\
{y: sqrt(2)*I, z: R(1,3) - sqrt(2)*I/3, x: -sqrt(-1 - sqrt(2)*I)},\
{y: sqrt(2)*I, z: R(1,3) - sqrt(2)*I/3, x: sqrt(-1 - sqrt(2)*I)},\
{y: -sqrt(2)*I, z: R(1,3) + sqrt(2)*I/3, x: -sqrt(-1 + sqrt(2)*I)},\
{y: -sqrt(2)*I, z: R(1,3) + sqrt(2)*I/3, x: sqrt(-1 + sqrt(2)*I)}]
# N. Inequalities
def test_N1():
assert ask(Q.is_true(E**pi > pi**E))
@XFAIL
def test_N2():
x = symbols('x', real=True)
assert ask(Q.is_true(x**4 - x + 1 > 0))
assert ask(Q.is_true(x**4 - x + 1 > 1)) == False
@XFAIL
def test_N3():
x = symbols('x', real=True)
assert ask(Q.is_true(And(Lt(-1, x), Lt(x, 1))), Q.is_true(abs(x) < 1 ))
@XFAIL
def test_N4():
x, y = symbols('x y', real=True)
assert ask(Q.is_true(2*x**2 > 2*y**2), Q.is_true((x > y) & (y > 0)))
@XFAIL
def test_N5():
x, y, k = symbols('x y k', real=True)
assert ask(Q.is_true(k*x**2 > k*y**2), Q.is_true((x > y) & (y > 0) & (k > 0)))
@XFAIL
def test_N6():
x, y, k, n = symbols('x y k n', real=True)
assert ask(Q.is_true(k*x**n > k*y**n), Q.is_true((x > y) & (y > 0) & (k > 0) & (n > 0)))
@XFAIL
def test_N7():
x, y = symbols('x y', real=True)
assert ask(Q.is_true(y > 0), Q.is_true((x > 1) & (y >= x - 1)))
@XFAIL
def test_N8():
x, y, z = symbols('x y z', real=True)
assert ask(Q.is_true((x == y) & (y == z)),
Q.is_true((x >= y) & (y >= z) & (z >= x)))
def test_N9():
with assuming(Q.real(x)):
assert solve(abs(x - 1) > 2) == Or(x < -1, x > 3)
def test_N10():
p = (x - 1)*(x - 2)*(x - 3)*(x - 4)*(x - 5)
assert solve(expand(p) < 0, assume=Q.real(x)) == Or(
And(Lt(2, x), Lt(x, 3)), And(Lt(4, x), Lt(x, 5)), Lt(x, 1))
def test_N11():
assert solve(6/(x - 3) <= 3, assume=Q.real(x)) == Or(5 <= x, x < 3)
@XFAIL
def test_N12():
assert solve(sqrt(x) < 2, assume=Q.real(x)) == And(Le(0, x), Lt(x, 4))
@XFAIL
def test_N13():
# raises NotImplementedError: can't reduce [sin(x) < 2]
assert solve(sin(x) < 2, assume=Q.real(x)) == [] # S.Reals not found
@XFAIL
def test_N14():
# raises NotImplementedError: can't reduce [sin(x) < 1]
assert (solve(sin(x) < 1, assume=Q.real(x)) == Ne(x, pi/2))
@XFAIL
def test_N15():
r, t = symbols('r t', real=True)
# raises NotImplementedError: only univariate inequalities are supported
solve(abs(2*r*(cos(t) - 1) + 1) <= 1, r)
@XFAIL
def test_N16():
r, t = symbols('r t', real=True)
solve((r**2)*((cos(t) - 4)**2)*sin(t)**2 < 9, r)
@XFAIL
def test_N17():
# raises NotImplementedError: only univariate inequalities are supported
assert solve((x + y > 0, x - y < 0)) == (abs(x) < y)
def test_O1():
M = Matrix((1 + I, -2, 3*I))
assert sqrt(expand(M.dot(M.H))) == sqrt(15)
def test_O2():
assert Matrix((2, 2, -3)).cross(Matrix((1, 3, 1))) == Matrix([[11],
[-5],
[4]])
@slow
def test_O3():
(va, vb, vc, vd) = MV.setup('va vb vc vd')
assert (va ^ vb) | (vc ^ vd) == -(va | vc)*(vb | vd) + (va | vd)*(vb | vc)
def test_O4():
(ex, ey, ez, grad) = MV.setup('e*x|y|z', metric='[1,1,1]',
coords=(x, y, z))
F = ex*(x*y*z) + ey*((x*y*z)**2) + ez*((y**2)*(z**3))
assert (grad^F -(x*z*(2*y**2*z - 1))*ex^ey - x*y*ex^ez +
(2*y*z*(-x**2*y + z**2))*ey^ez) == 0
@XFAIL
@slow
def test_O5():
(_, _, _, grad) = MV.setup('e*x|y|z',metric='[1,1,1]',coords=(x, y, z))
f = MV('f','vector',fct=True)
g = MV('g','vector',fct=True)
assert grad|(f^g)-g|(grad^f)+f|(grad^g) == 0
# Tests O6-O9 are missing (test_O5 is followed directly by test_O10).
def test_O10():
L = [Matrix([2, 3, 5]), Matrix([3, 6, 2]), Matrix([8, 3, 6])]
assert GramSchmidt(L) == [Matrix([
[2],
[3],
[5]]),
Matrix([
[S(23)/19],
[S(63)/19],
[S(-47)/19]]),
Matrix([
[S(1692)/353],
[S(-1551)/706],
[S(-423)/706]])]
@XFAIL
def test_P1():
raise NotImplementedError("Matrix property/function to extract Nth \
diagonal not implemented. See Matlab diag(A,k) \
http://www.mathworks.de/de/help/symbolic/diag.html")
def test_P2():
    # Deleting the middle row and last column of a 3x3 matrix leaves the
    # four corner-ish entries [[1, 2], [7, 8]].
    M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    M.row_del(1)
    M.col_del(2)
    assert M == Matrix([[1, 2], [7, 8]])
@XFAIL
def test_P3():
A = Matrix([
[11, 12, 13, 14],
[21, 22, 23, 24],
[31, 32, 33, 34],
[41, 42, 43, 44]])
A11 = A[0:3, 1:4]
A12 = A[(0, 1, 3), (2, 0, 3)] # unsupported raises exception
A21 = A
A221 = A[0:2, 2:4]
A222 = A[(3, 0), (2, 1)] # unsupported raises exception
A22 = BlockMatrix([A221, A222])
B = BlockMatrix([[A11, A12],
[A21, A22]])
assert B == Matrix([[12, 13, 14, 13, 11, 14],
[22, 22, 24, 23, 21, 24],
[32, 33, 34, 43, 41, 44],
[11, 12, 13, 14, 13, 14],
[21, 22, 23, 24, 23, 24],
[31, 32, 33, 34, 43, 42],
[41, 42, 43, 44, 13, 12]])
@XFAIL
def test_P4():
raise NotImplementedError("Block matrix diagonalization not supported")
@XFAIL
def test_P5():
M = Matrix([[7, 11],
[3, 8]])
# Raises exception % not supported for matrices
assert M % 2 == Matrix([[1, 1],
[1, 0]])
def test_P5_workaround():
M = Matrix([[7, 11],
[3, 8]])
assert M.applyfunc(lambda i: i % 2) == Matrix([[1, 1],
[1, 0]])
def test_P6():
M = Matrix([[cos(x), sin(x)],
[-sin(x), cos(x)]])
assert M.diff(x, 2) == Matrix([[-cos(x), -sin(x)],
[sin(x), -cos(x)]])
def test_P7():
M = Matrix([[x, y]])*(
z*Matrix([[1, 3, 5],
[2, 4, 6]]) + Matrix([[7, -9, 11],
[-8, 10, -12]]))
assert M == Matrix([[x*(z + 7) + y*(2*z - 8), x*(3*z - 9) + y*(4*z + 10),
x*(5*z + 11) + y*(6*z - 12)]])
@XFAIL
def test_P8():
M = Matrix([[1, -2*I],
[-3*I, 4]])
assert M.norm(ord=S.Infinity) == 7 # Matrix.norm(ord=inf) not implemented
def test_P9():
a, b, c = symbols('a b c', real=True)
M = Matrix([[a/(b*c), 1/c, 1/b],
[1/c, b/(a*c), 1/a],
[1/b, 1/a, c/(a*b)]])
assert factor(M.norm('fro')) == (a**2 + b**2 + c**2)/(abs(a)*abs(b)*abs(c))
@XFAIL
def test_P10():
    # NOTE(review): the lowercase `i` below is presumably meant to be the
    # imaginary unit I — the expected result uses f(4 + 5*I). Confirm
    # against the module-level bindings before changing.
    M = Matrix([[1, 2 + 3*I],
                [f(4 - 5*i), 6]])
    # conjugate(f(4 - 5*i)) is not simplified to f(4+5*I)
    assert M.H == Matrix([[1, f(4 + 5*I)],
                          [2 + 3*I, 6]])
@XFAIL
def test_P11():
# raises NotImplementedError("Matrix([[x,y],[1,x*y]]).inv()
# not simplifying to extract common factor")
assert Matrix([[x, y],
[1, x*y]]).inv() == (1/(x**2 - 1))*Matrix([[x, -1],
[-1/y, x/y]])
def test_P12():
A11 = MatrixSymbol('A11', n, n)
A12 = MatrixSymbol('A12', n, n)
A22 = MatrixSymbol('A22', n, n)
B = BlockMatrix([[A11, A12],
[ZeroMatrix(n, n), A22]])
assert block_collapse(B.I) == BlockMatrix([[A11.I, (-1)*A11.I*A12*A22.I],
[ZeroMatrix(n, n), A22.I]])
def test_P13():
M = Matrix([[1, x - 2, x - 3],
[x - 1, x**2 - 3*x + 6, x**2 - 3*x - 2],
[x - 2, x**2 - 8, 2*(x**2) - 12*x + 14]])
L, U, _ = M.LUdecomposition()
assert simplify(L) == Matrix([[1, 0, 0],
[x - 1, 1, 0],
[x - 2, x - 3, 1]])
assert simplify(U) == Matrix([[1, x - 2, x - 3],
[0, 4, x - 5],
[0, 0, x - 7]])
def test_P14():
M = Matrix([[1, 2, 3, 1, 3],
[3, 2, 1, 1, 7],
[0, 2, 4, 1, 1],
[1, 1, 1, 1, 4]])
R, _ = M.rref()
assert R == Matrix([[1, 0, -1, 0, 2],
[0, 1, 2, 0, -1],
[0, 0, 0, 1, 3],
[0, 0, 0, 0, 0]])
def test_P15():
    # Third row is a linear combination of the first two, so rank is 2.
    rows = [[-1, 3, 7, -5],
            [4, -2, 1, 3],
            [2, 4, 15, -7]]
    assert Matrix(rows).rank() == 2
def test_P16():
    # The second row equals 3*sqrt(3) times the first, so rank is 1 —
    # this requires recognizing the algebraic dependency.
    rows = [[2*sqrt(2), 8], [6*sqrt(6), 24*sqrt(3)]]
    assert Matrix(rows).rank() == 1
@XFAIL
def test_P17():
t = symbols('t', real=True)
M=Matrix([
[sin(2*t), cos(2*t)],
[2*(1 - (cos(t)**2))*cos(t), (1 - 2*(sin(t)**2))*sin(t)]])
assert M.rank() == 1
def test_P18():
M = Matrix([[1, 0, -2, 0],
[-2, 1, 0, 3],
[-1, 2, -6, 6]])
assert M.nullspace() == [Matrix([[2],
[4],
[1],
[0]]),
Matrix([[0],
[-3],
[0],
[1]])]
def test_P19():
w = symbols('w')
M = Matrix([[1, 1, 1, 1],
[w, x, y, z],
[w**2, x**2, y**2, z**2],
[w**3, x**3, y**3, z**3]])
assert M.det() == (w**3*x**2*y - w**3*x**2*z - w**3*x*y**2 + w**3*x*z**2
+ w**3*y**2*z - w**3*y*z**2 - w**2*x**3*y + w**2*x**3*z
+ w**2*x*y**3 - w**2*x*z**3 - w**2*y**3*z + w**2*y*z**3
+ w*x**3*y**2 - w*x**3*z**2 - w*x**2*y**3 + w*x**2*z**3
+ w*y**3*z**2 - w*y**2*z**3 - x**3*y**2*z + x**3*y*z**2
+ x**2*y**3*z - x**2*y*z**3 - x*y**3*z**2 + x*y**2*z**3
)
@XFAIL
def test_P20():
raise NotImplementedError("Matrix minimal polynomial not supported")
def test_P21():
    # Characteristic polynomial of a 3x3 integer matrix.
    M = Matrix([[5, -3, -7],
                [-2, 1, 2],
                [2, -3, -4]])
    charpoly = M.charpoly(x).as_expr()
    assert charpoly == x**3 - 2*x**2 - 5*x + 6
@slow
def test_P22():
    # Wester's original test computes the eigenvalues of a 100x100
    # diagonal matrix, which currently takes forever in sympy:
    #   M = (2 - x)*eye(100); M.eigenvals() == {-x + 2: 100}
    # so the check is performed at dimension 12 instead.
    dim = 12
    M = (2 - x)*eye(dim)
    assert M.eigenvals() == {-x + 2: dim}
def test_P23():
M = Matrix([
[2, 1, 0, 0, 0],
[1, 2, 1, 0, 0],
[0, 1, 2, 1, 0],
[0, 0, 1, 2, 1],
[0, 0, 0, 1, 2]])
assert M.eigenvals() == {
S('1'): 1,
S('2'): 1,
S('3'): 1,
S('sqrt(3) + 2'): 1,
S('-sqrt(3) + 2'): 1}
def test_P24():
M = Matrix([[611, 196, -192, 407, -8, -52, -49, 29],
[196, 899, 113, -192, -71, -43, -8, -44],
[-192, 113, 899, 196, 61, 49, 8, 52],
[ 407, -192, 196, 611, 8, 44, 59, -23],
[ -8, -71, 61, 8, 411, -599, 208, 208],
[ -52, -43, 49, 44, -599, 411, 208, 208],
[ -49, -8, 8, 59, 208, 208, 99, -911],
[ 29, -44, 52, -23, 208, 208, -911, 99]])
assert M.eigenvals() == {
S('0'): 1,
S('10*sqrt(10405)'): 1,
S('100*sqrt(26) + 510'): 1,
S('1000'): 2,
S('-100*sqrt(26) + 510'): 1,
S('-10*sqrt(10405)'): 1,
S('1020'): 1}
def test_P25():
MF = N(Matrix([[ 611, 196, -192, 407, -8, -52, -49, 29],
[ 196, 899, 113, -192, -71, -43, -8, -44],
[-192, 113, 899, 196, 61, 49, 8, 52],
[ 407, -192, 196, 611, 8, 44, 59, -23],
[ -8, -71, 61, 8, 411, -599, 208, 208],
[ -52, -43, 49, 44, -599, 411, 208, 208],
[ -49, -8, 8, 59, 208, 208, 99, -911],
[ 29, -44, 52, -23, 208, 208, -911, 99]]))
assert (Matrix(sorted(MF.eigenvals())) - Matrix(
[-1020.0490184299969, 0.0, 0.09804864072151699, 1000.0,
1019.9019513592784, 1020.0, 1020.0490184299969])).norm() < 1e-13
def test_P26():
a0, a1, a2, a3, a4 = symbols('a0 a1 a2 a3 a4')
M = Matrix([[-a4, -a3, -a2, -a1, -a0, 0, 0, 0, 0],
[ 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 1, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, -1, -1, 0, 0],
[ 0, 0, 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 1, -1, -1],
[ 0, 0, 0, 0, 0, 0, 0, 1, 0]])
assert M.eigenvals() == {
S('-1/2 - sqrt(3)*I/2'): 2,
S('-1/2 + sqrt(3)*I/2'): 2}
def test_P27():
a = symbols('a')
M = Matrix([[a, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, a, 0, 0],
[0, 0, 0, a, 0],
[0, -2, 0, 0, 2]])
assert M.eigenvects() == [(a, 3, [Matrix([[1],
[0],
[0],
[0],
[0]]),
Matrix([[0],
[0],
[1],
[0],
[0]]),
Matrix([[0],
[0],
[0],
[1],
[0]])]),
(1 - I, 1, [Matrix([[ 0],
[-1/(-1 + I)],
[ 0],
[ 0],
[ 1]])]),
(1 + I, 1, [Matrix([[ 0],
[-1/(-1 - I)],
[ 0],
[ 0],
[ 1]])])]
@XFAIL
def test_P28():
raise NotImplementedError("Generalized eigenvectors not supported \
https://code.google.com/p/sympy/issues/detail?id=2194")
@XFAIL
def test_P29():
raise NotImplementedError("Generalized eigenvectors not supported \
https://code.google.com/p/sympy/issues/detail?id=2194")
def test_P30():
M = Matrix([[1, 0, 0, 1, -1],
[0, 1, -2, 3, -3],
[0, 0, -1, 2, -2],
[1, -1, 1, 0, 1],
[1, -1, 1, -1, 2]])
_, J = M.jordan_form()
assert J == Matrix([[-1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 0, 1]])
@XFAIL
def test_P31():
raise NotImplementedError("Smith normal form not implemented")
def test_P32():
M = Matrix([[1, -2],
[2, 1]])
assert exp(M).rewrite(cos).simplify() == Matrix([[E*cos(2), -E*sin(2)],
[E*sin(2), E*cos(2)]])
def test_P33():
w, t = symbols('w t')
M = Matrix([[0, 1, 0, 0],
[0, 0, 0, 2*w],
[0, 0, 0, 1],
[0, -2*w, 3*w**2, 0]])
assert exp(M*t).rewrite(cos).expand() == Matrix([
[1, -3*t + 4*sin(t*w)/w, 6*t*w - 6*sin(t*w), -2*cos(t*w)/w + 2/w],
[0, 4*cos(t*w) - 3, -6*w*cos(t*w) + 6*w, 2*sin(t*w)],
[0, 2*cos(t*w)/w - 2/w, -3*cos(t*w) + 4, sin(t*w)/w],
[0, -2*sin(t*w), 3*w*sin(t*w), cos(t*w)]])
@XFAIL
def test_P34():
a, b, c = symbols('a b c', real=True)
M = Matrix([[a, 1, 0, 0, 0, 0],
[0, a, 0, 0, 0, 0],
[0, 0, b, 0, 0, 0],
[0, 0, 0, c, 1, 0],
[0, 0, 0, 0, c, 1],
[0, 0, 0, 0, 0, c]])
# raises exception, sin(M) not supported. exp(M*I) also not supported
# https://code.google.com/p/sympy/issues/detail?id=3119
assert sin(M) == Matrix([[sin(a), cos(a), 0, 0, 0, 0],
[0, sin(a), 0, 0, 0, 0],
[0, 0, sin(b), 0, 0, 0],
[0, 0, 0, sin(c), cos(c), -sin(c)/2],
[0, 0, 0, 0, sin(c), cos(c)],
[0, 0, 0, 0, 0, sin(c)]])
@XFAIL
def test_P35():
M = pi/2*Matrix([[2, 1, 1],
[2, 3, 2],
[1, 1, 2]])
# raises exception, sin(M) not supported. exp(M*I) also not supported
# https://code.google.com/p/sympy/issues/detail?id=3119
assert sin(M) == eye(3)
@XFAIL
def test_P36():
M = Matrix([[10, 7],
[7, 17]])
assert sqrt(M) == Matrix([[3, 1],
[1, 4]])
@XFAIL
def test_P37():
M = Matrix([[1, 1, 0],
[0, 1, 0],
[0, 0, 1]])
#raises NotImplementedError: Implemented only for diagonalizable matrices
M**Rational(1, 2)
@XFAIL
def test_P38():
M=Matrix([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
#raises NotImplementedError: Implemented only for diagonalizable matrices
M**Rational(1,2)
@XFAIL
def test_P39():
'''
M=Matrix([
[1, 1],
[2, 2],
[3, 3]])
M.SVD()
'''
raise NotImplementedError("Singular value decomposition not implemented")
def test_P40():
    # Jacobian of the polar-to-Cartesian map (r, t) -> (r*cos t, r*sin t).
    r, t = symbols('r t', real=True)
    mapping = Matrix([r*cos(t), r*sin(t)])
    expected = Matrix([[cos(t), -r*sin(t)],
                       [sin(t), r*cos(t)]])
    assert mapping.jacobian(Matrix([r, t])) == expected
def test_P41():
    # Hessian of r**2*sin(t) with respect to (r, t).
    r, t = symbols('r t', real=True)
    expected = Matrix([[2*sin(t), 2*r*cos(t)],
                       [2*r*cos(t), -r**2*sin(t)]])
    assert hessian(r**2*sin(t), (r, t)) == expected
def test_P42():
    # Wronskian of cos and sin is cos**2 + sin**2, which simplifies to 1.
    w = wronskian([cos(x), sin(x)], x)
    assert w.simplify() == 1
def test_P43():
    # Hand-rolled Jacobian, checked against the polar-to-Cartesian map.
    def _jacobian(M, variables):
        return Matrix([M.diff(v).T for v in variables]).T
    r, t = symbols('r t', real=True)
    mapping = Matrix([r*cos(t), r*sin(t)])
    expected = Matrix([[cos(t), -r*sin(t)],
                       [sin(t), r*cos(t)]])
    assert _jacobian(mapping, [r, t]) == expected
def test_P44():
    # Hand-rolled Hessian, checked on r**2*sin(t) as in P41.
    def _hessian(f, variables):
        gradient = Matrix([diff(f, v) for v in variables])
        return Matrix([gradient.T.diff(v) for v in variables])
    r, t = symbols('r t', real=True)
    expected = Matrix([[2*sin(t), 2*r*cos(t)],
                       [2*r*cos(t), -r**2*sin(t)]])
    assert _hessian(r**2*sin(t), (r, t)) == expected
def test_P45():
    # Hand-rolled Wronskian determinant, checked on [cos(x), sin(x)].
    def __my_wronskian(Y, v):
        # Fix: differentiate with respect to the passed variable v; the
        # original body ignored its argument and used the module-level
        # symbol x, which only worked because the caller passed v=x.
        M = Matrix([Matrix(Y).T.diff(v, n) for n in range(0, len(Y))])
        return M.det()
    assert __my_wronskian([cos(x), sin(x)], x).simplify() == 1
# Q1-Q6 Tensor tests missing
@XFAIL
def test_R1():
i, n = symbols('i n', integer=True, positive=True)
xn = MatrixSymbol('xn', n, 1)
Sm = Sum((xn[i, 0] - Sum(xn[j, 0], (j, 0, n - 1))/n)**2, (i, 0, n - 1))
# raises AttributeError: 'str' object has no attribute 'is_Piecewise'
Sm.doit()
@XFAIL
def test_R2():
m, b = symbols('m b', real=True)
i, n = symbols('i n', integer=True, positive=True)
xn = MatrixSymbol('xn', n, 1)
yn = MatrixSymbol('yn', n, 1)
f = Sum((yn[i, 0] - m*xn[i, 0] - b)**2, (i, 0, n - 1))
f1 = diff(f, m)
f2 = diff(f, b)
# raises AttributeError: 'str' object has no attribute 'is_Piecewise'
solve((f1, f2), m, b)
@XFAIL
def test_R3():
n, k = symbols('n k', integer=True, positive=True)
sk = ((-1)**k) * (binomial(2*n, k))**2
Sm = Sum(sk, (k, 1, oo))
T = Sm.doit()
T2 = T.combsimp()
# returns -((-1)**n*factorial(2*n)
# - (factorial(n))**2)*exp_polar(-I*pi)/(factorial(n))**2
assert T2 == (-1)**n*binomial(2*n, n)
@XFAIL
def test_R4():
# Macsyma indefinite sum test case:
#(c15) /* Check whether the full Gosper algorithm is implemented
# => 1/2^(n + 1) binomial(n, k - 1) */
#closedform(indefsum(binomial(n, k)/2^n - binomial(n + 1, k)/2^(n + 1), k));
#Time= 2690 msecs
# (- n + k - 1) binomial(n + 1, k)
#(d15) - --------------------------------
# n
# 2 2 (n + 1)
#
#(c16) factcomb(makefact(%));
#Time= 220 msecs
# n!
#(d16) ----------------
# n
# 2 k! 2 (n - k)!
# Might be possible after fixing https://github.com/sympy/sympy/pull/1879
raise NotImplementedError("Indefinite sum not supported")
@XFAIL
def test_R5():
a, b, c, n, k = symbols('a b c n k', integer=True, positive=True)
sk = ((-1)**k)*(binomial(a + b, a + k)
*binomial(b + c, b + k)*binomial(c + a, c + k))
Sm = Sum(sk, (k, 1, oo))
T = Sm.doit() # hypergeometric series not calculated
assert T == factorial(a+b+c)/(factorial(a)*factorial(b)*factorial(c))
@XFAIL
def test_R6():
n, k = symbols('n k', integer=True, positive=True)
gn = MatrixSymbol('gn', n + 1, 1)
Sm = Sum(gn[k, 0] - gn[k - 1, 0], (k, 1, n + 1))
# raises AttributeError: 'str' object has no attribute 'is_Piecewise'
assert Sm.doit() == -gn[0, 0] + gn[n + 1, 0]
def test_R7():
    # Sum of cubes: 1**3 + ... + n**3 == (n*(n + 1)/2)**2.
    n, k = symbols('n k', integer=True, positive=True)
    total = Sum(k**3, (k, 1, n)).doit()
    assert total.factor() == n**2*(n + 1)**2/4
@XFAIL
def test_R8():
n, k = symbols('n k', integer=True, positive=True)
Sm = Sum(k**2*binomial(n, k), (k, 1, n))
T = Sm.doit() #returns Piecewise function
    # T.simplify() raises AttributeError
assert T.combsimp() == n*(n + 1)*2**(n - 2)
def test_R9():
    # Sum(binomial(n, k - 1)/k, k=1..n+1) == (2**(n + 1) - 1)/(n + 1).
    n, k = symbols('n k', integer=True, positive=True)
    total = Sum(binomial(n, k - 1)/k, (k, 1, n + 1)).doit()
    assert total.simplify() == (2**(n + 1) - 1)/(n + 1)
@XFAIL
def test_R10():
n, m, r, k = symbols('n m r k', integer=True, positive=True)
Sm = Sum(binomial(n, k)*binomial(m, r - k), (k, 0, r))
T = Sm.doit()
T2 = T.combsimp().rewrite(factorial)
assert T2 == factorial(m + n)/(factorial(r)*factorial(m + n - r))
assert T2 == binomial(m + n, r).rewrite(factorial)
# rewrite(binomial) is not working.
# https://code.google.com/p/sympy/issues/detail?id=4036
T3 = T2.rewrite(binomial)
assert T3 == binomial(m + n, r)
@XFAIL
def test_R11():
n, k = symbols('n k', integer=True, positive=True)
sk = binomial(n, k)*fibonacci(k)
Sm = Sum(sk, (k, 0, n))
T = Sm.doit()
# Fibonacci simplification not implemented
# https://code.google.com/p/sympy/issues/detail?id=4035
assert T == fibonacci(2*n)
@XFAIL
def test_R12():
n, k = symbols('n k', integer=True, positive=True)
Sm = Sum(fibonacci(k)**2, (k, 0, n))
T = Sm.doit()
assert T == fibonacci(n)*fibonacci(n + 1)
@XFAIL
def test_R13():
n, k = symbols('n k', integer=True, positive=True)
Sm = Sum(sin(k*x), (k, 1, n))
T = Sm.doit() # Sum is not calculated
assert T.simplify() == cot(x/2)/2 - cos(x*(2*n + 1)/2)/(2*sin(x/2))
@XFAIL
def test_R14():
n, k = symbols('n k', integer=True, positive=True)
Sm = Sum(sin((2*k - 1)*x), (k, 1, n))
T = Sm.doit() # Sum is not calculated
assert T.simplify() == sin(n*x)**2/sin(x)
@XFAIL
def test_R15():
n, k = symbols('n k', integer=True, positive=True)
Sm = Sum(binomial(n - k, k), (k, 0, floor(n/2)))
T = Sm.doit() # Sum is not calculated
assert T.simplify() == fibonacci(n + 1)
def test_R16():
k = symbols('k', integer=True, positive=True)
Sm = Sum(1/k**2 + 1/k**3, (k, 1, oo))
assert Sm.doit() == zeta(3) + pi**2/6
def test_R17():
k = symbols('k', integer=True, positive=True)
assert abs(float(Sum(1/k**2 + 1/k**3, (k, 1, oo)))
- 2.8469909700078206) < 1e-15
@XFAIL
def test_R18():
k = symbols('k', integer=True, positive=True)
Sm = Sum(1/(2**k*k**2), (k, 1, oo))
# returns polylog(2, 1/2), particular value for 1/2 is not known.
# https://code.google.com/p/sympy/issues/detail?id=4033
T = Sm.doit()
assert T.simplify() == -log(2)**2/2 + pi**2/12
@XFAIL
def test_R19():
k = symbols('k', integer=True, positive=True)
Sm = Sum(1/((3*k + 1)*(3*k + 2)*(3*k + 3)), (k, 0, oo))
T = Sm.doit()
# assert fails, T not simplified
assert T.simplify() == -log(3)/4 + sqrt(3)*pi/12
@XFAIL
def test_R20():
n, k = symbols('n k', integer=True, positive=True)
Sm = Sum(binomial(n, 4*k), (k, 0, oo))
T = Sm.doit()
# assert fails, T not simplified
assert T.simplify() == 2**(n/2)*cos(pi*n/4)/2 + 2**(n - 1)/2
@XFAIL
def test_R21():
k = symbols('k', integer=True, positive=True)
Sm = Sum(1/(sqrt(k*(k + 1)) * (sqrt(k) + sqrt(k + 1))), (k, 1, oo))
T = Sm.doit() # Sum not calculated
assert T.simplify() == 1
# test_R22 answer not available in Wester samples
# Sum(Sum(binomial(n, k)*binomial(n - k, n - 2*k)*x**n*y**(n - 2*k),
# (k, 0, floor(n/2))), (n, 0, oo)) with abs(x*y)<1?
@XFAIL
def test_R23():
n, k = symbols('n k', integer=True, positive=True)
Sm = Sum(Sum((factorial(n)/(factorial(k)**2*factorial(n - 2*k)))*
(x/y)**k*(x*y)**(n - k), (n, 2*k, oo)), (k, 0, oo))
# Missing how to express constraint abs(x*y)<1?
T = Sm.doit() # Sum not calculated
assert T == -1/sqrt(x**2*y**2 - 4*x**2 - 2*x*y + 1)
def test_R24():
m, k = symbols('m k', integer=True, positive=True)
Sm = Sum(Product(k/(2*k - 1), (k, 1, m)), (m, 2, oo))
assert Sm.doit() == pi/2
def test_S1():
k = symbols('k', integer=True, positive=True)
Pr = Product(gamma(k/3), (k, 1, 8))
assert Pr.doit().simplify() == 640*sqrt(3)*pi**3/6561
def test_S2():
n, k = symbols('n k', integer=True, positive=True)
assert Product(k, (k, 1, n)).doit() == factorial(n)
def test_S3():
n, k = symbols('n k', integer=True, positive=True)
assert Product(x**k, (k, 1, n)).doit().simplify() == x**(n*(n + 1)/2)
def test_S4():
n, k = symbols('n k', integer=True, positive=True)
assert Product(1 + 1/k, (k, 1, n -1)).doit().simplify() == n
def test_S5():
n, k = symbols('n k', integer=True, positive=True)
assert (Product((2*k - 1)/(2*k), (k, 1, n)).doit().combsimp() ==
factorial(n - Rational(1, 2))/(sqrt(pi)*factorial(n)))
@XFAIL
def test_S6():
n, k = symbols('n k', integer=True, positive=True)
# Product raises Infinite recursion error.
# https://code.google.com/p/sympy/issues/detail?id=4034
assert (Product(x**2 -2*x*cos(k*pi/n) + 1, (k, 1, n - 1)).doit().simplify()
== (x**(2*n) - 1)/(x**2 - 1))
@XFAIL
def test_S7():
k = symbols('k', integer=True, positive=True)
Pr = Product((k**3 - 1)/(k**3 + 1), (k, 2, oo))
T = Pr.doit()
assert T.simplify() == Rational(2, 3) # T simplifies incorrectly to 0
@XFAIL
def test_S8():
k = symbols('k', integer=True, positive=True)
Pr = Product(1 - 1/(2*k)**2, (k, 1, oo))
T = Pr.doit()
# T = nan https://code.google.com/p/sympy/issues/detail?id=4037
assert T.simplify() == 2/pi
@XFAIL
def test_S9():
k = symbols('k', integer=True, positive=True)
Pr = Product(1 + (-1)**(k + 1)/(2*k - 1), (k, 1, oo))
# Product.doit() raises Infinite recursion error.
# https://code.google.com/p/sympy/issues/detail?id=4034
T = Pr.doit()
assert T.simplify() == sqrt(2)
@XFAIL
def test_S10():
k = symbols('k', integer=True, positive=True)
Pr = Product((k*(k + 1) + 1 + I)/(k*(k + 1) + 1 - I), (k, 0, oo))
T = Pr.doit()
# raises OverflowError
# https://code.google.com/p/sympy/issues/detail?id=4038
assert T.simplify() == -1
def test_T1():
assert limit((1 + 1/n)**n, n, oo) == E
assert limit((1 - cos(x))/x**2, x, 0) == Rational(1, 2)
def test_T2():
assert limit((3**x + 5**x)**(1/x), x, oo) == 5
@XFAIL
def test_T3():
assert limit(log(x)/(log(x) + sin(x)), x, oo) == 1 # raises PoleError
def test_T4():
assert limit((exp(x*exp(-x)/(exp(-x) + exp(-2*x**2/(x + 1))))
- exp(x))/x, x, oo) == -exp(2)
def test_T5():
assert limit(x*log(x)*log(x*exp(x) - x**2)**2/log(log(x**2
+ 2*exp(exp(3*x**3*log(x))))), x, oo) == Rational(1, 3)
def test_T6():
assert limit(1/n * factorial(n)**(1/n), n, oo) == exp(-1)
def test_T7():
limit(1/n * gamma(n + 1)**(1/n), n, oo)
def test_T8():
a, z = symbols('a z', real=True, positive=True)
assert limit(gamma(z + a)/gamma(z)*exp(-a*log(z)), z, oo) == 1
@XFAIL
def test_T9():
z, k = symbols('z k', real=True, positive=True)
# raises NotImplementedError:
# Don't know how to calculate the mrv of '(1, k)'
assert limit(hyper((1, k), (1,), z/k), k, oo) == exp(z)
@XFAIL
def test_T10():
# raises PoleError should return euler-mascheroni constant
limit(zeta(x) - 1/(x - 1), x, 1)
@XFAIL
def test_T11():
n, k = symbols('n k', integer=True, positive=True)
# raises NotImplementedError
assert limit(n**x/(x*product((1 + x/k), (k, 1, n))), n, oo) == gamma(x)
@XFAIL
def test_T12():
x, t = symbols('x t', real=True)
# raises PoleError: Don't know how to calculate the
# limit(sqrt(pi)*x*erf(x)/(2*(1 - exp(-x**2))), x, 0, dir=+)
assert limit(x * integrate(exp(-t**2), (t, 0, x))/(1 - exp(-x**2)),
x, 0) == 1
def test_T13():
    # One-sided limits of x/|x| at 0: the sign function jumps from -1
    # (approaching from the left) to +1 (from the right).
    x = symbols('x', real=True)
    assert [limit(x/abs(x), x, 0, dir='-'),
            limit(x/abs(x), x, 0, dir='+')] == [-1, 1]
def test_T14():
    # As x -> 0+, log(x) -> -oo, so atan(-log(x)) -> atan(+oo) = pi/2.
    x = symbols('x', real=True)
    assert limit(atan(-log(x)), x, 0, dir='+') == pi/2
def test_U1():
x = symbols('x', real=True)
assert diff(abs(x), x) == sign(x)
def test_U2():
f = Lambda(x, Piecewise((-x, x < 0), (x, x >= 0)))
assert diff(f(x), x) == Piecewise((-1, x < 0), (1, x >= 0))
def test_U3():
f = Lambda(x, Piecewise((x**2 - 1, x == 1), (x**3, x != 1)))
f1 = Lambda(x, diff(f(x), x))
assert f1(x) == 3*x**2
assert f1(1) == 3
@XFAIL
def test_U4():
n = symbols('n', integer=True, positive=True)
x = symbols('x', real=True)
diff(x**n, x, n)
assert diff(x**n, x, n).rewrite(factorial) == factorial(n)
@XFAIL
def test_U5():
# https://code.google.com/p/sympy/issues/detail?id=3582
# f(g(x)).diff(x,2) returns Derivative(g(x), x)**2*Subs(Derivative(
# f(_xi_1), _xi_1, _xi_1), (_xi_1,), (g(x),)) + Derivative(g(x), x, x)*
# Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (g(x),))
raise NotImplementedError("f(g(t)).diff(t,2) Subs not performed")
@XFAIL
def test_U6():
h = Function('h')
# raises ValueError: Invalid limits given: (y, h(x), g(x))
T = integrate(f(y), y, h(x), g(x))
T.diff(x)
@XFAIL
def test_U7():
p, t = symbols('p t', real=True)
# Exact differential => d(V(P, T)) => dV/dP DP + dV/dT DT
# raises ValueError: Since there is more than one variable in the
# expression, the variable(s) of differentiation must be supplied to
# differentiate f(p,t)
diff(f(p, t))
def test_U8():
    # Implicit differentiation: with cos(x*y) + x and y = f(x), differentiate,
    # solve for f'(x), and compare against the hand-derived dy/dx.
    x, y = symbols('x y', real=True)
    eq = cos(x*y) + x
    eq = eq.subs(y, f(x))
    # If SymPy had implicit_diff() function this hack could be avoided
    assert (solve((f(x) - eq).diff(x), f(x).diff(x))[0].subs(f(x), y) ==
            (-y*sin(x*y) + 1)/(x*sin(x*y) + 1))
@XFAIL
def test_U9():
# Wester sample case for Maple:
# O29 := diff(f(x, y), x) + diff(f(x, y), y);
# /d \ /d \
# |-- f(x, y)| + |-- f(x, y)|
# \dx / \dy /
#
# O30 := factor(subs(f(x, y) = g(x^2 + y^2), %));
# 2 2
# 2 D(g)(x + y ) (x + y)
x, y = symbols('x y', real=True)
su = diff(f(x, y), x) + diff(f(x, y), y)
s2 = Subs(su, f(x, y), g(x**2 + y**2)).doit()
s3 = s2.doit().factor()
# Subs not performed, s3 = 2*(x + y)*Subs(Derivative(
# g(_xi_1), _xi_1), (_xi_1,), (x**2 + y**2,))
# Derivative(g(x*2 + y**2), x**2 + y**2) is not valid in SymPy,
# and probably will remain that way. You can take derivatives with respect
# to other expressions only if they are atomic, like a symbol or a
# function.
# D operator should be added to SymPy
# See https://code.google.com/p/sympy/issues/detail?id=1620.
# raises ValueError: Can't differentiate wrt the variable: x**2 + y**2
assert s3 == 2*(x + y)*Derivative(g(x**2 + y**2), x**2 + y**2)
@XFAIL
def test_U10():
z = symbols('z')
# returns wrong value-3/4 . problem seems to come from series expansion
assert residue((z**3 + 5)/((z**4 - 1)*(z + 1)), z, -1) == Rational(-9, 4)
def test_U11():
(dx, dy, dz) = MV.setup('dx dy dz')
# answer is correct, but SymPy doc does not indicate how/if differential
# forms are supported
assert (2*dx + dz) ^ (3*dx + dy + dz) ^ (dx + dy + 4*dz) == 8*dx ^ dy ^dz
@XFAIL
def test_U12():
# Wester sample case:
# (c41) /* d(3 x^5 dy /\ dz + 5 x y^2 dz /\ dx + 8 z dx /\ dy)
# => (15 x^4 + 10 x y + 8) dx /\ dy /\ dz */
# factor(ext_diff(3*x^5 * dy ~ dz + 5*x*y^2 * dz ~ dx + 8*z * dx ~ dy));
# 4
# (d41) (10 x y + 15 x + 8) dx dy dz
raise NotImplementedError(
"External diff of differential form not supported")
@XFAIL
def test_U13():
#assert minimize(x**4 - x + 1, x)== -3*2**Rational(1,3)/8 + 1
raise NotImplementedError("minimize() not supported")
@XFAIL
def test_U14():
#f = 1/(x**2 + y**2 + 1)
#assert [minimize(f), maximize(f)] == [0,1]
raise NotImplementedError("minimize(), maximize() not supported")
@XFAIL
def test_U15():
raise NotImplementedError("minimize() not supported and also solve does \
not support multivariate inequalities")
@XFAIL
def test_U16():
raise NotImplementedError("minimize() not supported in SymPy and also \
solve does not support multivariate inequalities")
@XFAIL
def test_U17():
raise NotImplementedError("Linear programming, symbolic simplex not \
supported in SymPy")
@XFAIL
def test_V1():
x = symbols('x', real=True)
# integral not calculated
# https://code.google.com/p/sympy/issues/detail?id=1113
assert integrate(abs(x), x) == x*abs(x)/2
def test_V2():
assert (integrate(Piecewise((-x, x < 0), (x, x >= 0)), x) ==
Piecewise((-x**2/2, x < 0), (x**2/2, x >= 0)))
def test_V3():
assert integrate(1/(x**3 + 2),x).diff().simplify() == 1/(x**3 + 2)
@XFAIL
def test_V4():
assert integrate(2**x/sqrt(1 + 4**x), x) == asinh(2**x)/log(2)
@XFAIL
@slow
def test_V5():
# Takes extremely long time
# https://code.google.com/p/sympy/issues/detail?id=4050
assert (integrate((3*x - 5)**2/(2*x - 1)**(Rational(7, 2)), x) ==
(-41 + 80*x - 45*x**2)/(5*(2*x - 1)**Rational(5, 2)))
@XFAIL
def test_V6():
# returns RootSum(40*_z**2 - 1, Lambda(_i, _i*log(-4*_i + exp(-m*x))))/m
assert (integrate(1/(2*exp(m*x) - 5*exp(-m*x)), x) == sqrt(10)*(
log(2*exp(m*x) - sqrt(10)) - log(2*exp(m*x) + sqrt(10)))/(20*m))
def test_V7():
r1 = integrate(sinh(x)**4/cosh(x)**2)
assert r1.simplify() == -3*x/2 + sinh(x)**3/(2*cosh(x)) + 3*tanh(x)/2
@XFAIL
def test_V8_V9():
#Macsyma test case:
#(c27) /* This example involves several symbolic parameters
# => 1/sqrt(b^2 - a^2) log([sqrt(b^2 - a^2) tan(x/2) + a + b]/
# [sqrt(b^2 - a^2) tan(x/2) - a - b]) (a^2 < b^2)
# [Gradshteyn and Ryzhik 2.553(3)] */
#assume(b^2 > a^2)$
#(c28) integrate(1/(a + b*cos(x)), x);
#(c29) trigsimp(ratsimp(diff(%, x)));
# 1
#(d29) ------------
# b cos(x) + a
raise NotImplementedError(
"Integrate with assumption not supported")
def test_V10():
    # Weierstrass (tan half-angle) substitution integral.
    # Use the exact Rational(3, 4): under Python 3 the literal 3/4 is the
    # float 0.75, which makes the exact symbolic comparison fail.
    assert integrate(1/(3 + 3*cos(x) + 4*sin(x)), x) == \
        log(tan(x/2) + Rational(3, 4))/4
def test_V11():
# x = symbols('x', real=True)
r1 = integrate(1/(4 + 3*cos(x) + 4*sin(x)), x)
r2 = factor(r1)
assert (logcombine(r2, force=True) ==
log(((tan(x/2) + 1)/(tan(x/2) + 7))**(1/3)))
@XFAIL
def test_V12():
r1 = integrate(1/(5 + 3*cos(x) + 4*sin(x)), x)
# Correct result in python2.7.4 wrong result in python3.3.1
# https://code.google.com/p/sympy/issues/detail?id=4058
assert r1 == -1/(tan(x/2) + 2)
@XFAIL
def test_V13():
r1 = integrate(1/(6 + 3*cos(x) + 4*sin(x)), x)
# expression not simplified, returns: -sqrt(11)*I*log(tan(x/2) + 4/3
# - sqrt(11)*I/3)/11 + sqrt(11)*I*log(tan(x/2) + 4/3 + sqrt(11)*I/3)/11
assert r1.simplify() == 2*sqrt(11)*atan(sqrt(11)*(3*tan(x/2) + 4)/11)/11
@XFAIL
def test_V14():
r1 = integrate(log(abs(x**2 - y**2)), x)
# I.simplify() raises AttributeError
# https://code.google.com/p/sympy/issues/detail?id=4059
assert (r1.simplify() == x*log(abs(x**2 - y**2))
+ y*log(x + y) - y*log(x - y) - 2*x)
def test_V15():
r1 = integrate(x*acot(x/y), x)
assert simplify(r1 - (x*y + (x**2 + y**2)*acot(x/y))/2) == 0
@XFAIL
def test_V16():
# test case in Mathematica syntax:
# In[53]:= Integrate[Cos[5*x]*CosIntegral[2*x], x]
# CosIntegral[2 x] Sin[5 x] -SinIntegral[3 x] - SinIntegral[7 x]
# Out[53]= ------------------------- + ------------------------------------
# 5 10
# cosine Integral function not supported
# http://reference.wolfram.com/mathematica/ref/CosIntegral.html
raise NotImplementedError("cosine integral function not supported")
@XFAIL
def test_V17():
r1 = integrate((diff(f(x), x)*g(x)
- f(x)*diff(g(x), x))/(f(x)**2 - g(x)**2), x)
# integral not calculated
assert simplify(r1 - (f(x) - g(x))/(f(x) + g(x))/2) == 0
@XFAIL
def test_W1():
# The function has a pole at y.
# The integral has a Cauchy principal value of zero but SymPy returns -I*pi
# https://code.google.com/p/sympy/issues/detail?id=4060
assert integrate(1/(x - y), (x, y - 1, y + 1)) == 0
@XFAIL
def test_W2():
# The function has a pole at y.
# The integral is divergent but SymPy returns -2
# https://code.google.com/p/sympy/issues/detail?id=4061
# Test case in Macsyma:
# (c6) errcatch(integrate(1/(x - a)^2, x, a - 1, a + 1));
# Integral is divergent
assert integrate(1/(x - y)**2, (x, y - 1, y + 1)) == zoo
@XFAIL
def test_W3():
# integral is not calculated
# https://code.google.com/p/sympy/issues/detail?id=4062
assert integrate(sqrt(x + 1/x - 2), (x, 0, 1)) == S(4)/3
@XFAIL
def test_W4():
# integral is not calculated
assert integrate(sqrt(x + 1/x - 2), (x, 1, 2)) == -2*sqrt(2)/3 + S(4)/3
@XFAIL
def test_W5():
# integral is not calculated
assert integrate(sqrt(x + 1/x - 2), (x, 0, 2)) == -2*sqrt(2)/3 + S(8)/3
@XFAIL
@slow
def test_W6():
# integral is not calculated
assert integrate(sqrt(2 - 2*cos(2*x))/2, (x, -3*pi/4, -pi/4)) == sqrt(2)
def test_W7():
a = symbols('a', real=True, positive=True)
r1 = integrate(cos(x)/(x**2 + a**2), (x, -oo, oo))
assert r1.simplify() == pi*exp(-a)/a
@XFAIL
def test_W8():
# Test case in Mathematica:
# In[19]:= Integrate[t^(a - 1)/(1 + t), {t, 0, Infinity},
# Assumptions -> 0 < a < 1]
# Out[19]= Pi Csc[a Pi]
raise NotImplementedError(
"Integrate with assumption 0 < a < 1 not supported")
@XFAIL
def test_W9():
# Integrand with a residue at infinity => -2 pi [sin(pi/5) + sin(2pi/5)]
# (principal value) [Levinson and Redheffer, p. 234] *)
r1 = integrate(5*x**3/(1 + x + x**2 + x**3 + x**4), (x, -oo, oo))
r2 = r1.doit()
assert r2 == -2*pi*(sqrt(-sqrt(5)/8 + 5/8) + sqrt(sqrt(5)/8 + 5/8))
@XFAIL
def test_W10():
# integrate(1/[1 + x + x^2 + ... + x^(2 n)], x = -infinity..infinity) =
# 2 pi/(2 n + 1) [1 + cos(pi/[2 n + 1])] csc(2 pi/[2 n + 1])
# [Levinson and Redheffer, p. 255] => 2 pi/5 [1 + cos(pi/5)] csc(2 pi/5) */
r1 = integrate(x/(1 + x + x**2 + x**4), (x, -oo, oo))
r2 = r1.doit()
assert r2 == 2*pi*(sqrt(5)/4 + 5/4)*csc(2*pi/5)/5
@XFAIL
def test_W11():
# integral not calculated
assert (integrate(sqrt(1 - x**2)/(1 + x**2), (x, -1, 1)) ==
pi*(-1 + sqrt(2)))
def test_W12():
    # Gaussian moment: Integral of x*exp(-p*x**2 + 2*q*x) over the real line
    # => sqrt(pi)*q*exp(q**2/p)/p**(3/2).
    p = symbols('p', real=True, positive=True)
    q = symbols('q', real=True)
    r1 = integrate(x*exp(-p*x**2 + 2*q*x), (x, -oo, oo))
    # p**Rational(3, 2), not p**(3/2): under Python 3 the literal 3/2 is the
    # float 1.5, so the exact symbolic comparison would fail.
    assert r1.simplify() == sqrt(pi)*q*exp(q**2/p)/p**Rational(3, 2)
@XFAIL
def test_W13():
# Integral not calculated. Expected result is 2*(Euler_mascheroni_constant)
r1 = integrate(1/log(x) + 1/(1 - x) - log(log(1/x)), (x, 0, 1))
assert r1 == 2*EulerGamma
def test_W14():
assert integrate(sin(x)/x*exp(2*I*x), (x, -oo, oo)) == 0
@XFAIL
def test_W15():
# integral not calculated
assert integrate(log(gamma(x))*cos(6*pi*x), (x, 0, 1)) == S(1)/12
def test_W16():
assert integrate((1 + x)**3*legendre_poly(1, x)*legendre_poly(2, x),
(x, -1, 1)) == S(36)/35
def test_W17():
a, b = symbols('a b', real=True, positive=True)
assert integrate(exp(-a*x)*besselj(0, b*x),
(x, 0, oo)) == 1/(b*sqrt(a**2/b**2 + 1))
def test_W18():
assert integrate((besselj(1, x)/x)**2, (x, 0, oo)) == 4/(3*pi)
@XFAIL
def test_W19():
# integrate(cos_int(x)*bessel_j[0](2*sqrt(7*x)), x, 0, inf);
# Expected result is cos 7 - 1)/7 [Gradshteyn and Ryzhik 6.782(3)]
raise NotImplementedError("cosine integral function not supported")
@XFAIL
def test_W20():
# integral not calculated
assert (integrate(x**2*polylog(3, 1/(x + 1)), (x, 0, 1)) ==
-pi**2/36 - S(17)/108 + zeta(3)/4 +
(-pi**2/2 - 4*log(2) + log(2)**2 + 35/3)*log(2)/9)
def test_W21():
assert abs(N(integrate(x**2*polylog(3, 1/(x + 1)), (x, 0, 1)))
- 0.210882859565594) < 1e-15
def test_W22():
t, u = symbols('t u', real=True)
s = Lambda(x, Piecewise((1, And(x >= 1, x <= 2)), (0, True)))
assert (integrate(s(t)*cos(t), (t, 0, u)) ==
Piecewise((sin(u) - sin(1), And(u <= 2, u >= 1)),
(0, u <= 1),
(-sin(1) + sin(2), True)))
@XFAIL
@slow
def test_W23():
a, b = symbols('a b', real=True, positive=True)
r1 = integrate(integrate(x/(x**2 + y**2), (x, a, b)), (y, -oo, oo))
assert r1.simplify() == pi*(-a + b)
# integrate raises RuntimeError: maximum recursion depth exceeded
r2 = integrate(integrate(x/(x**2 + y**2), (y, -oo, oo)), (x, a, b))
assert r1 == r2
@XFAIL
@slow
def test_W24():
x, y = symbols('x y', real=True)
r1 = integrate(integrate(sqrt(x**2 + y**2), (x, 0, 1)), (y, 0, 1))
assert (r1 - (sqrt(2) + asinh(1))/3).simplify() == 0
@XFAIL
@slow
def test_W25():
a, x, y = symbols('a x y', real=True)
i1 = integrate(sin(a)*sin(y)/sqrt(1- sin(a)**2*sin(x)**2*sin(y)**2),
(x, 0, pi/2))
i2 = integrate(i1, (y, 0, pi/2))
assert (i2 - pi*a/2).simplify() == 0
@XFAIL
def test_W26():
x, y = symbols('x y', real=True)
# integrate(abs(y - x**2), (y,0,2)) raises ValueError: gamma function pole
# https://code.google.com/p/sympy/issues/detail?id=4066
assert integrate(integrate(abs(y - x**2), (y, 0, 2)),
(x, -1, 1)) == S(46)/15
def test_W27():
    # Volume of the tetrahedron cut from the first octant by the plane
    # x/a + y/b + z/c = 1, computed as an iterated triple integral => a*b*c/6.
    a, b, c = symbols('a b c')
    assert integrate(integrate(integrate(1, (z, 0, c*(1 - x/a - y/b))),
                               (y, 0, b*(1 - x/a))),
                     (x, 0, a)) == a*b*c/6
def test_X1():
v, c = symbols('v c', real=True)
assert (series(1/sqrt(1 - (v/c)**2), v, x0=0, n=8) ==
5*v**6/(16*c**6) + 3*v**4/(8*c**4) + v**2/(2*c**2) + 1 + O(v**8))
def test_X2():
v, c = symbols('v c', real=True)
s1 = series(1/sqrt(1 - (v/c)**2), v, x0=0, n=8)
assert (1/s1**2).series(v, x0=0, n=8) == -v**2/c**2 + 1 + O(v**8)
def test_X3():
s1 = (sin(x).series()/cos(x).series()).series()
s2 = tan(x).series()
assert s2 == x + x**3/3 + 2*x**5/15 + O(x**6)
assert s1 == s2
def test_X4():
s1 = log(sin(x)/x).series()
assert s1 == -x**2/6 - x**4/180 + O(x**6)
assert log(series(sin(x)/x)).series() == s1
@XFAIL
def test_X5():
# test case in Mathematica syntax:
# In[21]:= (* => [a f'(a d) + g(b d) + integrate(h(c y), y = 0..d)]
# + [a^2 f''(a d) + b g'(b d) + h(c d)] (x - d) *)
# In[22]:= D[f[a*x], x] + g[b*x] + Integrate[h[c*y], {y, 0, x}]
# Out[22]= g[b x] + Integrate[h[c y], {y, 0, x}] + a f'[a x]
# In[23]:= Series[%, {x, d, 1}]
# Out[23]= (g[b d] + Integrate[h[c y], {y, 0, d}] + a f'[a d]) +
# 2 2
# (h[c d] + b g'[b d] + a f''[a d]) (-d + x) + O[-d + x]
h = Function('h')
a, b, c, d = symbols('a b c d', real=True)
# series() raises NotImplementedError:
# The _eval_nseries method should be added to <class
# 'sympy.core.function.Subs'> to give terms up to O(x**n) at x=0
series(diff(f(a*x), x) + g(b*x) + integrate(h(c*y), (y, 0, x)),
x, x0=d, n=2)
# assert missing, until exception is removed
def test_X6():
# Taylor series of nonscalar objects (noncommutative multiplication)
# expected result => (B A - A B) t^2/2 + O(t^3) [Stanly Steinberg]
a, b = symbols('a b', commutative=False, scalar=False)
assert (series(exp((a + b)*x) - exp(a*x) * exp(b*x), x, x0=0, n=3) ==
x**2*(-a*b/2 + b*a/2) + O(x**3))
def test_X7():
# => sum( Bernoulli[k]/k! x^(k - 2), k = 1..infinity )
# = 1/x^2 - 1/(2 x) + 1/12 - x^2/720 + x^4/30240 + O(x^6)
# [Levinson and Redheffer, p. 173]
assert (series(1/(x*(exp(x) - 1)), x, 0, 7) == x**(-2) - 1/(2*x) +
S(1)/12 - x**2/720 + x**4/30240 - x**6/1209600 + O(x**7))
@XFAIL
def test_X8():
# Puiseux series (terms with fractional degree):
# => 1/sqrt(x - 3/2 pi) + (x - 3/2 pi)^(3/2) / 12 + O([x - 3/2 pi]^(7/2))
x = symbols('x', real=True)
# raises PoleError: Cannot expand sec(_x + 3*pi/2) around 0
# https://code.google.com/p/sympy/issues/detail?id=4068
series(sqrt(sec(x)), x, x0=pi*3/2, n=4)
# assert missing, until exception is removed
def test_X9():
assert (series(x**x, x, x0=0, n=4) == 1 + x*log(x) + x**2*log(x)**2/2 +
x**3*log(x)**3/6 + O(x**4*log(x)**4))
def test_X10():
z, w = symbols('z w')
assert (series(log(sinh(z)) + log(cosh(z + w)), z, x0=0, n=2) ==
log(cosh(w)) + log(z) + z*sinh(w)/cosh(w) + O(z**2))
def test_X11():
z, w = symbols('z w')
assert (series(log(sinh(z) * cosh(z + w)), z, x0=0, n=2) ==
log(cosh(w)) + log(z) + z*sinh(w)/cosh(w) + O(z**2))
@XFAIL
def test_X12():
# Look at the generalized Taylor series around x = 1
# Result => (x - 1)^a/e^b [1 - (a + 2 b) (x - 1) / 2 + O((x - 1)^2)]
a, b, x = symbols('a b x', real=True)
# series returns O(log(x)**2)
# https://code.google.com/p/sympy/issues/detail?id=4069
assert (series(log(x)**a*exp(-b*x), x, x0=1, n=2) ==
(x - 1)**a/exp(b)*(1 - (a + 2*b)*(x - 1)/2 + O((x - 1)**2)))
def test_X13():
assert series(sqrt(2*x**2 + 1), x, x0=oo, n=1) == sqrt(2)*x + O(1/x, (x, oo))
@XFAIL
def test_X14():
    # Wallis' product => 1/sqrt(pi n) + ... [Knopp, p. 385]
    # Expand the central-binomial ratio around n = oo.  The original code
    # passed the boolean expression ``x==oo`` (which evaluates to False)
    # where the expansion point ``x0=oo`` was intended, and stated the order
    # term in the unrelated symbol x instead of the series variable n.
    assert series(1/2**(2*n)*binomial(2*n, n),
                  n, x0=oo, n=1) == 1/(sqrt(pi)*sqrt(n)) + O(1/n, (n, oo))
@XFAIL
def test_X15():
# => 0!/x - 1!/x^2 + 2!/x^3 - 3!/x^4 + O(1/x^5) [Knopp, p. 544]
x, t = symbols('x t', real=True)
# raises RuntimeError: maximum recursion depth exceeded
# https://code.google.com/p/sympy/issues/detail?id=4065
e1 = integrate(exp(-t)/t, (t, x, oo))
assert (series(e1, x, x0=oo, n=5) ==
6/x**4 + 2/x**3 - 1/x**2 + 1/x + O(x**(-5), (x, oo)))
def test_X16():
# Multivariate Taylor series expansion => 1 - (x^2 + 2 x y + y^2)/2 + O(x^4)
assert (series(cos(x + y), x + y, x0=0, n=4) == 1 - (x + y)**2/2 +
O(x**4 + x**3*y + x**2*y**2 + x*y**3 + y**4, x, y))
@XFAIL
def test_X17():
# Power series (compute the general formula)
# (c41) powerseries(log(sin(x)/x), x, 0);
# /aquarius/data2/opt/local/macsyma_422/library1/trgred.so being loaded.
# inf
# ==== i1 2 i1 2 i1
# \ (- 1) 2 bern(2 i1) x
# (d41) > ------------------------------
# / 2 i1 (2 i1)!
# ====
# i1 = 1
raise NotImplementedError("Formal power series not supported")
@XFAIL
def test_X18():
# Power series (compute the general formula). Maple FPS:
# > FormalPowerSeries(exp(-x)*sin(x), x = 0);
# infinity
# ----- (1/2 k) k
# \ 2 sin(3/4 k Pi) x
# ) -------------------------
# / k!
# -----
raise NotImplementedError("Formal power series not supported")
@XFAIL
def test_X19():
# (c45) /* Derive an explicit Taylor series solution of y as a function of
# x from the following implicit relation:
# y = x - 1 + (x - 1)^2/2 + 2/3 (x - 1)^3 + (x - 1)^4 +
# 17/10 (x - 1)^5 + ...
# */
# x = sin(y) + cos(y);
# Time= 0 msecs
# (d45) x = sin(y) + cos(y)
#
# (c46) taylor_revert(%, y, 7);
raise NotImplementedError("Solve using series not supported. \
Inverse Taylor series expansion also not supported")
@XFAIL
def test_X20():
# Pade (rational function) approximation => (2 - x)/(2 + x)
# > numapprox[pade](exp(-x), x = 0, [1, 1]);
# bytes used=9019816, alloc=3669344, time=13.12
# 1 - 1/2 x
# ---------
# 1 + 1/2 x
# mpmath support numeric Pade approximant but there is
# no symbolic implementation in SymPy
# http://en.wikipedia.org/wiki/Pad%C3%A9_approximant
raise NotImplementedError("Symbolic Pade approximant not supported")
@XFAIL
def test_X21():
# (c48) /* Fourier series of f(x) of period 2 p over the interval [-p, p]
# => - (2 p / pi) sum( (-1)^n sin(n pi x / p) / n, n = 1..infinity ) */
# assume(p > 0)$
# Time= 0 msecs
#
# (c49) fourier_series(x, x, p);
# /aquarius/data2/opt/local/macsyma_422/share/fourier.so being loaded.
# (e49) a = 0
# 0
#
# (e50) a = 0
# %nn
#
# %nn
# 2 (- 1) p
# (e51) b = - ------------
# %nn %pi %nn
#
# Time= 4540 msecs
# inf %nn %pi %nn x
# ==== (- 1) sin(---------)
# \ p
# 2 p > -----------------------
# / %nn
# ====
# %nn = 1
# (d51) - -----------------------------------
# %pi
raise NotImplementedError("Fourier series not supported")
@XFAIL
def test_X22():
# (c52) /* => p / 2
# - (2 p / pi^2) sum( [1 - (-1)^n] cos(n pi x / p) / n^2,
# n = 1..infinity ) */
# fourier_series(abs(x), x, p);
# p
# (e52) a = -
# 0 2
#
# %nn
# (2 (- 1) - 2) p
# (e53) a = ------------------
# %nn 2 2
# %pi %nn
#
# (e54) b = 0
# %nn
#
# Time= 5290 msecs
# inf %nn %pi %nn x
# ==== (2 (- 1) - 2) cos(---------)
# \ p
# p > -------------------------------
# / 2
# ==== %nn
# %nn = 1 p
# (d54) ----------------------------------------- + -
# 2 2
# %pi
raise NotImplementedError("Fourier series not supported")
def test_Y1():
t = symbols('t', real=True, positive=True)
w = symbols('w', real=True)
s = symbols('s')
F, _, _ = laplace_transform(cos((w - 1)*t), t, s)
assert F == s/(s**2 + (w - 1)**2)
def test_Y2():
t = symbols('t', real=True, positive=True)
w = symbols('w', real=True)
s = symbols('s')
f = inverse_laplace_transform(s/(s**2 + (w - 1)**2), s, t)
assert f == cos(t*abs(w - 1))
@XFAIL
def test_Y3():
t = symbols('t', real=True, positive=True)
w = symbols('w', real=True)
s = symbols('s')
F, _, _ = laplace_transform(sinh(w*t)*cosh(w*t), t, s)
assert F == w/(s**2 - 4*w**2)
def test_Y4():
t = symbols('t', real=True, positive=True)
s = symbols('s')
F, _, _ = laplace_transform(erf(3/sqrt(t)), t, s)
assert F == (1 - exp(-6*sqrt(s)))/s
@XFAIL
def test_Y5_Y6():
# Solve y'' + y = 4 [H(t - 1) - H(t - 2)], y(0) = 1, y'(0) = 0 where H is the
# Heaviside (unit step) function (the RHS describes a pulse of magnitude 4 and
# duration 1). See David A. Sanchez, Richard C. Allen, Jr. and Walter T.
# Kyner, _Differential Equations: An Introduction_, Addison-Wesley Publishing
# Company, 1983, p. 211. First, take the Laplace transform of the ODE
# => s^2 Y(s) - s + Y(s) = 4/s [e^(-s) - e^(-2 s)]
# where Y(s) is the Laplace transform of y(t)
t = symbols('t', real=True, positive=True)
s = symbols('s')
y = Function('y')
F, _, _ = laplace_transform(diff(y(t), t, 2)
+ y(t)
- 4*(Heaviside(t - 1)
- Heaviside(t - 2)), t, s)
# Laplace transform for diff() not calculated
# https://code.google.com/p/sympy/issues/detail?id=4077
assert (F == s**2*LaplaceTransform(y(t), t, s) - s
+ LaplaceTransform(y(t), t, s) - 4*exp(-s)/s + 4*exp(-2*s)/s)
# TODO implement second part of test case
# Now, solve for Y(s) and then take the inverse Laplace transform
# => Y(s) = s/(s^2 + 1) + 4 [1/s - s/(s^2 + 1)] [e^(-s) - e^(-2 s)]
# => y(t) = cos t + 4 {[1 - cos(t - 1)] H(t - 1) - [1 - cos(t - 2)] H(t - 2)}
@XFAIL
def test_Y7():
# What is the Laplace transform of an infinite square wave?
# => 1/s + 2 sum( (-1)^n e^(- s n a)/s, n = 1..infinity )
# [Sanchez, Allen and Kyner, p. 213]
t = symbols('t', real=True, positive=True)
a = symbols('a', real=True)
s = symbols('s')
F, _, _ = laplace_transform(1 + 2*Sum((-1)**n*Heaviside(t - n*a),
(n, 1, oo)), t, s)
# returns 2*LaplaceTransform(Sum((-1)**n*Heaviside(-a*n + t),
# (n, 1, oo)), t, s) + 1/s
# https://code.google.com/p/sympy/issues/detail?id=4078
assert F == 2*Sum((-1)**n*exp(-a*n*s)/s, (n, 1, oo)) + 1/s
@XFAIL
def test_Y8():
assert fourier_transform(1, x, z) == DiracDelta(z)
def test_Y9():
assert (fourier_transform(exp(-9*x**2), x, z) ==
sqrt(pi)*exp(-pi**2*z**2/9)/3)
def test_Y10():
assert (fourier_transform(abs(x)*exp(-3*abs(x)), x, z) ==
(-8*pi**2*z**2 + 18)/(16*pi**4*z**4 + 72*pi**2*z**2 + 81))
@XFAIL
@slow
def test_Y11():
# => pi cot(pi s) (0 < Re s < 1) [Gradshteyn and Ryzhik 17.43(5)]
x, s = symbols('x s')
# raises RuntimeError: maximum recursion depth exceeded
# https://code.google.com/p/sympy/issues/detail?id=4082
F, _, _ = mellin_transform(1/(1 - x), x, s)
assert F == pi*cot(pi*s)
@XFAIL
def test_Y12():
# => 2^(s - 4) gamma(s/2)/gamma(4 - s/2) (0 < Re s < 1)
# [Gradshteyn and Ryzhik 17.43(16)]
x, s = symbols('x s')
# returns Wrong value -2**(s - 4)*gamma(s/2 - 3)/gamma(-s/2 + 1)
# https://code.google.com/p/sympy/issues/detail?id=4083
F, _, _ = mellin_transform(besselj(3, x)/x**3, x, s)
assert F == -2**(s - 4)*gamma(s/2)/gamma(-s/2 + 4)
@XFAIL
def test_Y13():
# Z[H(t - m T)] => z/[z^m (z - 1)] (H is the Heaviside (unit step) function) z
raise NotImplementedError("z-transform not supported")
@XFAIL
def test_Y14():
# Z[H(t - m T)] => z/[z^m (z - 1)] (H is the Heaviside (unit step) function)
raise NotImplementedError("z-transform not supported")
def test_Z1():
r = Function('r')
assert (rsolve(r(n + 2) - 2*r(n + 1) + r(n) - 2, r(n),
{r(0): 1, r(1): m}).simplify() == n**2 + n*(m - 2) + 1)
def test_Z2():
r = Function('r')
assert (rsolve(r(n) - (5*r(n - 1) - 6*r(n - 2)), r(n), {r(0): 0, r(1): 1})
== -2**n + 3**n)
def test_Z3():
    # => r(n) = Fibonacci[n + 1] [Cohen, p. 83]
    r = Function('r')
    # recurrence solution is correct, Wester expects it to be simplified to
    # fibonacci(n+1), but that is quite hard
    # The asserted closed form is the Binet-style expression in the golden
    # ratio conjugates (1 +/- sqrt(5))/2, scaled to match fibonacci(n + 1).
    assert (rsolve(r(n) - (r(n - 1) + r(n - 2)), r(n),
                   {r(1): 1, r(2): 2}).simplify()
            == 2**(-n)*((1 + sqrt(5))**n*(sqrt(5) + 5) +
               (-sqrt(5) + 1)**n*(-sqrt(5) + 5))/10)
@XFAIL
def test_Z4():
# => [c^(n+1) [c^(n+1) - 2 c - 2] + (n+1) c^2 + 2 c - n] / [(c-1)^3 (c+1)]
# [Joan Z. Yu and Robert Israel in sci.math.symbolic]
r = Function('r')
c = symbols('c')
# raises ValueError: Polynomial or rational function expected,
# got '(c**2 - c**n)/(c - c**n)
s = rsolve(r(n) - ((1 + c - c**(n-1) - c**(n+1))/(1 - c**n)*r(n - 1)
- c*(1 - c**(n-2))/(1 - c**(n-1))*r(n - 2) + 1),
r(n), {r(1): 1, r(2): (2 + 2*c + c**2)/(1 + c)})
assert (s - (c*(n + 1)*(c*(n + 1) - 2*c - 2) +
(n + 1)*c**2 + 2*c - n)/((c-1)**3*(c+1)) == 0)
@XFAIL
def test_Z5():
# Second order ODE with initial conditions---solve directly
# transform: f(t) = sin(2 t)/8 - t cos(2 t)/4
C1, C2 = symbols('C1 C2')
# initial conditions not supported, this is a manual workaround
# https://code.google.com/p/sympy/issues/detail?id=1621
eq = Derivative(f(x), x, 2) + 4*f(x) - sin(2*x)
sol = dsolve(eq, f(x))
f0 = Lambda(x, sol.rhs)
assert f0(x) == C2*sin(2*x) + (C1 - x/4)*cos(2*x)
f1 = Lambda(x, diff(f0(x), x))
const_dict = solve((f0(0), f1(0)))
result = f0(x).subs(C1, const_dict[C1]).subs(C2, const_dict[C2])
assert result == -x*cos(2*x)/4 + sin(2*x)/8
# Result is OK, but ODE solving with initial conditions should be
# supported without all this manual work
raise NotImplementedError('ODE solving with initial conditions \
not supported')
@XFAIL
def test_Z6():
# Second order ODE with initial conditions---solve using Laplace
# transform: f(t) = sin(2 t)/8 - t cos(2 t)/4
t = symbols('t', real=True, positive=True)
s = symbols('s')
eq = Derivative(f(t), t, 2) + 4*f(t) - sin(2*t)
F, _, _ = laplace_transform(eq, t, s)
# Laplace transform for diff() not calculated
# https://code.google.com/p/sympy/issues/detail?id=4077
assert (F == s**2*LaplaceTransform(f(t), t, s) +
4*LaplaceTransform(f(t), t, s) - 2/(s**2 + 4))
# rest of test case not implemented
| 30.224545 | 285 | 0.485008 |
74bace823b9ce189ed4b74d75c92b13308e32dd9 | 3,511 | py | Python | GetStarted/07_image_statistics.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | 1 | 2020-03-20T19:39:34.000Z | 2020-03-20T19:39:34.000Z | GetStarted/07_image_statistics.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | GetStarted/07_image_statistics.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | '''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/GetStarted/07_image_statistics.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/07_image_statistics.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=GetStarted/07_image_statistics.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/07_image_statistics.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
'''
# %%
import subprocess
# Install geehydro on demand: an ImportError means the package (which pulls
# in its dependencies, including earthengine-api and folium) is not yet
# present in this environment.
try:
    import geehydro
except ImportError:
    print('geehydro package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
'''
# %%
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
# Load and display a Landsat TOA image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
Map.addLayer(image, {'bands': ['B4', 'B3', 'B2'], 'max': 0.3}, 'Landsat 8')
# Create an arbitrary rectangle as a region and display it.
region = ee.Geometry.Rectangle(-122.2806, 37.1209, -122.0554, 37.2413)
Map.centerObject(ee.FeatureCollection(region), 13)
Map.addLayer(ee.Image().paint(region, 0, 2), {}, 'Region')
# Get a dictionary of means in the region. Keys are bandnames.
mean = image.reduceRegion(**{
'reducer': ee.Reducer.mean(),
'geometry': region,
'scale': 30
})
print(mean.getInfo())
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | 36.572917 | 422 | 0.72458 |
b29a22b0604a2cd8de1df862748ea730f5b9e8c6 | 12,593 | py | Python | {{cookiecutter.project_slug}}/config/settings/production.py | gogobody/cookiecutter-django | 9cd0ee0272f99e2e2619d98c1cb975a74a557400 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/config/settings/production.py | gogobody/cookiecutter-django | 9cd0ee0272f99e2e2619d98c1cb975a74a557400 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/config/settings/production.py | gogobody/cookiecutter-django | 9cd0ee0272f99e2e2619d98c1cb975a74a557400 | [
"BSD-3-Clause"
] | null | null | null | import logging
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['{{ cookiecutter.domain_name }}'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': f'{env("REDIS_URL", default="redis://127.0.0.1:6379")}/{0}',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_AUTO_CREATE_BUCKET = True
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
{% if cookiecutter.use_whitenoise == 'y' -%}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
{%- else %}
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
{%- endif %}
# MEDIA
# ------------------------------------------------------------------------------
{% if cookiecutter.use_whitenoise == 'y' -%}
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/'
{%- else %}
# region http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto3 import S3Boto3Storage
StaticRootS3BotoStorage = lambda: S3Boto3Storage(location='static') # noqa
MediaRootS3BotoStorage = lambda: S3Boto3Storage(location='media', file_overwrite=False) # noqa
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/media/'
{%- endif %}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[{{cookiecutter.project_name}}]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
{% if cookiecutter.use_whitenoise == 'y' -%}
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware'] + MIDDLEWARE # noqa F405
{%- endif %}
{% if cookiecutter.use_compressor == 'y' -%}
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL
{%- endif %}
{% if cookiecutter.use_whitenoise == 'n' -%}
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
{%- endif %}
{% if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
# raven
# ------------------------------------------------------------------------------
# https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat'] # noqa F405
MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware'] + MIDDLEWARE
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
{%- else %}
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
{%- endif %}
{% if cookiecutter.use_opbeat == 'y' -%}
# opbeat
# ------------------------------------------------------------------------------
# https://opbeat.com/docs/articles/get-started-with-django/#setup
INSTALLED_APPS += ['opbeat.contrib.django'] # noqa F405
# https://opbeat.com/docs/articles/get-started-with-django/#setup
OPBEAT = {
'ORGANIZATION_ID': env('DJANGO_OPBEAT_ORGANIZATION_ID'),
'APP_ID': env('DJANGO_OPBEAT_APP_ID'),
'SECRET_TOKEN': env('DJANGO_OPBEAT_SECRET_TOKEN')
}
# https://opbeat.com/docs/articles/get-started-with-django/#performance-metrics
MIDDLEWARE = ['opbeat.contrib.django.middleware.OpbeatAPMMiddleware'] + MIDDLEWARE
{%- endif %}
# Your stuff...
# ------------------------------------------------------------------------------
| 40.622581 | 106 | 0.605813 |
050db85bab7a58f60a65d97afcab0b790e19b6df | 2,345 | py | Python | nbviewer/tests/test_format_slides.py | SylvainCorlay/nbviewer | 3e187a7531060c924991f05555026142a5e53996 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-04-01T07:17:27.000Z | 2020-04-01T07:17:27.000Z | nbviewer/tests/test_format_slides.py | SylvainCorlay/nbviewer | 3e187a7531060c924991f05555026142a5e53996 | [
"BSD-3-Clause-Clear"
] | null | null | null | nbviewer/tests/test_format_slides.py | SylvainCorlay/nbviewer | 3e187a7531060c924991f05555026142a5e53996 | [
"BSD-3-Clause-Clear"
] | 2 | 2017-04-09T08:18:58.000Z | 2019-10-13T13:01:26.000Z | #-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import requests
from .base import NBViewerTestCase
from ..providers.local.tests.test_localfile import LocalFileDefaultTestCase
class SlidesGistTestCase(NBViewerTestCase):
    """End-to-end checks of the /format/slides/ renderer against a known gist.

    Each test hits the running nbviewer instance over HTTP and inspects the
    returned HTML body.
    """

    def test_gist(self):
        # A slides-formatted gist page should be rendered through reveal.js.
        url = self.url('/format/slides/0c5b3639b10ed3d7cc85/single-cell.ipynb')
        r = requests.get(url)
        self.assertEqual(r.status_code, 200)
        html = r.content
        self.assertIn('reveal.js', html)

    def test_html_exporter_link(self):
        # The slides view must link back to the plain HTML rendering of the
        # same gist, and that link must not be double-slashed.
        url = self.url('/format/slides/0c5b3639b10ed3d7cc85/single-cell.ipynb')
        r = requests.get(url)
        self.assertEqual(r.status_code, 200)
        html = r.content
        self.assertIn('/gist/minrk/0c5b3639b10ed3d7cc85/single-cell.ipynb', html)
        self.assertNotIn('//gist/minrk/0c5b3639b10ed3d7cc85/single-cell.ipynb', html)

    def test_no_slides_exporter_link(self):
        # The regular (non-slides) view must not advertise a slides link.
        url = self.url('/0c5b3639b10ed3d7cc85/single-cell.ipynb')
        r = requests.get(url)
        self.assertEqual(r.status_code, 200)
        html = r.content
        self.assertNotIn(
            '/format/slides/gist/minrk/7518294/Untitled0.ipynb',
            html
        )
class SlideLocalFileDefaultTestCase(LocalFileDefaultTestCase):
    """Slides rendering of a notebook served from the local filesystem."""

    def test_slides_local(self):
        ## assumes being run from base of this repo
        url = self.url('format/slides/localfile/nbviewer/tests/notebook.ipynb')
        r = requests.get(url)
        self.assertEqual(r.status_code, 200)
        html = r.content
        self.assertIn('reveal.js', html)
class SlidesGitHubTestCase(NBViewerTestCase):
    """Slides rendering of a notebook fetched from a GitHub repository."""

    def ipython_example(self, *parts, **kwargs):
        # Build a slides URL for a file under the ipython/ipython examples
        # directory at the given git ref (defaults to the 2.0.0 release tag).
        ref = kwargs.get('ref', 'rel-2.0.0')
        return self.url(
            '/format/slides/github/ipython/ipython/blob/%s/examples' % ref,
            *parts
        )

    def test_github(self):
        url = self.ipython_example('Index.ipynb')
        r = requests.get(url)
        self.assertEqual(r.status_code, 200)
        html = r.content
        self.assertIn('reveal.js', html)
| 36.076923 | 85 | 0.620469 |
85909567e8818eab96dbc4e5e63d79959e7db37b | 23,749 | py | Python | yt/frontends/art/io.py | cevans216/yt | c19c3c615b996c8a6e418362ffea9041a616d673 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/frontends/art/io.py | cevans216/yt | c19c3c615b996c8a6e418362ffea9041a616d673 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/frontends/art/io.py | cevans216/yt | c19c3c615b996c8a6e418362ffea9041a616d673 | [
"BSD-3-Clause-Clear"
] | null | null | null | import os
import os.path
from collections import defaultdict
from functools import partial
import numpy as np
from yt.frontends.art.definitions import (
hydro_struct,
particle_fields,
particle_star_fields,
star_struct,
)
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.fortran_utils import read_vector, skip
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.lib.geometry_utils import compute_morton
from yt.utilities.logger import ytLogger as mylog
class IOHandlerART(BaseIOHandler):
    """I/O handler for full (hydro + N-body) ART datasets.

    Fluid data is read on demand from the binary AMR file; particle data is
    read from the particle/stellar files referenced by the dataset, with
    per-field results cached on the handler when ``caching`` is enabled.
    """

    _dataset_type = "art"
    # Lazily built lookup tables for star-age interpolation (filled by
    # interpolate_ages on first access to particle_creation_time).
    tb, ages = None, None
    # Class-level placeholders; real per-instance dicts are made in __init__.
    cache = None
    masks = None
    caching = True

    def __init__(self, *args, **kwargs):
        self.cache = {}
        self.masks = {}
        super(IOHandlerART, self).__init__(*args, **kwargs)
        # Per-species masses (wspecies) and cumulative particle counts
        # (lspecies) from the dataset header.
        self.ws = self.ds.parameters["wspecies"]
        self.ls = self.ds.parameters["lspecies"]
        self.file_particle = self.ds._file_particle_data
        self.file_stars = self.ds._file_particle_stars
        self.Nrow = self.ds.parameters["Nrow"]

    def _read_fluid_selection(self, chunks, selector, fields, size):
        """Read the requested fluid fields for every chunk/subset.

        Each domain subset fills itself from the open AMR file; per-subset
        arrays are then concatenated per field and returned as a dict.
        """
        # Chunks in this case will have affiliated domain subset objects
        # Each domain subset will contain a hydro_offset array, which gives
        # pointers to level-by-level hydro information
        tr = defaultdict(list)
        cp = 0
        for chunk in chunks:
            for subset in chunk.objs:
                # Now we read the entire thing
                # NOTE(review): this handle is never closed, and the loop
                # below rebinds the name `f` — confirm intended.
                f = open(subset.domain.ds._file_amr, "rb")
                # This contains the boundary information, so we skim through
                # and pick off the right vectors
                rv = subset.fill(f, fields, selector)
                for ft, f in fields:
                    d = rv.pop(f)
                    mylog.debug(
                        "Filling %s with %s (%0.3e %0.3e) (%s:%s)",
                        f,
                        d.size,
                        d.min(),
                        d.max(),
                        cp,
                        cp + d.size,
                    )
                    tr[(ft, f)].append(d)
                cp += d.size
        d = {}
        for field in fields:
            d[field] = np.concatenate(tr.pop(field))
        return d

    def _get_mask(self, selector, ftype):
        """Return the point-selection mask for particle type ``ftype``.

        Masks are cached per (selector, ftype) when ``caching`` is on.
        """
        key = (selector, ftype)
        if key in self.masks.keys() and self.caching:
            return self.masks[key]
        pstr = "particle_position_%s"
        x, y, z = [self._get_field((ftype, pstr % ax)) for ax in "xyz"]
        mask = selector.select_points(x, y, z, 0.0)
        if self.caching:
            self.masks[key] = mask
            return self.masks[key]
        else:
            return mask

    def _read_particle_coords(self, chunks, ptf):
        """Yield (ptype, (x, y, z)) coordinate triples for each chunk."""
        chunks = list(chunks)
        for chunk in chunks:
            for ptype, field_list in sorted(ptf.items()):
                x = self._get_field((ptype, "particle_position_x"))
                y = self._get_field((ptype, "particle_position_y"))
                z = self._get_field((ptype, "particle_position_z"))
                yield ptype, (x, y, z)

    def _read_particle_fields(self, chunks, ptf, selector):
        """Yield ((ptype, field), data) pairs masked by the selector."""
        chunks = list(chunks)
        for chunk in chunks:
            for ptype, field_list in sorted(ptf.items()):
                x = self._get_field((ptype, "particle_position_x"))
                y = self._get_field((ptype, "particle_position_y"))
                z = self._get_field((ptype, "particle_position_z"))
                mask = selector.select_points(x, y, z, 0.0)
                if mask is None:
                    continue
                for field in field_list:
                    data = self._get_field((ptype, field))
                    yield (ptype, field), data[mask]

    def _get_field(self, field):
        """Read (and optionally cache) one (ptype, fname) particle field.

        Positions are converted from ART's 1-based grid units to unitary
        domain coordinates; masses/types are synthesized per species; star
        fields are spliced into the tail of the array.
        """
        if field in self.cache.keys() and self.caching:
            mylog.debug("Cached %s", str(field))
            return self.cache[field]
        mylog.debug("Reading %s", str(field))
        tr = {}
        ftype, fname = field
        ptmax = self.ws[-1]
        pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax)
        npa = idxb - idxa
        # Per-species counts from the cumulative lspecies array.
        sizes = np.diff(np.concatenate(([0], self.ls)))
        rp = partial(
            read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb
        )
        for i, ax in enumerate("xyz"):
            if fname.startswith("particle_position_%s" % ax):
                # Convert 1-based grid coordinates to unitary [0, 1).
                dd = self.ds.domain_dimensions[0]
                off = 1.0 / dd
                tr[field] = rp(fields=[ax])[0] / dd - off
            if fname.startswith("particle_velocity_%s" % ax):
                (tr[field],) = rp(["v" + ax])
        if fname.startswith("particle_mass"):
            # Fill each selected species' slice with its header mass.
            a = 0
            data = np.zeros(npa, dtype="f8")
            for ptb, size, m in zip(pbool, sizes, self.ws):
                if ptb:
                    data[a : a + size] = m
                    a += size
            tr[field] = data
        elif fname == "particle_index":
            tr[field] = np.arange(idxa, idxb)
        elif fname == "particle_type":
            a = 0
            data = np.zeros(npa, dtype="int")
            for i, (ptb, size) in enumerate(zip(pbool, sizes)):
                if ptb:
                    data[a : a + size] = i
                    a += size
            tr[field] = data
        if pbool[-1] and fname in particle_star_fields:
            # Stars are always the last species; overwrite the tail of the
            # array with the values from the stellar data file.
            data = read_star_field(self.file_stars, field=fname)
            temp = tr.get(field, np.zeros(npa, "f8"))
            nstars = self.ls[-1] - self.ls[-2]
            if nstars > 0:
                temp[-nstars:] = data
            tr[field] = temp
        if fname == "particle_creation_time":
            # NOTE(review): `nstars` is only bound in the branch above —
            # this assumes creation_time is always a star field; confirm.
            self.tb, self.ages, data = interpolate_ages(
                tr[field][-nstars:],
                self.file_stars,
                self.tb,
                self.ages,
                self.ds.current_time,
            )
            temp = tr.get(field, np.zeros(npa, "f8"))
            temp[-nstars:] = data
            tr[field] = temp
            del data
        # We check again, after it's been filled
        if fname.startswith("particle_mass"):
            # We now divide by NGrid in order to make this match up. Note that
            # this means that even when requested in *code units*, we are
            # giving them as modified by the ng value. This only works for
            # dark_matter -- stars are regular matter.
            tr[field] /= self.ds.domain_dimensions.prod()
        if tr == {}:
            tr = dict((f, np.array([])) for f in [field])
        if self.caching:
            self.cache[field] = tr[field]
            return self.cache[field]
        else:
            return tr[field]
class IOHandlerDarkMatterART(IOHandlerART):
    """I/O handler for dark-matter-only ART datasets.

    Mirrors :class:`IOHandlerART` but with no hydro/star handling and with
    positions scaled by the ``ng`` header parameter instead of
    ``domain_dimensions``.
    """

    _dataset_type = "dm_art"

    def _count_particles(self, data_file):
        """Map each raw particle type to its cumulative count from lspecies."""
        return {
            k: self.ds.parameters["lspecies"][i]
            for i, k in enumerate(self.ds.particle_types_raw)
        }

    def _initialize_index(self, data_file, regions):
        """Build the Morton index for all particles in the data file."""
        totcount = 4096 ** 2  # file is always this size
        count = data_file.ds.parameters["lspecies"][-1]
        DLE = data_file.ds.domain_left_edge
        DRE = data_file.ds.domain_right_edge
        with open(data_file.filename, "rb") as f:
            # The first total_particles * 3 values are positions
            pp = np.fromfile(f, dtype=">f4", count=totcount * 3)
            pp.shape = (3, totcount)
            pp = pp[:, :count]  # remove zeros
            pp = np.transpose(pp).astype(
                np.float32
            )  # cast as float32 for compute_morton
            pp = (pp - 1.0) / data_file.ds.parameters[
                "ng"
            ]  # correct the dm particle units
            regions.add_data_file(pp, data_file.file_id)
            morton = compute_morton(pp[:, 0], pp[:, 1], pp[:, 2], DLE, DRE)
        return morton

    def _identify_fields(self, domain):
        """Return the (ptype, field) pairs available for this dataset."""
        field_list = []
        self.particle_field_list = [f for f in particle_fields]
        for ptype in self.ds.particle_types_raw:
            for pfield in self.particle_field_list:
                pfn = (ptype, pfield)
                field_list.append(pfn)
        return field_list, {}

    def _get_field(self, field):
        """Read (and optionally cache) one (ptype, fname) particle field.

        Same layout as the parent implementation, minus star handling; note
        positions use the ``ng`` parameter rather than domain_dimensions.
        """
        if field in self.cache.keys() and self.caching:
            mylog.debug("Cached %s", str(field))
            return self.cache[field]
        mylog.debug("Reading %s", str(field))
        tr = {}
        ftype, fname = field
        ptmax = self.ws[-1]
        pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax)
        npa = idxb - idxa
        sizes = np.diff(np.concatenate(([0], self.ls)))
        rp = partial(
            read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb
        )
        for i, ax in enumerate("xyz"):
            if fname.startswith("particle_position_%s" % ax):
                # This is not the same as domain_dimensions
                dd = self.ds.parameters["ng"]
                off = 1.0 / dd
                tr[field] = rp(fields=[ax])[0] / dd - off
            if fname.startswith("particle_velocity_%s" % ax):
                (tr[field],) = rp(["v" + ax])
        if fname.startswith("particle_mass"):
            a = 0
            data = np.zeros(npa, dtype="f8")
            for ptb, size, m in zip(pbool, sizes, self.ws):
                if ptb:
                    data[a : a + size] = m
                    a += size
            tr[field] = data
        elif fname == "particle_index":
            tr[field] = np.arange(idxa, idxb)
        elif fname == "particle_type":
            a = 0
            data = np.zeros(npa, dtype="int")
            for i, (ptb, size) in enumerate(zip(pbool, sizes)):
                if ptb:
                    data[a : a + size] = i
                    a += size
            tr[field] = data
        # We check again, after it's been filled
        if fname.startswith("particle_mass"):
            # We now divide by NGrid in order to make this match up. Note that
            # this means that even when requested in *code units*, we are
            # giving them as modified by the ng value. This only works for
            # dark_matter -- stars are regular matter.
            tr[field] /= self.ds.domain_dimensions.prod()
        if tr == {}:
            tr[field] = np.array([])
        if self.caching:
            self.cache[field] = tr[field]
            return self.cache[field]
        else:
            return tr[field]

    def _yield_coordinates(self, data_file):
        """Yield (ptype, Nx3 position array) for each raw particle type."""
        for ptype in self.ds.particle_types_raw:
            x = self._get_field((ptype, "particle_position_x"))
            y = self._get_field((ptype, "particle_position_y"))
            z = self._get_field((ptype, "particle_position_z"))
            yield ptype, np.stack((x, y, z), axis=-1)
def _determine_field_size(pf, field, lspecies, ptmax):
pbool = np.zeros(len(lspecies), dtype="bool")
idxas = np.concatenate(([0,], lspecies[:-1]))
idxbs = lspecies
if "specie" in field:
index = int(field.replace("specie", ""))
pbool[index] = True
else:
raise RuntimeError
idxa, idxb = idxas[pbool][0], idxbs[pbool][-1]
return pbool, idxa, idxb
def interpolate_ages(
    data, file_stars, interp_tb=None, interp_ages=None, current_time=None
):
    """Interpolate star formation times (code units) to ages in Gyr.

    On first call (``interp_tb is None``) the lookup tables are built from
    ``data`` via ``b2t`` and the stellar file's timestamp is sanity-checked
    against ``current_time``; subsequent calls reuse the cached tables.
    Returns the (possibly newly built) tables plus the interpolated ages.
    """
    if interp_tb is None:
        # NOTE(review): assumes the "t_stars" record unpacks into exactly
        # two values — confirm against star_struct.
        t_stars, a_stars = read_star_field(file_stars, field="t_stars")
        # timestamp of file should match amr timestamp
        if current_time:
            tdiff = YTQuantity(b2t(t_stars), "Gyr") - current_time.in_units("Gyr")
            if np.abs(tdiff) > 1e-4:
                mylog.info("Timestamp mismatch in star " + "particle header: %s", tdiff)
        mylog.info("Interpolating ages")
        interp_tb, interp_ages = b2t(data)
        interp_tb = YTArray(interp_tb, "Gyr")
        interp_ages = YTArray(interp_ages, "Gyr")
    temp = np.interp(data, interp_tb, interp_ages)
    return interp_tb, interp_ages, temp
def _read_art_level_info(
    f, level_oct_offsets, level, coarse_grid=128, ncell0=None, root_level=None
):
    """Parse the oct index records for one AMR level of an ART file.

    Reads the 15-int-per-oct Fortran records at ``level_oct_offsets[level]``
    and returns (unitary oct centers, file-location array, oct indices,
    number of octs, inferred root level).  The file position is restored
    before returning.
    """
    pos = f.tell()
    f.seek(level_oct_offsets[level])
    # Get the info for this level, skip the rest
    junk, nLevel, iOct = read_vector(f, "i", ">")
    # fortran indices start at 1
    # Skip all the oct index data
    le = np.zeros((nLevel, 3), dtype="int64")
    fl = np.ones((nLevel, 6), dtype="int64")
    iocts = np.zeros(nLevel + 1, dtype="int64")
    idxa, idxb = 0, 0
    chunk = int(1e6)  # this is ~111MB for 15 dimensional 64 bit arrays
    left = nLevel
    while left > 0:
        this_chunk = min(chunk, left)
        idxb = idxa + this_chunk
        data = np.fromfile(f, dtype=">i", count=this_chunk * 15)
        data = data.reshape(this_chunk, 15)
        left -= this_chunk
        # Columns 1:4 are the oct's integer position.
        le[idxa:idxb, :] = data[:, 1:4]
        fl[idxa:idxb, 1] = np.arange(idxa, idxb)
        # pad byte is last, LL2, then ioct right before it
        iocts[idxa:idxb] = data[:, -3]
        idxa = idxa + this_chunk
    del data
    # emulate fortran code
    #     do ic1 = 1 , nLevel
    #       read(19) (iOctPs(i,iOct),i=1,3),(iOctNb(i,iOct),i=1,6),
    #       &                iOctPr(iOct), iOctLv(iOct), iOctLL1(iOct),
    #       &                iOctLL2(iOct)
    #       iOct = iOctLL1(iOct)
    # ioct always represents the index of the next variable
    # not the current, so shift forward one index
    # the last index isn't used
    iocts[1:] = iocts[:-1]  # shift
    iocts = iocts[:nLevel]  # chop off the last, unused, index
    iocts[0] = iOct  # starting value
    # now correct iocts for fortran indices start @ 1
    iocts = iocts - 1
    assert np.unique(iocts).shape[0] == nLevel
    # left edges are expressed as if they were on
    # level 15, so no matter what level max(le)=2**15
    # correct to the yt convention
    # le = le/2**(root_level-1-level)-1
    # try to find the root_level first
    def cfc(root_level, level, le):
        # Convert integer oct positions to float cell coordinates at the
        # given root level.
        d_x = 1.0 / (2.0 ** (root_level - level + 1))
        fc = (d_x * le) - 2 ** (level - 1)
        return fc
    if root_level is None:
        # Guess the root level, then bump it until neighboring cell
        # coordinates are ~unit-spaced.
        root_level = np.floor(np.log2(le.max() * 1.0 / coarse_grid))
        root_level = root_level.astype("int64")
        for i in range(10):
            fc = cfc(root_level, level, le)
            go = np.diff(np.unique(fc)).min() < 1.1
            if go:
                break
            root_level += 1
    else:
        fc = cfc(root_level, level, le)
    unitary_center = fc / (coarse_grid * 2.0 ** (level - 1))
    assert np.all(unitary_center < 1.0)
    # again emulate the fortran code
    # This is all for calculating child oct locations
    # iC_ = iC + nbshift
    # iO = ishft ( iC_ , - ndim )
    # id = ishft ( 1, MaxLevel - iOctLv(iO) )
    # j = iC_ + 1 - ishft( iO , ndim )
    # Posx = d_x * (iOctPs(1,iO) + sign ( id , idelta(j,1) ))
    # Posy = d_x * (iOctPs(2,iO) + sign ( id , idelta(j,2) ))
    # Posz = d_x * (iOctPs(3,iO) + sign ( id , idelta(j,3) ))
    # idelta = [[-1, 1, -1, 1, -1, 1, -1, 1],
    # [-1, -1, 1, 1, -1, -1, 1, 1],
    # [-1, -1, -1, -1, 1, 1, 1, 1]]
    # idelta = np.array(idelta)
    # if ncell0 is None:
    # ncell0 = coarse_grid**3
    # nchild = 8
    # ndim = 3
    # nshift = nchild -1
    # nbshift = nshift - ncell0
    # iC = iocts #+ nbshift
    # iO = iC >> ndim #possibly >>
    # id = 1 << (root_level - level)
    # j = iC + 1 - ( iO << 3)
    # delta = np.abs(id)*idelta[:,j-1]
    # try without the -1
    # le = le/2**(root_level+1-level)
    # now read the hvars and vars arrays
    # we are looking for iOctCh
    # we record if iOctCh is >0, in which it is subdivided
    # iOctCh = np.zeros((nLevel+1,8),dtype='bool')
    f.seek(pos)
    return unitary_center, fl, iocts, nLevel, root_level
def get_ranges(
    skip, count, field, words=6, real_size=4, np_per_page=4096 ** 2, num_pages=1
):
    """Translate a particle index range into (byte offset, count) pairs.

    The particle file is laid out in pages, each holding six contiguous
    per-particle arrays (x, y, z, vx, vy, vz).  For the requested ``field``
    (an axis name or its positional index), skip the first ``skip``
    particles and emit one (start offset, particle count) tuple per page
    until ``count`` particles are covered.  ``words`` is kept for interface
    compatibility.
    """
    axis_names = ["x", "y", "z", "vx", "vy", "vz"]
    array_bytes = np_per_page * real_size
    ranges = []
    remaining = count
    to_skip = skip
    pos = 0
    for _ in range(num_pages):
        for axis_index, axis in enumerate(axis_names):
            if axis_index == field or axis == field:
                if to_skip < np_per_page and remaining > 0:
                    # Take as much of this page as the request still needs.
                    take = min(np_per_page - to_skip, remaining)
                    remaining -= take
                    ranges.append((pos + to_skip * real_size, take))
                    to_skip = 0
                else:
                    # This whole page precedes the requested range.
                    to_skip -= np_per_page
            pos += array_bytes
    assert remaining == 0
    return ranges
def read_particles(file, Nrow, idxa, idxb, fields):
    """Read per-particle fields for indices [idxa, idxb) from an ART file.

    Parameters
    ----------
    file : str
        Path to the binary particle data file.
    Nrow : int
        ART page dimension; each page holds Nrow**2 particles.
    idxa, idxb : int
        Half-open particle index range to read.
    fields : iterable
        Axis names ("x", "y", "z", "vx", "vy", "vz") or positional indices.

    Returns
    -------
    list of np.ndarray
        One float64 array of length (idxb - idxa) per requested field.
    """
    words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
    real_size = 4  # for file_particle_data; not always true?
    np_per_page = Nrow ** 2  # defined in ART a_setup.h, # of particles/page
    num_pages = os.path.getsize(file) // (real_size * words * np_per_page)
    skip, count = idxa, idxb - idxa
    kwargs = dict(
        words=words, real_size=real_size, np_per_page=np_per_page, num_pages=num_pages
    )
    arrs = []
    # Open in binary mode: the file holds raw big-endian floats, and a
    # text-mode handle would corrupt the stream on platforms performing
    # newline translation.  The context manager also guarantees the handle
    # is closed if a read fails partway through.
    with open(file, "rb") as fh:
        for field in fields:
            ranges = get_ranges(skip, count, field, **kwargs)
            chunks = []
            for seek, this_count in ranges:
                fh.seek(seek)
                chunks.append(np.fromfile(fh, count=this_count, dtype=">f4"))
            # An empty request (idxa == idxb) yields no ranges; return an
            # empty array instead of crashing on None.
            if chunks:
                data = np.concatenate(chunks)
            else:
                data = np.array([], dtype=">f4")
            arrs.append(data.astype("f8"))
    return arrs
def read_star_field(file, field=None):
    """Pull a single named record out of the ART stellar data file.

    Walks the fixed record layout declared in ``star_struct``: the record
    whose name (or name tuple) matches ``field`` is decoded with
    ``read_vector``; every other record is skipped in place.  Raises
    ``KeyError`` if no record matches.
    """
    result = None
    found = False
    with open(file, "rb") as fh:
        for dtype, variables in star_struct:
            if isinstance(variables, tuple):
                matches = field in variables
            else:
                matches = field == variables
            if matches:
                # Later matches overwrite earlier ones, as in the original
                # dict-based implementation.
                result = read_vector(fh, dtype[1], dtype[0])
                found = True
            else:
                skip(fh, endian=">")
    if not found:
        raise KeyError(field)
    return result
def _read_child_mask_level(f, level_child_offsets, level, nLevel, nhydro_vars):
    """Read the child-refinement mask for one AMR level.

    Returns ``(idc, ioctch)``: zero-based cell indices and a uint8 mask
    that is 1 where further refinement is available.
    """
    f.seek(level_child_offsets[level])
    ioctch = np.zeros(nLevel, dtype="uint8")
    idc = np.zeros(nLevel, dtype="int32")
    chunk = int(1e6)
    left = nLevel
    # Each record is nhydro_vars + 6 big-endian ints wide.
    width = nhydro_vars + 6
    a, b = 0, 0
    while left > 0:
        chunk = min(chunk, left)
        b += chunk
        arr = np.fromfile(f, dtype=">i", count=chunk * width)
        arr = arr.reshape((width, chunk), order="F")
        assert np.all(arr[0, :] == arr[-1, :])  # pads must be equal
        idc[a:b] = arr[1, :] - 1  # fix fortran indexing
        ioctch[a:b] = arr[2, :] == 0  # if it is above zero, then refined available
        # zero in the mask means there is refinement available
        a = b
        left -= chunk
    assert left == 0
    return idc, ioctch
# Number of scalar fields packed per cell in the ART child records
# (8 hydro variables + 2 extras).
nchem = 8 + 2
# Record dtype for those cells.  NOTE(review): appears unused in this
# module — hydro_struct is used for the actual reads; confirm before removal.
dtyp = np.dtype(">i4,>i8,>i8" + ",>%sf4" % (nchem) + ",>%sf4" % (2) + ",>i4")
def _read_child_level(
    f,
    level_child_offsets,
    level_oct_offsets,
    level_info,
    level,
    fields,
    domain_dimensions,
    ncell0,
    nhydro_vars=10,
    nchild=8,
    noct_range=None,
):
    """Read hydro variables for the child (refined) cells of one AMR level.

    Returns a dict mapping each name in *fields* to a float64 array of
    shape ``(nocts, 8)`` -- one row per oct, one column per child cell.
    When *noct_range* is given, only octs in ``[start, end)`` are read,
    which bounds memory use (the consistency check against the oct order
    is only performed in the full-level read).
    """
    # emulate the fortran code for reading cell data
    # read ( 19 ) idc, iOctCh(idc), (hvar(i,idc),i=1,nhvar),
    # & (var(i,idc), i=2,3)
    # contiguous 8-cell sections are for the same oct;
    # ie, we don't write out just the 0 cells, then the 1 cells
    # optionally, we only read noct_range to save memory
    left_index, fl, octs, nocts, root_level = _read_art_level_info(
        f, level_oct_offsets, level, coarse_grid=domain_dimensions[0]
    )
    if noct_range is None:
        nocts = level_info[level]
        ncells = nocts * 8
        f.seek(level_child_offsets[level])
        arr = np.fromfile(f, dtype=hydro_struct, count=ncells)
        assert np.all(arr["pad1"] == arr["pad2"])  # pads must be equal
        # idc = np.argsort(arr['idc']) #correct fortran indices
        # translate idc into icell (first cell of the oct), and then to iOct
        icell = (arr["idc"] >> 3) << 3
        iocts = (icell - ncell0) / nchild  # without a F correction, theres a +1
        # NOTE(review): true division yields floats here; values must be
        # exact integers for the comparison below -- ``//`` may be intended.
        # assert that the children are read in the same order as the octs
        assert np.all(octs == iocts[::nchild])
    else:
        start, end = noct_range
        nocts = min(end - start, level_info[level])
        end = start + nocts
        ncells = nocts * 8
        # byte offset of the first requested oct's 8 records
        skip = np.dtype(hydro_struct).itemsize * start * 8
        f.seek(level_child_offsets[level] + skip)
        arr = np.fromfile(f, dtype=hydro_struct, count=ncells)
        assert np.all(arr["pad1"] == arr["pad2"])  # pads must be equal
    source = {}
    for field in fields:
        sh = (nocts, 8)
        source[field] = np.reshape(arr[field], sh, order="C").astype("float64")
    return source
def _read_root_level(f, level_offsets, level_info, nhydro_vars=10):
    """Read hydro data for the root (level 0) grid.

    Two Fortran vectors are read back-to-back -- the hydro variables and
    two auxiliary variables -- and stacked into a single array of shape
    ``(nhydro_vars + 2, ncells)`` where ``ncells = level_info[0] * 8``.
    """
    ncells = level_info[0] * 8
    f.seek(level_offsets[0])  # skip past the header
    hydro = read_vector(f, "f", ">").reshape((nhydro_vars, ncells), order="F")
    aux = read_vector(f, "f", ">").reshape((2, ncells), order="F")
    return np.concatenate((hydro, aux))
# All of these functions are to convert from hydro time var to
# proper time.
# Short module-level aliases used by the conversion helpers.
sqrt = np.sqrt
sign = np.sign
def find_root(f, a, b, tol=1e-6):
    """Locate a root of *f* inside [a, b] by bisection.

    Requires that ``f(a)`` and ``f(b)`` have opposite signs.  Iteration
    stops once the function value at the midpoint changes by no more than
    *tol* between successive steps (i.e. convergence is measured on the
    function value, not on the interval width).
    """
    assert np.sign(f(a)) != np.sign(f(b))
    mid = (a + b) / 2.0
    prev_val = -np.inf
    while np.abs(f(mid) - prev_val) > tol:
        prev_val = f(mid)
        # keep the sub-interval whose endpoints still bracket the root
        if np.sign(prev_val) == np.sign(f(b)):
            b = mid
        else:
            a = mid
        mid = (a + b) / 2.0
    return mid
def quad(fintegrand, xmin, xmax, n=1e4):
    """Approximate the integral of *fintegrand* over [xmin, xmax].

    Samples the (vectorized) integrand on a logarithmically spaced grid of
    ``int(n)`` points and applies the trapezoidal rule.  Both bounds must
    be positive since the grid is built with ``np.logspace``.
    """
    spacings = np.logspace(np.log10(xmin), np.log10(xmax), num=int(n))
    integrand_arr = fintegrand(spacings)
    # np.trapz was renamed to np.trapezoid and removed in NumPy 2.0;
    # resolve whichever name this NumPy provides.
    trapezoid = getattr(np, "trapezoid", None)
    if trapezoid is None:  # NumPy < 2.0
        trapezoid = np.trapz
    val = trapezoid(integrand_arr, dx=np.diff(spacings))
    return val
def a2b(at, Om0=0.27, Oml0=0.73, h=0.700):
    """Convert an expansion factor *at* into ART code time ``b``.

    Integrates the code-time measure from a = 1 to *at* using the
    module-level ``quad`` helper.  Note: *h* is accepted for signature
    parity with the other converters but is not used here.
    """
    def integrand(x):
        numerator = 0.5 * np.sqrt(Om0) / x ** 3.0
        e_of_x = np.sqrt(Om0 / x ** 3.0 + Oml0 + (1.0 - Om0 - Oml0) / x ** 2.0)
        return numerator / e_of_x
    # (previously done with scipy.integrate.quad)
    return quad(integrand, 1, at)
def b2a(bt, **kwargs):
    """Convert ART code time *bt* back into an expansion factor.

    Numerically inverts :func:`a2b` by bisecting for the expansion factor
    whose code time equals *bt*.  Extra keyword arguments (cosmology
    parameters) are forwarded to ``a2b``.
    For Om0 == 1 and OmL == 0 the closed form would be (1 / (1 - td))**2.
    """
    def residual(at):
        return a2b(at, **kwargs) - bt
    # search the physically sensible range of expansion factors
    return find_root(residual, 1e-4, 1.1)
def a2t(at, Om0=0.27, Oml0=0.73, h=0.700):
    """Convert an expansion factor *at* into a proper (cosmic) time.

    Integrates dt/da = 1 / (a * E(a)) from a small lower bound up to *at*
    and scales by 9.779 / h -- the Hubble time in Gyr for
    H0 = 100 h km/s/Mpc (standard value), so the result is in Gyr.
    """
    def dt_da(a):
        return 1.0 / (a * np.sqrt(Oml0 + Om0 * a ** -3.0))
    # lower bound 1e-4 instead of 0 keeps the log-spaced grid finite
    age = quad(dt_da, 1e-4, at)
    return age * (9.779 / h)
def b2t(tb, n=1e2, logger=None, **kwargs):
    """Convert ART code time(s) *tb* into proper time via ``a2t(b2a(t))``.

    Scalar input is converted directly.  Array input is *not* converted
    element-wise; instead the conversion is evaluated on a log-spaced grid
    of at most ``n`` code times spanning ``[tb.min(), tb.max()]`` (all
    values are assumed strictly negative so that ``log10(-tb)`` is
    finite), and ``(tbs, ages)`` is returned for the caller to
    interpolate.

    Parameters
    ----------
    tb : float or array-like
        ART code time(s); assumed negative.
    n : int, optional
        Maximum number of grid samples for the array case.
    logger : callable, optional
        Progress hook; called with the grid index after each sample.
    **kwargs
        Cosmology parameters forwarded to ``b2a``/``a2t``.

    Returns
    -------
    float for scalar input, otherwise ``(tbs, ages)`` ndarrays.
    """
    tb = np.array(tb)
    if tb.shape == ():
        # 0-d (scalar) input: convert directly.  (np.array() always yields
        # an ndarray, so the old isinstance-of-float check was unreachable
        # and has been removed.)
        return a2t(b2a(tb, **kwargs), **kwargs)
    if len(tb) < n:
        n = len(tb)
    # np.logspace requires an integer sample count; n may arrive as a float
    # (the default is 1e2), which raises TypeError on modern NumPy.
    tbs = -1.0 * np.logspace(np.log10(-tb.min()), np.log10(-tb.max()), int(n))
    ages = []
    for i, tbi in enumerate(tbs):
        # Forward the cosmology kwargs; previously they were accepted by the
        # signature but silently ignored in the conversion.
        ages.append(a2t(b2a(tbi, **kwargs), **kwargs))
        if logger:
            logger(i)
    return tbs, np.array(ages)
| 36.037936 | 88 | 0.551392 |
ae64bc40689576263cfc1c320a3e757c7e1cf74c | 133 | py | Python | modules/2.79/bpy/types/CompositorNodeDistanceMatte.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/CompositorNodeDistanceMatte.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/CompositorNodeDistanceMatte.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | class CompositorNodeDistanceMatte:
channel = None
falloff = None
tolerance = None
def update(self):
pass
| 12.090909 | 34 | 0.631579 |
f5d7b57fc9682071de6f7d8163fadda5bef0d911 | 1,927 | py | Python | Base/Recommender_utils_Test.py | marcomussi/RecommenderSystemPolimi | ce45b1eee2231abe1a844697648e94b98dadabea | [
"MIT"
] | null | null | null | Base/Recommender_utils_Test.py | marcomussi/RecommenderSystemPolimi | ce45b1eee2231abe1a844697648e94b98dadabea | [
"MIT"
] | null | null | null | Base/Recommender_utils_Test.py | marcomussi/RecommenderSystemPolimi | ce45b1eee2231abe1a844697648e94b98dadabea | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 30/09/17
@author: Maurizio Ferrari Dacrema
"""
from Base.Recommender_utils import similarityMatrixTopK
import numpy as np
import scipy.sparse as sps
import unittest
class MyTestCase(unittest.TestCase):
    """Checks for similarityMatrixTopK across dense/sparse input-output combos."""
    def test_similarityMatrixTopK_denseToDense(self):
        """Dense in, dense out: exactly k entries per row survive the pruning."""
        n_rows, top_k = 100, 20
        matrix = np.random.random((n_rows, n_rows))
        pruned = similarityMatrixTopK(matrix, k=top_k, forceSparseOutput=False)
        expected_nonzeros = top_k * n_rows
        actual_nonzeros = np.sum(pruned != 0)
        self.assertEqual(expected_nonzeros, actual_nonzeros, "DenseToDense incorrect")
    def test_similarityMatrixTopK_denseToSparse(self):
        """Sparse and dense outputs of the same pruning must coincide."""
        n_rows, top_k = 100, 20
        matrix = np.random.random((n_rows, n_rows))
        as_sparse = similarityMatrixTopK(matrix, k=top_k, forceSparseOutput=True)
        as_dense = similarityMatrixTopK(matrix, k=top_k, forceSparseOutput=False)
        self.assertTrue(np.equal(as_dense, as_sparse.todense()).all(), "denseToSparse incorrect")
    def test_similarityMatrixTopK_sparseToSparse(self):
        """CSR and CSC sparse inputs must both match the dense result."""
        n_rows, top_k = 20, 5
        matrix = np.random.random((n_rows, n_rows))
        csr_input = sps.csr_matrix(matrix)
        dense_result = similarityMatrixTopK(matrix, k=top_k, forceSparseOutput=False, inplace=False)
        csr_result = similarityMatrixTopK(csr_input, k=top_k, forceSparseOutput=True)
        self.assertTrue(np.allclose(dense_result, csr_result.todense()), "sparseToSparse CSR incorrect")
        csc_input = sps.csc_matrix(matrix)
        csc_result = similarityMatrixTopK(csc_input, k=top_k, forceSparseOutput=True)
        self.assertTrue(np.allclose(dense_result, csc_result.todense()), "sparseToSparse CSC incorrect")
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 27.140845 | 107 | 0.714063 |
f6f5e693b2fb5437959fb9c05bbfde8d850a9767 | 587 | py | Python | Backend/autonomus/utils/events_export.py | IrinaMBejan/Autonom | 4a97da1b26ed22e3ec8bb939359148765392b692 | [
"MIT"
] | 2 | 2019-03-08T10:04:35.000Z | 2020-03-14T15:24:56.000Z | Backend/autonomus/utils/events_export.py | IrinaMBejan/Autonom | 4a97da1b26ed22e3ec8bb939359148765392b692 | [
"MIT"
] | null | null | null | Backend/autonomus/utils/events_export.py | IrinaMBejan/Autonom | 4a97da1b26ed22e3ec8bb939359148765392b692 | [
"MIT"
] | 2 | 2019-03-16T14:47:36.000Z | 2020-04-28T14:09:45.000Z | from ics import Calendar, Event
from autonomus.controllers import get_user_events
import csv
def export_events_ical(events):
    """Serialize a collection of datastore event keys to an iCalendar string.

    *events* is an iterable of datastore keys; keys whose entity no longer
    exists are skipped.  NOTE(review): assumes each entity exposes title,
    date, description and location -- confirm against the Event model.
    """
    calendar = Calendar()
    for key in events:
        entity = key.get()
        if entity is None:
            # stale key -- the event was deleted; leave it out of the export
            continue
        ics_event = Event()
        ics_event.name = entity.title
        ics_event.begin = entity.date
        ics_event.description = entity.description
        ics_event.location = entity.location
        calendar.events.add(ics_event)
    return str(calendar)
def export(urlsafe):
    """Export every event of the user identified by *urlsafe* as an .ics string."""
    return export_events_ical(get_user_events(urlsafe))
| 20.241379 | 49 | 0.640545 |
ccc4ac19293339584901e5cb928a0df85bd1389c | 1,656 | py | Python | validation_functions.py | MrHellYea/Art-Gallery-SQL | 6e56ee18eb5dc0d2f4efa2fbaf01d887ed808dbf | [
"MIT"
] | null | null | null | validation_functions.py | MrHellYea/Art-Gallery-SQL | 6e56ee18eb5dc0d2f4efa2fbaf01d887ed808dbf | [
"MIT"
] | null | null | null | validation_functions.py | MrHellYea/Art-Gallery-SQL | 6e56ee18eb5dc0d2f4efa2fbaf01d887ed808dbf | [
"MIT"
] | null | null | null | from typing import Callable, Optional
from generic_functions import *
def is_num(num: str) -> bool:
    """Return True when *num* can be parsed as a float, False otherwise."""
    try:
        float(num)
        return True
    except ValueError:
        return False
def is_not_empty(text: str) -> bool:
    """Return True unless *text* is exactly the empty string."""
    return text != ""
def validate_input(text: str, function: Callable, allow_empty: Optional[bool]=False) -> str:
    """Prompt with *text* until *function* accepts the reply.

    Returns the accepted string, or None when *allow_empty* is set and the
    user submits an empty reply.  Keeps re-prompting on invalid input.
    """
    while True:
        answer = input(text)
        if allow_empty and answer == "":
            return None
        if function(answer):
            return answer
def validate_cpf(cpf: str) -> bool:
    """Validate a Brazilian CPF: 11 digits whose last two are check digits.

    Each check digit is recomputed with ``get_remain()`` and compared to
    the corresponding character of *cpf*.
    """
    if len(cpf) != 11:
        return False
    if str(get_remain(cpf, 10, -2)) != cpf[-2]:
        return False
    if str(get_remain(cpf, 11, -1)) != cpf[-1]:
        return False
    return True
def validate_date(date: str) -> bool:
    """Check a "DD/MM/YYYY" string for being a real calendar date.

    Applies the Gregorian leap-year rule (divisible by 4, except
    centuries not divisible by 400).  Returns False on malformed input.
    """
    try:
        day, month, year = (int(part) for part in date.split("/"))
    except ValueError:
        return False
    is_leap = year % 400 == 0 or (year % 100 != 0 and year % 4 == 0)
    days_in_month = (31, 29 if is_leap else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    return 1 <= month <= 12 and 1 <= day <= days_in_month[month - 1]
return False
def validate_phone(phone: str) -> bool:
if is_num(phone) and len(phone) == 9:
return True
return False
def validate_time(time: str) -> bool:
    """Check an "HH:MM" string for a valid 24-hour time of day."""
    try:
        hour, minute = (int(piece) for piece in time.split(":"))
    except ValueError:
        return False
    return 0 <= hour <= 23 and 0 <= minute <= 59
| 23.657143 | 93 | 0.544686 |
3453a405a99dce380533e6518a8d6cedd73c1b6f | 3,690 | py | Python | src/manager/om/script/gspylib/os/gssysctl.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | 1 | 2020-06-30T15:00:50.000Z | 2020-06-30T15:00:50.000Z | src/manager/om/script/gspylib/os/gssysctl.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | null | null | null | src/manager/om/script/gspylib/os/gssysctl.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import sys
import subprocess
sys.path.append(sys.path[0] + "/../../")
from gspylib.common.ErrorCode import ErrorCode
from gspylib.os.gsplatform import g_Platform
class SysctlInfo:
    """
    class: SysctlInfo
    Reads and writes kernel parameters through the sysctl utility and its
    configuration file (both located via g_Platform).
    """
    def __init__(self):
        """
        function: constructor
        """
        self.sysctlFile = g_Platform.getSysctlFile()
    def GetSysPara(self, paraList):
        """
        function : Get system parameters by paraList
        input : paraList parameters list
        output : para_dict parameters dict
        """
        para_dict = {}
        fullParaDict = {}
        try:
            # 'sysctl -a' dumps every parameter as "name = value" lines.
            cmd = "'%s' -a" % g_Platform.getSysctlCmd()
            (status, output) = subprocess.getstatusoutput(cmd)
            if (status != 0):
                raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
                                " Error: \n%s" % str(output))
            for line in output.split('\n'):
                words = line.split('=')
                if (len(words) < 2):
                    continue
                fullParaDict[words[0].strip()] = words[1].strip()
            # keep only the requested parameters
            for para in paraList:
                if (para in fullParaDict.keys()):
                    para_dict[para] = fullParaDict[para]
        except Exception as e:
            raise Exception(str(e))
        return para_dict
    def SetSysPara(self, paraDict):
        """
        function : Set system parameters by dict
        input : paraDict parameters dict
        output : null
        """
        try:
            # Comment out every active config line that mentions one of the
            # keys, then append the new "key = value" settings.
            configure_file = self.sysctlFile
            with open(configure_file, 'r') as fp:
                full_line = fp.readlines()
            with open(configure_file, 'w') as fp:
                for current_line in full_line:
                    isFind = False
                    for key in paraDict.keys():
                        # startswith('#') also tolerates blank lines, where
                        # the old strip()[0] lookup would raise IndexError.
                        if current_line.find(key) >= 0 and \
                                not current_line.strip().startswith('#'):
                            fp.write("#" + current_line)
                            isFind = True
                    if not isFind:
                        fp.write(current_line)
                # Fix: append the new settings while the file is still open.
                # Previously this loop ran after the 'with' block had closed
                # fp, so every write raised "I/O operation on closed file".
                for key in paraDict.keys():
                    fp.write("\n" + key + " =" + paraDict[key])
            # reload the settings so they take effect immediately
            cmd = "'%s' -p" % g_Platform.getSysctlCmd()
            (status, output) = subprocess.getstatusoutput(cmd)
            if status != 0:
                raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
                                " Error: \n%s" % str(output))
        except Exception as e:
            raise Exception(str(e))
# Module-level singleton used by importers of this module.
g_sysctl = SysctlInfo()
| 35.825243 | 78 | 0.507317 |
1dc9153648e8a531e2499fe2504a9dafee77369f | 1,112 | py | Python | setup.py | mverleg/fenpei | 2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb | [
"BSD-3-Clause"
] | null | null | null | setup.py | mverleg/fenpei | 2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb | [
"BSD-3-Clause"
] | 2 | 2016-11-17T12:10:36.000Z | 2017-02-08T09:06:37.000Z | setup.py | mverleg/fenpei | 2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
# The long_description shown on PyPI is taken verbatim from the README.
with open('README.rst', 'r') as fh:
	readme = fh.read()
# Standard setuptools metadata for the ``fenpei`` distribution.
setup(
	name='fenpei',
	description='Distribution of tasks.',
	long_description=readme,
	url='https://github.com/mverleg/fenpei',
	author='Mark V',
	maintainer='(the author)',
	author_email='mdilligaf@gmail.com',
	license='Revised BSD License (LICENSE.txt)',
	keywords=[],
	version='2.7.2',
	packages=['fenpei'],
	include_package_data=True,
	zip_safe=False,
	classifiers=[
		'Development Status :: 5 - Production/Stable',
		'Intended Audience :: Developers',
		'Natural Language :: English',
		'License :: OSI Approved :: BSD License',
		'Operating System :: OS Independent',
		'Programming Language :: Python',
		'Programming Language :: Python :: 2',
		'Programming Language :: Python :: 2.7',
		'Programming Language :: Python :: 3',
		'Programming Language :: Python :: 3.4',
		'Programming Language :: Python :: 3.5',
		'Programming Language :: Python :: 3.6',
		'Topic :: Software Development :: Libraries :: Python Modules',
	],
	install_requires=[
		'bardeen',
		'jinja2',
		'xxhash',
	],
)
| 25.272727 | 65 | 0.668165 |
083d4666dc4b562ab9bb9b17850d35c230094b58 | 166 | py | Python | daemon/config.py | T1duS/ccextractor-web | 73e704640d13c9b5d84ae2e8bc5bdcf352caaa75 | [
"MIT"
] | 19 | 2018-05-18T13:55:54.000Z | 2019-10-26T10:08:45.000Z | daemon/config.py | T1duS/ccextractor-web | 73e704640d13c9b5d84ae2e8bc5bdcf352caaa75 | [
"MIT"
] | 23 | 2018-06-04T07:10:15.000Z | 2019-10-27T18:45:21.000Z | daemon/config.py | T1duS/ccextractor-web | 73e704640d13c9b5d84ae2e8bc5bdcf352caaa75 | [
"MIT"
] | 21 | 2018-07-07T07:54:12.000Z | 2020-11-24T14:35:27.000Z | LOG_FILE_EXTENSION = '.log'
RETRY_TIME = 5
"""
LOG_LEVEL possible values :
CRITICAL
ERROR
WARNING
INFO
DEBUG
NOTSET
"""
LOG_LEVEL = "DEBUG"
LOG_FILE_DIR = "logs/"
| 9.764706 | 27 | 0.722892 |
fd867da2e476053154b67356cb5fccffafc472eb | 117 | py | Python | python/__init__.py | Red-Eyed/sky_watcher_focuser_control | 5d54aa79f6da7569e1200803d623f850097228d8 | [
"MIT"
] | null | null | null | python/__init__.py | Red-Eyed/sky_watcher_focuser_control | 5d54aa79f6da7569e1200803d623f850097228d8 | [
"MIT"
] | null | null | null | python/__init__.py | Red-Eyed/sky_watcher_focuser_control | 5d54aa79f6da7569e1200803d623f850097228d8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Vadym Stupakov"
__email__ = "vadim.stupakov@gmail.com"
| 19.5 | 38 | 0.675214 |
ca3e713122a8b0abce41685029c84f73e2dc3ea0 | 1,108 | py | Python | 970.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
] | null | null | null | 970.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
] | null | null | null | 970.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
] | null | null | null | """
---> Powerful Integers
---> Medium
"""
class Solution:
    def powerful_integers(self, x, y, bound):
        """Return the set of values x**i + y**j (i, j >= 0) not exceeding *bound*.

        Powers of each base are enumerated until they exceed *bound*; bases
        equal to 1 are special-cased since their only power is 1.
        """
        x_done, y_done = 0, 0
        x_powers, y_powers = [], []
        if x == 1:
            if bound != 0:
                x_powers.append(1)
            x_done = 1
        if y == 1:
            if bound != 0:
                y_powers.append(1)
            y_done = 1
        exp = 0
        while not (x_done and y_done):
            if not x_done:
                px = x ** exp
                if px <= bound:
                    x_powers.append(px)
                else:
                    x_done = 1
            if not y_done:
                py = y ** exp
                if py <= bound:
                    y_powers.append(py)
                else:
                    y_done = 1
            exp += 1
        return {px + py for px in x_powers for py in y_powers if px + py <= bound}
# Ad-hoc manual check: LeetCode example with x=3, y=5, bound=15.
xi = 3
yi = 5
bound = 15
a = Solution()
print(a.powerful_integers(xi, yi, bound))
"""
Get set of all powers of x and y smaller than bound then make set of pairs with sum smaller than the bound
Complexities:
Time ->
Space ->
"""
| 19.438596 | 106 | 0.393502 |
0506ea92800610828bdd5183498361f755185bf4 | 5,456 | py | Python | doc/moveit_cpp/launch/moveit_cpp_tutorial.launch.py | TiejunMS/moveit2_tutorials | 01a8041342e9a47a27b540f12f8bbf315e31a2c2 | [
"BSD-3-Clause"
] | 22 | 2021-03-05T08:31:35.000Z | 2022-03-27T11:31:36.000Z | doc/moveit_cpp/launch/moveit_cpp_tutorial.launch.py | vatanaksoytezer/moveit2_tutorials | 01a8041342e9a47a27b540f12f8bbf315e31a2c2 | [
"BSD-3-Clause"
] | 207 | 2021-02-04T19:42:04.000Z | 2022-03-29T14:58:10.000Z | doc/moveit_cpp/launch/moveit_cpp_tutorial.launch.py | vatanaksoytezer/moveit2_tutorials | 01a8041342e9a47a27b540f12f8bbf315e31a2c2 | [
"BSD-3-Clause"
] | 62 | 2021-02-07T14:12:19.000Z | 2022-03-27T07:51:16.000Z | import os
import yaml
from launch import LaunchDescription
from launch_ros.actions import Node
from launch.actions import ExecuteProcess
from ament_index_python.packages import get_package_share_directory
import xacro
def load_file(package_name, file_path):
    """Return the text of *file_path* inside a ROS package's share directory.

    Returns None when the file cannot be opened (missing or unreadable),
    letting callers treat absent configuration as optional.
    """
    share_dir = get_package_share_directory(package_name)
    full_path = os.path.join(share_dir, file_path)
    try:
        with open(full_path, "r") as handle:
            return handle.read()
    # EnvironmentError covers IOError/OSError (and WindowsError where defined)
    except EnvironmentError:
        return None
def load_yaml(package_name, file_path):
    """Parse a YAML file from a ROS package's share directory.

    Returns the deserialized document, or None when the file cannot be
    opened (missing or unreadable).
    """
    share_dir = get_package_share_directory(package_name)
    full_path = os.path.join(share_dir, file_path)
    try:
        with open(full_path, "r") as handle:
            return yaml.safe_load(handle)
    # EnvironmentError covers IOError/OSError (and WindowsError where defined)
    except EnvironmentError:
        return None
def generate_launch_description():
    """Assemble the ROS 2 launch graph for the MoveItCpp tutorial.

    Builds the robot description (URDF via xacro, SRDF), kinematics,
    controller and OMPL pipeline parameter dictionaries, then declares the
    demo node, RViz, the TF publishers, the ros2_control node and the
    controller spawner processes.
    """
    # moveit_cpp.yaml is passed by filename for now since it's node specific
    moveit_cpp_yaml_file_name = (
        get_package_share_directory("moveit2_tutorials") + "/config/moveit_cpp.yaml"
    )
    # Component yaml files are grouped in separate namespaces
    robot_description_config = xacro.process_file(
        os.path.join(
            get_package_share_directory("moveit_resources_panda_moveit_config"),
            "config",
            "panda.urdf.xacro",
        )
    )
    robot_description = {"robot_description": robot_description_config.toxml()}
    robot_description_semantic_config = load_file(
        "moveit_resources_panda_moveit_config", "config/panda.srdf"
    )
    robot_description_semantic = {
        "robot_description_semantic": robot_description_semantic_config
    }
    kinematics_yaml = load_yaml(
        "moveit_resources_panda_moveit_config", "config/kinematics.yaml"
    )
    moveit_simple_controllers_yaml = load_yaml(
        "moveit_resources_panda_moveit_config", "config/panda_controllers.yaml"
    )
    moveit_controllers = {
        "moveit_simple_controller_manager": moveit_simple_controllers_yaml,
        "moveit_controller_manager": "moveit_simple_controller_manager/MoveItSimpleControllerManager",
    }
    # OMPL planning pipeline: plugin, request adapter chain, and tolerances.
    ompl_planning_pipeline_config = {
        "ompl": {
            "planning_plugin": "ompl_interface/OMPLPlanner",
            "request_adapters": """default_planner_request_adapters/AddTimeOptimalParameterization default_planner_request_adapters/FixWorkspaceBounds default_planner_request_adapters/FixStartStateBounds default_planner_request_adapters/FixStartStateCollision default_planner_request_adapters/FixStartStatePathConstraints""",
            "start_state_max_bounds_error": 0.1,
        }
    }
    ompl_planning_yaml = load_yaml(
        "moveit_resources_panda_moveit_config", "config/ompl_planning.yaml"
    )
    ompl_planning_pipeline_config["ompl"].update(ompl_planning_yaml)
    # MoveItCpp demo executable
    moveit_cpp_node = Node(
        name="moveit_cpp_tutorial",
        package="moveit2_tutorials",
        executable="moveit_cpp_tutorial",
        output="screen",
        parameters=[
            moveit_cpp_yaml_file_name,
            robot_description,
            robot_description_semantic,
            kinematics_yaml,
            ompl_planning_pipeline_config,
            moveit_controllers,
        ],
    )
    # RViz
    rviz_config_file = (
        get_package_share_directory("moveit2_tutorials")
        + "/launch/moveit_cpp_tutorial.rviz"
    )
    rviz_node = Node(
        package="rviz2",
        executable="rviz2",
        name="rviz2",
        output="log",
        arguments=["-d", rviz_config_file],
        parameters=[robot_description, robot_description_semantic],
    )
    # Static TF: identity transform from world to the robot base link.
    static_tf = Node(
        package="tf2_ros",
        executable="static_transform_publisher",
        name="static_transform_publisher",
        output="log",
        arguments=["0.0", "0.0", "0.0", "0.0", "0.0", "0.0", "world", "panda_link0"],
    )
    # Publish TF
    robot_state_publisher = Node(
        package="robot_state_publisher",
        executable="robot_state_publisher",
        name="robot_state_publisher",
        output="both",
        parameters=[robot_description],
    )
    # ros2_control using FakeSystem as hardware
    ros2_controllers_path = os.path.join(
        get_package_share_directory("moveit_resources_panda_moveit_config"),
        "config",
        "panda_ros_controllers.yaml",
    )
    ros2_control_node = Node(
        package="controller_manager",
        executable="ros2_control_node",
        parameters=[robot_description, ros2_controllers_path],
        output={
            "stdout": "screen",
            "stderr": "screen",
        },
    )
    # Load controllers.  NOTE: each spawner runs through a shell
    # (shell=True); the controller names are fixed constants here.
    load_controllers = []
    for controller in [
        "panda_arm_controller",
        "panda_hand_controller",
        "joint_state_broadcaster",
    ]:
        load_controllers += [
            ExecuteProcess(
                cmd=["ros2 run controller_manager spawner {}".format(controller)],
                shell=True,
                output="screen",
            )
        ]
    return LaunchDescription(
        [
            static_tf,
            robot_state_publisher,
            rviz_node,
            moveit_cpp_node,
            ros2_control_node,
        ]
        + load_controllers
    )
| 32.47619 | 325 | 0.670455 |
470fad1b2f830d6bab07a4d02eb76e4cb45bae64 | 5,623 | py | Python | test/integration/ggrc_workflows/converters/test_export_cycle_task_group_object_tasks.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc_workflows/converters/test_export_cycle_task_group_object_tasks.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2018-07-06T00:04:23.000Z | 2021-02-26T21:13:20.000Z | test/integration/ggrc_workflows/converters/test_export_cycle_task_group_object_tasks.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-11-11T22:16:56.000Z | 2017-11-11T22:16:56.000Z | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for task group task specific export."""
from collections import defaultdict
from ddt import data, ddt, unpack
from integration.ggrc_workflows.models import factories
from integration.ggrc.models import factories as ggrc_factories
from integration.ggrc import TestCase
from ggrc.models import all_models
@ddt
class TestExportTasks(TestCase):
  """Test imports for basic workflow objects."""
  # NOTE: uses dict.iteritems() below, so this suite targets Python 2.
  model = all_models.CycleTaskGroupObjectTask
  def setUp(self):
    super(TestExportTasks, self).setUp()
    self.client.get("/login")
    # Headers required by the GGRC export endpoint.
    self.headers = {
        'Content-Type': 'application/json',
        "X-Requested-By": "GGRC",
        "X-export-view": "blocks",
    }
  @staticmethod
  def generate_tasks_for_cycle(task_count):
    """Create *task_count* cycle tasks, each with one assignee; return ids."""
    results = []
    with ggrc_factories.single_commit():
      for idx in range(task_count):
        person = ggrc_factories.PersonFactory(
            name="user for group {}".format(idx)
        )
        task = factories.CycleTaskFactory()
        role = all_models.AccessControlRole.query.filter(
            all_models.AccessControlRole.name == "Task Assignees",
            all_models.AccessControlRole.object_type == task.type,
        ).one()
        ggrc_factories.AccessControlListFactory(
            ac_role=role, object=task, person=person)
        results.append(task.id)
    return results
  @data(0, 1, 2)
  def test_filter_by_task_title(self, task_count):
    """Test filter tasks by title"""
    generated = self.generate_tasks_for_cycle(task_count)
    # With task_count == 0 nothing is generated; both sides are falsy.
    self.assertEqual(bool(task_count), bool(generated))
    for task_id in generated:
      task = all_models.CycleTaskGroupObjectTask.query.filter(
          all_models.CycleTaskGroupObjectTask.id == task_id
      ).one()
      self.assert_slugs("task title", task.title, [task.slug])
  @data(0, 1, 2)
  def test_filter_by_task_due_date(self, task_count):
    """Test filter by task due date"""
    due_date_dict = defaultdict(set)
    generated = self.generate_tasks_for_cycle(task_count)
    self.assertEqual(bool(task_count), bool(generated))
    for task_id in generated:
      task = all_models.CycleTaskGroupObjectTask.query.filter(
          all_models.CycleTaskGroupObjectTask.id == task_id
      ).one()
      due_date_dict[str(task.end_date)].add(task.slug)
    # Each distinct due date must return exactly the slugs that share it.
    for due_date, slugs in due_date_dict.iteritems():
      self.assert_slugs("task due date", due_date, list(slugs))
  @data(0, 1, 2,)
  def test_filter_by_task_assignee(self, task_count):
    """Test filter task by assignee name or email"""
    generated = self.generate_tasks_for_cycle(task_count)
    self.assertEqual(bool(task_count), bool(generated))
    for task_id in generated:
      task = all_models.CycleTaskGroupObjectTask.query.filter(
          all_models.CycleTaskGroupObjectTask.id == task_id
      ).one()
      role = all_models.AccessControlRole.query.filter(
          all_models.AccessControlRole.name == "Task Assignees",
          all_models.AccessControlRole.object_type == task.type,
      ).one()
      assignees = [i.person for i in task.access_control_list
                   if i.ac_role_id == role.id]
      # generate_tasks_for_cycle assigns exactly one person per task.
      self.assertEqual(1, len(assignees))
      self.assert_slugs("task assignees", assignees[0].email, [task.slug])
      self.assert_slugs("task assignees", assignees[0].name, [task.slug])
  def test_filter_by_task_comment(self):
    """Test filter by comments"""
    task_id = self.generate_tasks_for_cycle(4)[0]
    comment_text = "123"
    task = all_models.CycleTaskGroupObjectTask.query.filter(
        all_models.CycleTaskGroupObjectTask.id == task_id
    ).one()
    factories.CycleTaskEntryFactory(
        cycle_task_group_object_task=task,
        description=comment_text,
    )
    self.assert_slugs("task comments", comment_text, [task.slug])
  @data(
      ("status", ["Task State", "task state", "task status"]),
      ("end_date", ["Task Due Date", "task due date", "task end_date"]),
      (
          "start_date",
          ["task Start Date", "task start date", "task start_date"],
      ),
  )
  @unpack
  def test_filter_by_aliases(self, field, aliases):
    """Test filter by alias"""
    expected_results = defaultdict(list)
    tasks = all_models.CycleTaskGroupObjectTask.query.filter(
        all_models.CycleTaskGroupObjectTask.id.in_(
            self.generate_tasks_for_cycle(4)
        )
    ).all()
    for task in tasks:
      expected_results[str(getattr(task, field))].append(task.slug)
    # Every alias spelling of the field must filter to the same slugs.
    for value, slugs in expected_results.iteritems():
      for alias in aliases:
        self.assert_slugs(alias, value, slugs)
  @data(
      (
          "updated_at",
          ["task Last updated", "task last updated", "task updated_at"],
      ),
      (
          "created_at",
          ["task Created Date", "task created Date", "task created_at"],
      ),
  )
  @unpack
  def test_filter_by_datetime_aliases(self, field, aliases):
    """Test filter by datetime field and its aliases"""
    expected_results = defaultdict(list)
    tasks = all_models.CycleTaskGroupObjectTask.query.filter(
        all_models.CycleTaskGroupObjectTask.id.in_(
            self.generate_tasks_for_cycle(4)
        )
    ).all()
    for task in tasks:
      for value in self.generate_date_strings(getattr(task, field)):
        expected_results[value].append(task.slug)
    for value, slugs in expected_results.iteritems():
      for alias in aliases:
        self.assert_slugs(alias, value, slugs)
| 35.588608 | 78 | 0.67793 |
5ef4d9e1348f49542ba726f0ce0ba57a36151949 | 173 | py | Python | app/__init__.py | abcnever/tensorflow_som_colours | d487bd7f8570c2962ef1003b610731d5cef64124 | [
"MIT"
] | null | null | null | app/__init__.py | abcnever/tensorflow_som_colours | d487bd7f8570c2962ef1003b610731d5cef64124 | [
"MIT"
] | null | null | null | app/__init__.py | abcnever/tensorflow_som_colours | d487bd7f8570c2962ef1003b610731d5cef64124 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_cors import CORS
# Flask application object shared across the package.
app = Flask(__name__)
CORS(app) # needed for cross-domain requests, allow everything by default
# Imported last, for its side effects -- presumably app.routes imports
# ``app`` back from this module (standard Flask circular-import pattern).
from app import routes
| 21.625 | 73 | 0.797688 |
40f628a6ef911305a058230682dc3c57fa567893 | 5,053 | py | Python | migrations/0001_initial.py | catalpainternational/riotjs-gettext | 2f9ac7682a0af4f22b339d8af20a3dbd6e76eb27 | [
"BSD-3-Clause"
] | null | null | null | migrations/0001_initial.py | catalpainternational/riotjs-gettext | 2f9ac7682a0af4f22b339d8af20a3dbd6e76eb27 | [
"BSD-3-Clause"
] | null | null | null | migrations/0001_initial.py | catalpainternational/riotjs-gettext | 2f9ac7682a0af4f22b339d8af20a3dbd6e76eb27 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-02 04:43
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Translatable (PO-style source entry) and Translated
    (per-language translation pointing back at its Translatable).

    Auto-generated by Django; edit with care.
    """
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Translatable",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "msgid",
                    models.CharField(
                        help_text="The message text to translate from", max_length=512
                    ),
                ),
                (
                    "msgid_plural",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(
                            help_text="The message text to translate for a plural case",
                            max_length=512,
                        ),
                        blank=True,
                        null=True,
                        size=None,
                    ),
                ),
                (
                    "msgctxt",
                    models.CharField(
                        blank=True,
                        help_text="Optional context marker for the message",
                        max_length=512,
                        null=True,
                    ),
                ),
                (
                    "comment",
                    models.CharField(
                        blank=True,
                        help_text="Extracted comments",
                        max_length=512,
                        null=True,
                    ),
                ),
                (
                    "tcomment",
                    models.CharField(
                        blank=True,
                        help_text="Translator comments",
                        max_length=512,
                        null=True,
                    ),
                ),
                # NOTE: "occurences" is misspelled, but it matches the model
                # field name; renaming it requires a follow-up migration.
                (
                    "occurences",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(
                            blank=True,
                            help_text="Describe where this occurs",
                            max_length=512,
                            null=True,
                        ),
                        blank=True,
                        null=True,
                        size=None,
                    ),
                ),
                (
                    "flags",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=128),
                        blank=True,
                        null=True,
                        size=None,
                    ),
                ),
                (
                    "previous_msgctxt",
                    models.CharField(blank=True, max_length=512, null=True),
                ),
                (
                    "previous_msgid",
                    models.CharField(blank=True, max_length=512, null=True),
                ),
                (
                    "previous_msgid_plural",
                    models.CharField(blank=True, max_length=512, null=True),
                ),
                ("linenum", models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name="Translated",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "msgstr",
                    models.CharField(
                        help_text="The translation of the entry message string",
                        max_length=512,
                    ),
                ),
                (
                    "msgstr_plural",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=models.CharField(max_length=512),
                        blank=True,
                        null=True,
                        size=None,
                    ),
                ),
                ("obsolete", models.BooleanField(blank=True, null=True)),
                # Language code, e.g. "en" or "pt-BR" (max 5 chars).
                ("language", models.CharField(max_length=5)),
                (
                    "msg",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="gettext_utils.translatable",
                    ),
                ),
            ],
        ),
    ]
| 33.686667 | 88 | 0.348506 |
1a19ca0c3b8eef89d3290fa4bbf0cba32651942e | 12,600 | py | Python | examples/pwr_run/checkpointing/jobs_max_pwr/main.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/jobs_max_pwr/main.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/jobs_max_pwr/main.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
queue = [49, 15, 50, 39, 14, 40, 13, 37, 32, 44, 1, 25, 6, 12, 43, 35, 29, 7, 46, 23, 47, 34, 21, 33, 36, 24, 28, 48, 17, 8, 45, 30, 2, 41, 16, 3, 27, 20, 38, 11, 42, 10, 22, 4, 18, 19, 5, 9, 26, 31]
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
PJCT = {} # practical complete time, not applicable for all jobs
index = 0
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
qualified_jobs = 0
K80_job = []
V100_job = []
all_job = []
qualified_job = []
pc_job = [] # list of jobs that are pratically completed
nodelist='c2178,d1018' ### change this ###
testcase = 'max_pwr_0.001' ### change this ###
### also, change .h5 file folder in jobs ###
INTERVAL = 60 # make decision every 30s
QUALIFY_TIME = 600 #600s or 10min as threshold
# takes in a list of jobs qualified for promote, returns a list of jobs that get upgraded, and a list for demoted jobs
def random_promotion(V100_free, promote_list, force_demote):
num_demote = len(force_demote)
V100_avail = num_demote + V100_free
if V100_avail >= len(promote_list):
return promote_list, force_demote
else:
return random.sample(promote_list, V100_avail), force_demote
def max_power_promotion(V100_job, promote_list, force_demote):
power_dict = {}
with open('power.json', 'r') as fp:
power_dict = json.load(fp)
V100_qual = list(set(V100_job) - set(force_demote))
V100_pool = V100_qual + promote_list # any current V100 job not demoted and K80 job qualified can go into the pool
num_V100_pool = len(V100_pool)
if num_V100_pool <= 4:
# every thing in the pool should go into V100
return promote_list, force_demote
else:
pool_dict = {}
for job in V100_pool:
# make sure the key exists
if 'job'+job in power_dict:
pool_dict[job] = power_dict['job'+job]
# sort dict keys from big value to small value, and take the first 4 only
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:4]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(V100_job).difference(sorted_pool))
return promotion_list, demotion_list
# checks squeue every 10s to see if the job has ended
def wait_till_job_ends(job, gpu):
CHECK_INTERVAL = 10 # 10s
cmd = 'squeue -u $USER --name=job' + job + '_' + gpu + ' --nodelist=' + nodelist + ' | awk \"/job/\" | awk \'{print $3}\''
while True:
try:
stdout = subprocess.check_output([cmd], shell=True)
stdout = str(stdout)
job_run = re.findall(r'\d+', stdout) # this is list of currently running jobs in string, e.g. ['20', '48'...]
if job not in job_run:
break
except: #subprocess.CalledProcessError:
print('encountered squeue error when waiting for job to end')
time.sleep(CHECK_INTERVAL)
scancel_job(job, gpu)
time.sleep(CHECK_INTERVAL)
# checks output and makes sure no exception occurs
def check_output(cmd):
RETRY_INTERVAL = 10
while True:
try:
stdout = subprocess.check_output([cmd], shell=True)
return stdout
except: #subprocess.CalledProcessError:
print('encountered scancel error when calling check_output function, retrying...')
time.sleep(RETRY_INTERVAL)
# cancel job, and make sure there is no error with scancel command
def scancel_job(job, gpu): # scancel_job('50', 'K')
RETRY_INTERVAL = 10
cmd = 'scancel --signal=TERM --name=job' + job + '_' + gpu + ' --nodelist=' + nodelist
while True:
try:
subprocess.check_output([cmd], shell=True)
break
except: #subprocess.CalledProcessError:
print('encountered scancel error, retrying...')
time.sleep(RETRY_INTERVAL)
# kill job, and make sure there is no error with scancel command
def kill_job(job, gpu): # scancel_job('50', 'K')
RETRY_INTERVAL = 10
cmd = 'scancel --name=job' + job + '_' + gpu + ' --nodelist=' + nodelist
while True:
try:
subprocess.check_output([cmd], shell=True)
break
except: #subprocess.CalledProcessError:
print('encountered scancel error, retrying...')
time.sleep(RETRY_INTERVAL)
# resume job, and make sure there is no error with sbatch submission
def resume_job(job, gpu): # resume_job('1', 'V')
RETRY_INTERVAL = 10
cmd = './run_resume.sh job' + job + ' ' + gpu + ' ' + testcase
while True:
stdout = subprocess.run([cmd], shell=True, stdout=subprocess.PIPE).stdout
stdout = str(stdout)
print(stdout)
if 'Submitted batch job' in stdout:
break
else:
kill_job(job, gpu) # make sure repeated submission doesn't exist
print('encountered sbatch error on job resume, retrying...')
time.sleep(RETRY_INTERVAL)
# start job, and make sure there is no error with sbatch submission
def start_job(job): # start_job('1')
RETRY_INTERVAL = 10
cmd = './run_start.sh job' + job + ' K ' + testcase
while True:
stdout = subprocess.run([cmd], shell=True, stdout=subprocess.PIPE).stdout
stdout = str(stdout)
print(stdout)
if 'Submitted batch job' in stdout:
break
else:
kill_job(job, 'K')
print('encountered sbatch error on job start, retrying...')
time.sleep(RETRY_INTERVAL)
# function that checks the tensorboard log of currently running jobs and logs practical complete jobs in a global list
# once a job reaches practical complete, it cannot be promoted. If it's already promoted, it gets demoted.
# criteria for practical complete: loss improvement has been smaller than 0.01 for last 3 consecutive epochs
def check_practical_complete(job_list):
log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/'
threshold = 0.001
global pc_job
global PJCT
for job in job_list:
# only check for job outside of practical complete job list
if job not in pc_job:
log_dir = log_path + 'job' + job + '/*'
dirs = glob.glob(log_dir)
dirs.sort()
loss_combine = []
for tc in dirs:
iterator = EventAccumulator(tc).Reload()
if len(iterator.Tags()['scalars']) > 0:
tag = 'loss' #iterator.Tags()['scalars'][2] # this is tag for loss
loss = [item.value for item in iterator.Scalars(tag)]
loss_combine += loss
# now that we have the loss at each epoch, we can check if it has reached practical complete
if len(loss_combine) >= 4:
latest_loss = loss_combine[-4:]
finished = True
for i in range(3):
# if the difference is >= 0.01, the job has not reached practical complete yet
if latest_loss[i] - latest_loss[i+1] >= threshold:
finished = False
break
if finished:
print('job' + job + ' has reached practical complete, the last 4 loss values are')
print(str(latest_loss))
pc_job.append(job)
PJCT[job] = int(time.time() - job_start[job])
############### first clear finish status and recorded power of all jobs ####################
finish_dict = {}
with open('finish.json', 'r') as fp:
finish_dict = json.load(fp)
for key in finish_dict:
finish_dict[key] = 0
with open('finish.json', 'w') as fp:
json.dump(finish_dict, fp)
power_dict = {}
with open('power.json', 'r') as fp:
power_dict = json.load(fp)
for key in power_dict:
power_dict[key] = 0
with open('power.json', 'w') as fp:
json.dump(power_dict, fp)
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
with open('finish.json', 'r') as fp:
finish_dict = json.load(fp)
for job in K80_job[:]:
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job.remove(job)
print('K80 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
for job in V100_job[:]:
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job.remove(job)
print('V100 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
################ check for practical finished jobs on K80 and V100 ######################
all_job = K80_job + V100_job
check_practical_complete(all_job)
################ check run time of current K80 job, update qualified_job #################
with open('power.json', 'r') as fp:
power_dict = json.load(fp)
for job in K80_job:
if job not in qualified_job:
pwr_meas = power_dict['job'+job]
if pwr_meas > 0:
qualified_job.append(job)
print('job' + job + ' has been qualified for promotion')
################ make promotion decisions ########################
V100_free = V100_cap - V100_used
# this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete
promote_list = list(set(qualified_job).intersection(K80_job).difference(pc_job))
# this returns job forced to be demoted. Currently in V100, and is practically complete
force_demote = list(set(V100_job).intersection(pc_job))
if len(promote_list) > 0:
#promoted, demoted = random_promotion(V100_free, promote_list, force_demote)
promoted, demoted = max_power_promotion(V100_job, promote_list, force_demote)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
for job in promoted:
scancel_job(job, 'K')
print('K80 job canceled: ' + job)
K80_job.remove(job)
K80_used -= 1
# stop all demoted jobs on V100
for job in demoted:
scancel_job(job, 'V')
print('V100 job canceled: ' + job)
V100_job.remove(job)
V100_used -= 1
# resume promoted jobs on V100
for job in promoted:
wait_till_job_ends(job, 'K')
resume_job(job, 'V')
print('V100 job resumed: ' + job)
V100_job.append(job)
V100_used += 1
# resume demoted jobs on K80
for job in demoted:
wait_till_job_ends(job, 'V')
resume_job(job, 'K')
print('K80 job resumed: ' + job)
K80_job.append(job)
K80_used += 1
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
if index < len(queue):
job = str(queue[index])
start_job(job)
print('new job created on K80: ' + job)
K80_job.append(job)
job_start[job] = time.time()
index += 1
K80_used += 1
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
if len(K80_job) == 0 and len(V100_job) == 0 and index == len(queue):
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_PJCT = np.average(list(PJCT.values()))
PJCT['average'] = average_PJCT
print('finished all runs')
JCT_name = testcase + '_JCT.json'
PJCT_name = testcase + '_PJCT.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(PJCT_name, 'w') as fp2:
json.dump(PJCT, fp2, sort_keys=True, indent=4)
| 37.5 | 199 | 0.585317 |
3e4ecbf52ac23d8cce5e68a342d97d8af521c887 | 560 | py | Python | threshold_value/migrations/0003_auto_20141030_1047.py | sigurdsa/angelika-api | df594ecf34c29ab94d424a0697d77157a50c3500 | [
"MIT"
] | 2 | 2015-01-30T09:38:45.000Z | 2018-02-12T09:38:11.000Z | threshold_value/migrations/0003_auto_20141030_1047.py | sigurdsa/angelika-api | df594ecf34c29ab94d424a0697d77157a50c3500 | [
"MIT"
] | null | null | null | threshold_value/migrations/0003_auto_20141030_1047.py | sigurdsa/angelika-api | df594ecf34c29ab94d424a0697d77157a50c3500 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('threshold_value', '0002_auto_20141030_1044'),
]
operations = [
migrations.AlterField(
model_name='thresholdvalue',
name='is_upper_threshold',
field=models.BooleanField(default=False, help_text=b'If true, the threshold value is upper, if false, the threshold value is lower'),
preserve_default=True,
),
]
| 26.666667 | 145 | 0.651786 |
63afcff33e5a63208fa5cd00a3fffb8225c4b4a8 | 5,754 | py | Python | subtract.negatives.count.table.py | tbj128/mothur-microbiome-scripts | 1a75b309ca2e22152c7cbf0fb6c0ad5beb340ad6 | [
"MIT"
] | null | null | null | subtract.negatives.count.table.py | tbj128/mothur-microbiome-scripts | 1a75b309ca2e22152c7cbf0fb6c0ad5beb340ad6 | [
"MIT"
] | null | null | null | subtract.negatives.count.table.py | tbj128/mothur-microbiome-scripts | 1a75b309ca2e22152c7cbf0fb6c0ad5beb340ad6 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import csv
import sys
import os
import processing as ps
COUNT_TABLE_SEQUENCE_COL = 0
COUNT_TABLE_SAMPLE_START_COL = 2
sampleIDCol = 0
diseaseCol = 1
batchCol = 2
if len(sys.argv) != 5:
print(("Usage: python " + sys.argv[0] + " [.count_table File] [Metadata File] [Negative Control Identifier] [Use Batch? - Y/N]"))
sys.exit(1)
print("\n")
sharedFile = sys.argv[1]
metadataFile = sys.argv[2]
negSamplePhrase = sys.argv[3]
useBatch = sys.argv[4]
isBatch = False
if useBatch.lower() == "y" or useBatch.lower() == "yes":
isBatch = True
print("Negative Control Sample Sequence Subtraction [By Batch]\n")
else:
print("Negative Control Sample Sequence Subtraction\n")
sharedFile = sharedFile.replace('"', "")
countTable = ps.readInputFile(sharedFile)
metadata = ps.readInputFile(metadataFile)
#
# Process
#
newFilename = sharedFile.rsplit('.', 1)[0]
fileExt = sharedFile.rsplit('.', 1)[-1]
if isBatch:
newFilename = newFilename + ".batch.negsubtracted." + fileExt
else:
newFilename = newFilename + ".negsubtracted." + fileExt
accnosFile = sharedFile.rsplit('.', 1)[0]
if isBatch:
accnosFile = accnosFile + ".batch.negremoved.accnos"
else:
accnosFile = accnosFile + ".negremoved.accnos"
if not isBatch:
negativeIDs = {}
for row in metadata:
if row[diseaseCol] == negSamplePhrase:
negativeIDs[row[sampleIDCol]] = 1
numRemoved = 0
negativeCols = []
rowsToRemove = {}
i = 0
while i < len(countTable):
if i == 0:
j = COUNT_TABLE_SAMPLE_START_COL
while j < len(countTable[i]):
if countTable[i][j] in negativeIDs:
negativeCols.append(j)
j += 1
else:
# Take the average of the negative control samples
negTotal = 0
for negativeCol in negativeCols:
negTotal += float(countTable[i][negativeCol])
negAvg = negTotal / float(len(negativeCols))
# Subtract the "averaged" negative control sample values
total = 0
j = COUNT_TABLE_SAMPLE_START_COL
while j < len(countTable[i]):
if j not in negativeCols and countTable[i][j] != "":
countTable[i][j] = float(countTable[i][j]) - negAvg
if countTable[i][j] < 0:
countTable[i][j] = 0
total += countTable[i][j]
j += 1
countTable[i][1] = total
if total == 0:
rowsToRemove[i] = 1
numRemoved += 1
if i % 200 == 0 and i > 0:
print("Processed " + str(i) + " / " + str(len(countTable)))
i += 1
# Creates new count table excluding the negative samples columns and excluding the sequences with all zero counts
removedSequences = []
r = 0
newCountTable = []
while r < len(countTable):
if r not in rowsToRemove:
newRow = []
c = 0
while c < len(countTable[r]):
if c not in negativeCols and countTable[r][c] != "":
newRow.append(countTable[r][c])
c = c + 1
newCountTable.append(newRow)
else:
removedSequences.append([countTable[r][COUNT_TABLE_SEQUENCE_COL]])
r += 1
ps.exportToFile(newCountTable, newFilename)
print("=================================================")
print("Finished subtracting negative sequence counts")
if numRemoved > 0:
ps.exportToFile(removedSequences, accnosFile)
print("Number of sequences removed : " + str(numRemoved))
print("Accnos file at: " + accnosFile)
print("Output file at: " + newFilename)
else:
negativeBatchToSampleID = {}
sampleIDToDisease = {}
sampleIDToBatch = {}
negativeIDs = {}
for row in metadata:
sampleID = row[sampleIDCol]
sampleIDToDisease[sampleID] = row[diseaseCol]
sampleIDToBatch[sampleID] = row[batchCol]
if row[diseaseCol] == negSamplePhrase:
negativeBatchToSampleID[row[batchCol]] = sampleID
negativeIDs[sampleID] = 1
sampleIDToCol = {}
colToSampleID = {}
negativeCols = []
rowsToRemove = {}
numRemoved = 0
i = 0
while i < len(countTable):
if i == 0:
j = COUNT_TABLE_SAMPLE_START_COL
while j < len(countTable[i]):
if countTable[i][j] != "":
colToSampleID[j] = countTable[i][j]
sampleIDToCol[countTable[i][j]] = j
if countTable[i][j] in negativeIDs:
negativeCols.append(j)
j += 1
else:
# For each representative sequence, get the negative control values per batch
j = COUNT_TABLE_SAMPLE_START_COL
newSum = 0
while j < len(countTable[i]):
if countTable[i][j] != "":
sampleID = colToSampleID[j]
batch = sampleIDToBatch[sampleID]
if j not in negativeCols and batch in negativeBatchToSampleID:
negativeBatchCol = sampleIDToCol[negativeBatchToSampleID[batch]]
countTable[i][j] = int(countTable[i][j]) - int(countTable[i][negativeBatchCol])
if countTable[i][j] < 0:
countTable[i][j] = 0
newSum += int(countTable[i][j])
j += 1
countTable[i][1] = newSum
if newSum == 0:
rowsToRemove[i] = 1
numRemoved += 1
if i % 200 == 0 and i > 0:
print("Processed " + str(i) + " / " + str(len(countTable)))
i += 1
# Creates new count table excluding the negative samples columns and excluding the sequences with all zero counts
removedSequences = []
r = 0
newCountTable = []
while r < len(countTable):
if r not in rowsToRemove:
newRow = []
c = 0
while c < len(countTable[r]):
if c not in negativeCols:
newRow.append(countTable[r][c])
c = c + 1
newCountTable.append(newRow)
else:
removedSequences.append([countTable[r][COUNT_TABLE_SEQUENCE_COL]])
r += 1
ps.exportToFile(newCountTable, newFilename)
print("=================================================")
print("Finished subtracting negative sequence counts by batch")
if numRemoved > 0:
ps.exportToFile(removedSequences, accnosFile)
print("Number of sequences removed : " + str(numRemoved))
print("Accnos file at: " + accnosFile)
print("Output file at: " + newFilename) | 27.014085 | 130 | 0.665798 |
912a8b0e2ca6ebfee10024254fbdb67fad11443a | 637 | py | Python | sdk/python/pulumi_aws_native/redshift/__init__.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws_native/redshift/__init__.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws_native/redshift/__init__.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .cluster import *
from .cluster_parameter_group import *
from .cluster_security_group import *
from .cluster_security_group_ingress import *
from .cluster_subnet_group import *
from .endpoint_access import *
from .endpoint_authorization import *
from .event_subscription import *
from .scheduled_action import *
from ._inputs import *
from . import outputs
| 31.85 | 80 | 0.77551 |
28f6acc0d24d16e625f14f538a4fa2bc214bda6b | 2,395 | py | Python | applications/experimental/pipelines/pipelines/nodes/other/docs2answers.py | SunYanCN/PaddleNLP | 31deea6c989f399b4552ee711d9f7d62768d645f | [
"Apache-2.0"
] | null | null | null | applications/experimental/pipelines/pipelines/nodes/other/docs2answers.py | SunYanCN/PaddleNLP | 31deea6c989f399b4552ee711d9f7d62768d645f | [
"Apache-2.0"
] | null | null | null | applications/experimental/pipelines/pipelines/nodes/other/docs2answers.py | SunYanCN/PaddleNLP | 31deea6c989f399b4552ee711d9f7d62768d645f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2021 deepset GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from pipelines.schema import Document, Answer, Span
from pipelines.nodes.base import BaseComponent
class Docs2Answers(BaseComponent):
"""
This Node is used to convert retrieved documents into predicted answers format.
It is useful for situations where you are calling a Retriever only pipeline via REST API.
This ensures that your output is in a compatible format.
"""
outgoing_edges = 1
def __init__(self):
self.set_config()
def run(self, query: str, documents: List[Document]): # type: ignore
# conversion from Document -> Answer
answers: List[Answer] = []
for doc in documents:
# For FAQ style QA use cases
if "answer" in doc.meta:
doc.meta[
"query"] = doc.content # question from the existing FAQ
cur_answer = Answer(
answer=doc.meta["answer"],
type="other",
score=doc.score,
context=doc.meta["answer"],
offsets_in_context=[
Span(
start=0, end=len(doc.meta["answer"]))
],
document_id=doc.id,
meta=doc.meta, )
else:
# Regular docs
cur_answer = Answer(
answer="",
type="other",
score=doc.score,
context=doc.content,
document_id=doc.id,
meta=doc.meta, )
answers.append(cur_answer)
output = {"query": query, "answers": answers}
return output, "output_1"
| 35.746269 | 93 | 0.582463 |
d5b490b242457540e49c41ea2a789a039cf53e6b | 2,402 | py | Python | cirq-core/cirq/testing/consistent_pauli_expansion_test.py | BearerPipelineTest/Cirq | e00767a2ef1233e82e9089cf3801a77e4cc3aea3 | [
"Apache-2.0"
] | 1 | 2022-02-05T22:17:39.000Z | 2022-02-05T22:17:39.000Z | cirq-core/cirq/testing/consistent_pauli_expansion_test.py | BearerPipelineTest/Cirq | e00767a2ef1233e82e9089cf3801a77e4cc3aea3 | [
"Apache-2.0"
] | 4 | 2022-01-16T14:12:15.000Z | 2022-02-24T03:58:46.000Z | cirq-core/cirq/testing/consistent_pauli_expansion_test.py | Nexuscompute/Cirq | 640ef8f82d6a56ec95361388ce7976e096cca906 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import cirq
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1, -1])
class GoodGateExplicitPauliExpansion(cirq.testing.SingleQubitGate):
def _unitary_(self) -> np.ndarray:
return np.sqrt(1 / 2) * X + np.sqrt(1 / 3) * Y + np.sqrt(1 / 6) * Z
def _pauli_expansion_(self) -> cirq.LinearDict[str]:
return cirq.LinearDict({'X': np.sqrt(1 / 2), 'Y': np.sqrt(1 / 3), 'Z': np.sqrt(1 / 6)})
class GoodGateNoPauliExpansion(cirq.Gate):
def num_qubits(self) -> int:
return 4
class GoodGateNoUnitary(cirq.testing.SingleQubitGate):
def _pauli_expansion_(self) -> cirq.LinearDict[str]:
return cirq.LinearDict({'X': np.sqrt(1 / 2), 'Y': np.sqrt(1 / 2)})
class GoodGateNoPauliExpansionNoUnitary(cirq.testing.SingleQubitGate):
pass
class BadGateInconsistentPauliExpansion(cirq.testing.SingleQubitGate):
def _unitary_(self) -> np.ndarray:
return np.sqrt(1 / 2) * X + np.sqrt(1 / 3) * Y + np.sqrt(1 / 6) * Z
def _pauli_expansion_(self) -> cirq.LinearDict[str]:
return cirq.LinearDict({'X': np.sqrt(1 / 6), 'Y': np.sqrt(1 / 3), 'Z': np.sqrt(1 / 2)})
def test_assert_pauli_expansion_is_consistent_with_unitary():
cirq.testing.assert_pauli_expansion_is_consistent_with_unitary(GoodGateExplicitPauliExpansion())
cirq.testing.assert_pauli_expansion_is_consistent_with_unitary(GoodGateNoPauliExpansion())
cirq.testing.assert_pauli_expansion_is_consistent_with_unitary(GoodGateNoUnitary())
cirq.testing.assert_pauli_expansion_is_consistent_with_unitary(
GoodGateNoPauliExpansionNoUnitary()
)
with pytest.raises(AssertionError):
cirq.testing.assert_pauli_expansion_is_consistent_with_unitary(
BadGateInconsistentPauliExpansion()
)
| 35.850746 | 100 | 0.715654 |
9536957ecc1f0990019f1aed83fad074631c2dd5 | 26,218 | py | Python | Tests/ML/visualizers/test_plot_cross_validation.py | albernsurya/InnerEye-DeepLearning | 62ed6aace84c451a20c4e546f88987454c1bf4bd | [
"MIT"
] | 1 | 2021-07-03T14:05:17.000Z | 2021-07-03T14:05:17.000Z | Tests/ML/visualizers/test_plot_cross_validation.py | albernsrya/InnerEye-DeepLearning | 420fb1d452d7834d2c0a79c7bdc711ec16509680 | [
"MIT"
] | null | null | null | Tests/ML/visualizers/test_plot_cross_validation.py | albernsrya/InnerEye-DeepLearning | 420fb1d452d7834d2c0a79c7bdc711ec16509680 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from pathlib import Path
import shutil
from typing import Callable, Dict, List, Optional, Set, Tuple
import pandas as pd
import pytest
from azureml.core import Run
from pandas.core.dtypes.common import is_string_dtype
from InnerEye.Azure.azure_util import CROSS_VALIDATION_SPLIT_INDEX_TAG_KEY
from InnerEye.Common.common_util import CROSSVAL_RESULTS_FOLDER, FULL_METRICS_DATAFRAME_FILE, \
METRICS_AGGREGATES_FILE, SUBJECT_METRICS_FILE_NAME, logging_to_stdout
from InnerEye.Common.fixed_paths import DEFAULT_AML_UPLOAD_DIR
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path
from InnerEye.Common.metrics_constants import LoggingColumns
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.ML.common import DATASET_CSV_FILE_NAME, ModelExecutionMode
from InnerEye.ML.deep_learning_config import ModelCategory
from InnerEye.ML.run_ml import MLRunner
from InnerEye.ML.utils.csv_util import CSV_INSTITUTION_HEADER, CSV_SERIES_HEADER
from InnerEye.ML.visualizers.plot_cross_validation import COL_MODE, \
METRICS_BY_MODE_AND_STRUCTURE_FILE, METRICS_BY_MODE_FILE, \
OfflineCrossvalConfigAndFiles, PlotCrossValidationConfig, RUN_RECOVERY_ID_KEY, \
RunResultFiles, add_comparison_data, check_result_file_counts, \
create_results_breakdown, download_crossval_result_files, get_split_id, load_dataframes, \
plot_cross_validation_from_files, save_outliers
from Tests.AfterTraining.test_after_training import get_most_recent_run_id
from Tests.ML.models.architectures.sequential.test_rnn_classifier import ToyMultiLabelSequenceModel, \
_get_multi_label_sequence_dataframe
from Tests.ML.util import assert_text_files_match, get_default_azure_config
@pytest.fixture
def test_config() -> PlotCrossValidationConfig:
return PlotCrossValidationConfig(
run_recovery_id=get_most_recent_run_id(),
epoch=1,
model_category=ModelCategory.Segmentation
)
@pytest.fixture
def test_config_comparison() -> PlotCrossValidationConfig:
return PlotCrossValidationConfig(
run_recovery_id=get_most_recent_run_id() + "_0",
epoch=1,
comparison_run_recovery_ids=[get_most_recent_run_id() + "_1"],
model_category=ModelCategory.Segmentation
)
def _get_metrics_df(run_recovery_id: str, mode: ModelExecutionMode) -> pd.DataFrame:
metrics_df = pd.read_csv(full_ml_test_data_path("{}_agg_splits.csv".format(mode.value)))
# noinspection PyUnresolvedReferences
metrics_df.split = [run_recovery_id + "_" + index for index in metrics_df.split.astype(str)]
return metrics_df.sort_values(list(metrics_df.columns), ascending=True).reset_index(drop=True)
def download_metrics(config: PlotCrossValidationConfig) -> \
Tuple[Dict[ModelExecutionMode, Optional[pd.DataFrame]], Path]:
result_files, root_folder = download_crossval_result_files(config)
dataframes = load_dataframes(result_files, config)
return dataframes, root_folder
def create_run_result_file_list(config: PlotCrossValidationConfig, folder: str) -> List[RunResultFiles]:
"""
Creates a list of input files for cross validation analysis, from files stored inside of the test data folder.
:param config: The overall cross validation config
:param folder: The folder to read from, inside of test_data/plot_cross_validation.
:return:
"""
full_folder = full_ml_test_data_path("plot_cross_validation") / folder
files: List[RunResultFiles] = []
previous_dataset_file = None
for split in ["0", "1"]:
for mode in config.execution_modes_to_download():
metrics_file = full_folder / split / mode.value / SUBJECT_METRICS_FILE_NAME
dataset_file: Optional[Path] = full_folder / split / DATASET_CSV_FILE_NAME
if dataset_file.exists(): # type: ignore
# Reduce amount of checked-in large files. dataset files can be large, and usually duplicate across
# runs. Store only a copy in split 0, re-use in split 1.
previous_dataset_file = dataset_file
else:
dataset_file = previous_dataset_file
if metrics_file.exists():
file = RunResultFiles(execution_mode=mode,
metrics_file=metrics_file,
dataset_csv_file=dataset_file,
run_recovery_id=config.run_recovery_id + "_" + split, # type: ignore
split_index=split)
files.append(file)
return files
def create_file_list_for_segmentation_recovery_run(test_config_ensemble: PlotCrossValidationConfig) -> \
List[RunResultFiles]:
return create_run_result_file_list(config=test_config_ensemble,
folder="main_1570466706163110")
def copy_run_result_files(files: List[RunResultFiles], src_prefix_path: Path,
dst_prefix_path: Path, transformer: Callable) -> List[RunResultFiles]:
"""
Copy dataset_csv_files from a list of RunResultFiles to a working directory, and then
transform them using a callback.
:param files: List of RunResultFiles to copy.
:param src_prefix_path: Shared prefix path for the dataset_csv_files to be removed.
:param dst_prefix_path: Shared prefix path to use for the copied dataset_csv_files.
:param transformer: Callback function to apply to the copied dataset_csv_files.
:return: New list of RunResultFiles pointing at the copied files.
"""
file_copies = []
files_copied = []
for file in files:
if not file.dataset_csv_file:
dataset_csv_file: Optional[Path] = None
else:
# Replace prefix path
dst_dataset_csv_file = dst_prefix_path / file.dataset_csv_file.relative_to(src_prefix_path)
if dst_dataset_csv_file not in files_copied:
dst_dataset_csv_file.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(file.dataset_csv_file, dst_dataset_csv_file)
files_copied.append(dst_dataset_csv_file)
transformer(dst_dataset_csv_file)
dataset_csv_file = dst_dataset_csv_file
file_copy = RunResultFiles(execution_mode=file.execution_mode,
metrics_file=file.metrics_file,
dataset_csv_file=dataset_csv_file,
run_recovery_id=file.run_recovery_id,
split_index=file.split_index)
file_copies.append(file_copy)
return file_copies
@pytest.mark.after_training_ensemble_run
@pytest.mark.parametrize("drop_column", [None, CSV_INSTITUTION_HEADER, CSV_SERIES_HEADER])
def test_metrics_preparation_for_segmentation(drop_column: Optional[str],
test_config: PlotCrossValidationConfig,
test_output_dirs: OutputFolderForTests) -> None:
"""
Test if metrics dataframes can be loaded and prepared. The files in question are checked in, but
were downloaded from a run, ID given in DEFAULT_ENSEMBLE_RUN_RECOVERY_ID.
Additionally test that CSV_INSTITUTION_HEADER or CSV_SERIES_HEADER can be dropped from the dataset_csv_file.
"""
files = create_file_list_for_segmentation_recovery_run(test_config)
if drop_column:
def drop_csv_column(path: Path) -> None:
"""
Load a csv file, drop a column, and save the csv file.
:param path: Path to csv file.
"""
df = pd.read_csv(path)
dropped_df = df.drop(drop_column, axis=1)
dropped_df.to_csv(path)
files = copy_run_result_files(files, full_ml_test_data_path(), test_output_dirs.root_dir, drop_csv_column)
downloaded_metrics = load_dataframes(files, test_config)
assert test_config.run_recovery_id
for mode in test_config.execution_modes_to_download():
expected_df = _get_metrics_df(test_config.run_recovery_id, mode)
if drop_column:
# If dropped a column from dataset_csv_file, remove it from expected dataframe.
expected_df[drop_column] = ''
# Drop the "mode" column, because that was added after creating the test data
metrics = downloaded_metrics[mode]
assert metrics is not None
actual_df = metrics.drop(COL_MODE, axis=1)
actual_df = actual_df.sort_values(list(actual_df.columns), ascending=True).reset_index(drop=True)
pd.testing.assert_frame_equal(expected_df, actual_df, check_like=True, check_dtype=False)
def load_result_files_for_classification() -> \
        Tuple[List[RunResultFiles], PlotCrossValidationConfig]:
    """Create the result file list and plotting config for the checked-in classification run."""
    run_folder = "HD_cfff5ceb-a227-41d6-a23c-0ebbc33b6301"
    plotting_config = PlotCrossValidationConfig(
        run_recovery_id="local_branch:" + run_folder,
        epoch=3,
        model_category=ModelCategory.Classification
    )
    result_files = create_run_result_file_list(config=plotting_config, folder=run_folder)
    return result_files, plotting_config
def test_metrics_preparation_for_classification() -> None:
    """
    Test if metrics from classification models can be loaded and prepared. The files in question are checked in,
    and were downloaded from a run on AzureML.
    """
    result_files, plotting_config = load_result_files_for_classification()
    downloaded_metrics = load_dataframes(result_files, plotting_config)
    assert ModelExecutionMode.TEST not in downloaded_metrics
    actual = downloaded_metrics[ModelExecutionMode.VAL]
    assert actual is not None
    expected_df_csv = full_ml_test_data_path("plot_cross_validation") / \
        "metrics_preparation_for_classification_VAL.csv"
    sort_columns = list(actual.columns)
    actual = actual.sort_values(sort_columns, ascending=True).reset_index(drop=True)
    # To write new test results:
    # actual.to_csv(expected_df_csv, index=False)
    expected = pd.read_csv(expected_df_csv).sort_values(sort_columns, ascending=True).reset_index(drop=True)
    pd.testing.assert_frame_equal(expected, actual, check_like=True, check_dtype=False)
def _test_result_aggregation_for_classification(files: List[RunResultFiles],
                                                plotting_config: PlotCrossValidationConfig,
                                                expected_aggregate_metrics: List[str],
                                                expected_epochs: Set[int]) -> None:
    """
    Test how metrics are aggregated for cross validation runs on classification models.

    :param files: The per-split result files to aggregate.
    :param plotting_config: Plotting config, including the output directory to write to.
    :param expected_aggregate_metrics: Expected CSV lines (without the header) of the aggregate file.
    :param expected_epochs: Epoch numbers that must appear in the per-subject metrics file.
    """
    print(f"Writing aggregated metrics to {plotting_config.outputs_directory}")
    root_folder = plotting_config.outputs_directory
    plot_cross_validation_from_files(OfflineCrossvalConfigAndFiles(config=plotting_config, files=files),
                                     root_folder=root_folder)
    # The aggregate file must match the expected lines exactly, header included.
    aggregates_file = root_folder / METRICS_AGGREGATES_FILE
    actual_aggregates = aggregates_file.read_text().splitlines()
    header_line = "prediction_target,area_under_roc_curve,area_under_pr_curve,accuracy_at_optimal_threshold," \
                  "false_positive_rate_at_optimal_threshold,false_negative_rate_at_optimal_threshold," \
                  "optimal_threshold,cross_entropy,accuracy_at_threshold_05,subject_count,data_split,epoch"
    expected_aggregate_metrics = [header_line] + expected_aggregate_metrics
    assert len(actual_aggregates) == len(expected_aggregate_metrics), "Number of lines in aggregated metrics file"
    for i, (actual, expected) in enumerate(zip(actual_aggregates, expected_aggregate_metrics)):
        assert actual == expected, f"Mismatch in aggregate metrics at index {i}"
    # Sanity checks on the per-subject metrics dataframe written alongside the aggregates.
    per_subject_metrics = pd.read_csv(root_folder / FULL_METRICS_DATAFRAME_FILE)
    assert LoggingColumns.Label.value in per_subject_metrics
    assert set(per_subject_metrics[LoggingColumns.Label.value].unique()) == {0.0, 1.0}
    assert LoggingColumns.ModelOutput.value in per_subject_metrics
    assert LoggingColumns.Patient.value in per_subject_metrics
    # NOTE(review): 356 unique subjects appears to be a property of the checked-in test data — confirm if the data changes.
    assert len(per_subject_metrics[LoggingColumns.Patient.value].unique()) == 356
    assert LoggingColumns.Epoch.value in per_subject_metrics
    assert set(per_subject_metrics[LoggingColumns.Epoch.value].unique()) == expected_epochs
    assert LoggingColumns.CrossValidationSplitIndex.value in per_subject_metrics
    assert set(per_subject_metrics[LoggingColumns.CrossValidationSplitIndex.value].unique()) == {0, 1}
    assert LoggingColumns.DataSplit.value in per_subject_metrics
    assert per_subject_metrics[LoggingColumns.DataSplit.value].unique() == ["Val"]
def test_result_aggregation_for_classification(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test how metrics are aggregated for cross validation runs on classification models.
    """
    result_files, plotting_config = load_result_files_for_classification()
    plotting_config.outputs_directory = test_output_dirs.root_dir
    plotting_config.epoch = 3
    aggregate_line = "Default,0.75740,0.91814,0.66854,0.23684,0.35357,0.44438,0.73170,0.33427,356.00000,Val,3"
    _test_result_aggregation_for_classification(result_files, plotting_config,
                                                expected_aggregate_metrics=[aggregate_line],
                                                expected_epochs={plotting_config.epoch})
    # Aggregation must also copy the dataset.csv file into the output folder.
    dataset_csv = plotting_config.outputs_directory / DATASET_CSV_FILE_NAME
    assert dataset_csv.exists()
def test_invalid_number_of_cv_files() -> None:
    """
    Test that an error is raised if the expected number of cross validation fold
    is not equal to the number of results files provided.
    """
    result_files, plotting_config = load_result_files_for_classification()
    # Claim more splits than there are result files: aggregation must refuse to run.
    plotting_config.number_of_cross_validation_splits = 4
    print(f"Writing aggregated metrics to {plotting_config.outputs_directory}")
    config_and_files = OfflineCrossvalConfigAndFiles(config=plotting_config, files=result_files)
    with pytest.raises(ValueError):
        plot_cross_validation_from_files(config_and_files,
                                         root_folder=plotting_config.outputs_directory)
def test_check_result_file_counts() -> None:
    """
    More tests on the function that checks the number of files of each ModeExecutionMode.
    """
    val_files, plotting_config = load_result_files_for_classification()
    # This test assumes that the loaded val_files all have mode Val
    assert all(file.execution_mode == ModelExecutionMode.VAL for file in val_files)
    plotting_config.number_of_cross_validation_splits = len(val_files)

    # When just the Val files are present, the check must not throw.
    check_result_file_counts(OfflineCrossvalConfigAndFiles(config=plotting_config, files=val_files))

    def as_test_file(file: RunResultFiles) -> RunResultFiles:
        """Clone a result file record, switching its execution mode to TEST."""
        return RunResultFiles(execution_mode=ModelExecutionMode.TEST,
                              metrics_file=file.metrics_file,
                              dataset_csv_file=file.dataset_csv_file,
                              run_recovery_id=file.run_recovery_id,
                              split_index=file.split_index)

    # When the same number of Test files is added, the check must still not throw.
    test_files = [as_test_file(file) for file in val_files]
    check_result_file_counts(OfflineCrossvalConfigAndFiles(config=plotting_config,
                                                           files=val_files + test_files))

    # With the right total count but a mixture of modes, the check must throw.
    mixed = OfflineCrossvalConfigAndFiles(config=plotting_config, files=val_files[:1] + test_files[1:])
    with pytest.raises(ValueError):
        check_result_file_counts(mixed)
def test_result_aggregation_for_classification_all_epochs(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test how metrics are aggregated for classification models, when no epoch is specified.
    """
    result_files, plotting_config = load_result_files_for_classification()
    plotting_config.outputs_directory = test_output_dirs.root_dir
    plotting_config.epoch = None
    # With no epoch pinned, one aggregate line per available epoch is expected.
    expected_aggregates = [
        "Default,0.72361,0.90943,0.55618,0.13158,0.52500,0.33307,0.95800,0.21348,356.00000,Val,1",
        "Default,0.75919,0.91962,0.65169,0.19737,0.38571,0.38873,0.82669,0.21348,356.00000,Val,2",
        "Default,0.75740,0.91814,0.66854,0.23684,0.35357,0.44438,0.73170,0.33427,356.00000,Val,3",
    ]
    _test_result_aggregation_for_classification(result_files, plotting_config,
                                                expected_aggregate_metrics=expected_aggregates,
                                                expected_epochs={1, 2, 3})
@pytest.mark.after_training_ensemble_run
def test_add_comparison_data(test_config_comparison: PlotCrossValidationConfig) -> None:
    """Test that comparison-run metrics are merged in alongside the focus run."""
    test_config_comparison.epoch = 2
    metrics_df, root_folder = download_metrics(test_config_comparison)
    initial_metrics = pd.concat(list(metrics_df.values()))

    all_metrics, focus_splits = add_comparison_data(test_config_comparison, initial_metrics)

    focus_split = test_config_comparison.run_recovery_id
    comparison_split = test_config_comparison.comparison_run_recovery_ids[0]
    assert focus_splits == [focus_split]
    assert set(all_metrics.split) == {focus_split, comparison_split}
@pytest.mark.after_training_ensemble_run
def test_save_outliers(test_config: PlotCrossValidationConfig,
                       test_output_dirs: OutputFolderForTests) -> None:
    """Test to make sure the outlier file for a split is as expected"""
    test_config.outputs_directory = test_output_dirs.root_dir
    test_config.outlier_range = 0
    assert test_config.run_recovery_id
    modes = [ModelExecutionMode.VAL]

    def check_outliers(split_metrics: dict, expected_file_name: str) -> None:
        """Save outliers for the given metrics and compare against a checked-in file."""
        outliers_paths = save_outliers(test_config, split_metrics, test_config.outputs_directory)
        assert_text_files_match(full_file=outliers_paths[ModelExecutionMode.VAL],
                                expected_file=full_ml_test_data_path(expected_file_name))

    check_outliers({x: _get_metrics_df(test_config.run_recovery_id, x) for x in modes},
                   f"{ModelExecutionMode.VAL.value}_outliers.txt")
    # Now test without the CSV_INSTITUTION_HEADER and CSV_SERIES_HEADER columns, which will be
    # missing in institutions' environments.
    pruned_metrics = {
        x: _get_metrics_df(test_config.run_recovery_id, x).drop(
            columns=[CSV_INSTITUTION_HEADER, CSV_SERIES_HEADER], errors="ignore")
        for x in modes}
    check_outliers(pruned_metrics, f"{ModelExecutionMode.VAL.value}_outliers_pruned.txt")
def test_create_summary(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test that summaries of CV performance per mode, and per mode per structure, look like they should.
    """
    df = pd.read_csv(full_ml_test_data_path("MetricsAcrossAllRuns.csv"))
    by_structure_file, by_mode_file = create_results_breakdown(df, test_output_dirs.root_dir)
    # Each generated summary must match its checked-in counterpart byte-for-byte.
    expected_by_structure = full_ml_test_data_path(METRICS_BY_MODE_AND_STRUCTURE_FILE)
    expected_by_mode = full_ml_test_data_path(METRICS_BY_MODE_FILE)
    assert by_structure_file.read_text() == expected_by_structure.read_text()
    assert by_mode_file.read_text() == expected_by_mode.read_text()
def test_plot_config() -> None:
    """
    Test that plotting configurations have the correct error handling.
    """
    # A config without a run recovery ID must fail validation...
    with pytest.raises(ValueError):
        PlotCrossValidationConfig()
    # ...but providing one (plus an epoch) validates cleanly.
    PlotCrossValidationConfig(run_recovery_id="foo", epoch=1)
def test_get_split_index() -> None:
    """
    Test that get_split_index returns the full run ID only when the
    split index itself is negative.
    """
    def tags_for(split_index: str) -> dict:
        """Build an AzureML tag dictionary with the given split index."""
        return {CROSS_VALIDATION_SPLIT_INDEX_TAG_KEY: split_index,
                RUN_RECOVERY_ID_KEY: "foo_bar_23"}

    # Negative split index: fall back to the run recovery ID.
    assert get_split_id(tags_for("-1")) == "foo_bar_23"
    # Non-negative split index: use the index itself.
    assert get_split_id(tags_for("42")) == "42"
@pytest.mark.after_training_single_run
@pytest.mark.parametrize("is_current_run", [True, False])
def test_download_or_get_local_blobs(is_current_run: bool,
                                     test_config: PlotCrossValidationConfig,
                                     test_output_dirs: OutputFolderForTests) -> None:
    """Test fetching a single blob either from the current run or from a recovered run."""
    azure_config = get_default_azure_config()
    azure_config.get_workspace()
    assert test_config.run_recovery_id is not None
    if is_current_run:
        run = Run.get_context()
        test_config.outputs_directory = full_ml_test_data_path()
    else:
        run = azure_config.fetch_run(test_config.run_recovery_id)
        test_config.outputs_directory = Path(DEFAULT_AML_UPLOAD_DIR)
    dst = test_config.download_or_get_local_file(
        blob_to_download="dataset.csv",
        destination=test_output_dirs.root_dir,
        run=run
    )
    assert dst is not None
    assert dst.exists()
def test_download_or_get_local_file_2(test_output_dirs: OutputFolderForTests) -> None:
    """Test that local result files are copied into the download folder with their paths preserved."""
    config = PlotCrossValidationConfig(run_recovery_id=None,
                                       model_category=ModelCategory.Classification,
                                       epoch=None,
                                       should_validate=False)
    download_to_folder = test_output_dirs.root_dir / CROSSVAL_RESULTS_FOLDER
    config.outputs_directory = download_to_folder
    local_results = full_ml_test_data_path("plot_cross_validation") / "HD_cfff5ceb-a227-41d6-a23c-0ebbc33b6301"
    config.local_run_results = str(local_results)

    def fetch(blob_path) -> None:
        """Fetch a file via the config and check it lands at the mirrored location."""
        copied = config.download_or_get_local_file(None,
                                                   blob_path,
                                                   download_to_folder)
        assert copied is not None
        assert copied == download_to_folder / blob_path

    # A file that sits in the root folder of the local_results should be downloaded into the
    # root of the download_to folder
    fetch("dummy.txt")
    # Copying a file in a sub-folder of the local_results: The full path to the file should be
    # preserved and created in the download_to folder.
    fetch(Path("0") / "Val" / "metrics.csv")
@pytest.mark.skip(reason="This test is only used to create input for test_load_files_with_prediction_target")
def test_run_ml_with_multi_label_sequence_in_crossval(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test training and testing of sequence models that predicts at multiple time points,
    including aggregation of cross validation results.

    Normally skipped: run manually to regenerate the checked-in input data for
    test_load_files_with_prediction_target.
    """
    logging_to_stdout()
    config = ToyMultiLabelSequenceModel(should_validate=False)
    assert config.get_target_indices() == [1, 2, 3]
    expected_prediction_targets = ["Seq_pos 01", "Seq_pos 02", "Seq_pos 03"]
    target_indices = config.get_target_indices()
    assert target_indices
    assert len(target_indices) == len(expected_prediction_targets)
    config.set_output_to(test_output_dirs.root_dir)
    config.dataset_data_frame = _get_multi_label_sequence_dataframe()
    config.pre_process_dataset_dataframe()
    # Keep the run short: one epoch over a 2-fold crossval is enough to produce result files.
    config.num_epochs = 1
    config.number_of_cross_validation_splits = 2
    azure_config = get_default_azure_config()
    azure_config.train = True
    MLRunner(config, azure_config=azure_config).run()
def test_load_files_with_prediction_target() -> None:
    """
    For multi-week RNNs that predict at multiple sequence points: Test that the dataframes
    including the prediction_target column can be loaded.
    """
    plotting_config = PlotCrossValidationConfig(
        run_recovery_id="foo",
        epoch=1,
        model_category=ModelCategory.Classification
    )
    result_files = create_run_result_file_list(plotting_config, "multi_label_sequence_in_crossval")

    downloaded_metrics = load_dataframes(result_files, plotting_config)

    assert ModelExecutionMode.TEST not in downloaded_metrics
    metrics = downloaded_metrics[ModelExecutionMode.VAL]
    assert metrics is not None
    assert LoggingColumns.Hue.value in metrics
    # The prediction target column should always be read as a string, because we will later use it to create
    # hue values for a MetricsDict.
    assert is_string_dtype(metrics[LoggingColumns.Hue.value].dtype)
    assert LoggingColumns.Epoch.value in metrics
    assert LoggingColumns.Patient.value in metrics
    assert len(metrics[LoggingColumns.Hue.value].unique()) == 3
    # Each of the two CV folds has 2 distinct subjects
    assert len(metrics[LoggingColumns.Patient.value].unique()) == 4
def test_aggregate_files_with_prediction_target(test_output_dirs: OutputFolderForTests) -> None:
    """
    For multi-week RNNs that predict at multiple sequence points: Test that the dataframes
    including the prediction_target column can be aggregated.
    """
    plotting_config = PlotCrossValidationConfig(
        run_recovery_id="foo",
        epoch=1,
        model_category=ModelCategory.Classification
    )
    result_files = create_run_result_file_list(plotting_config, "multi_label_sequence_in_crossval")
    root_folder = test_output_dirs.root_dir
    print(f"Writing result files to {root_folder}")
    config_and_files = OfflineCrossvalConfigAndFiles(config=plotting_config, files=result_files)
    plot_cross_validation_from_files(config_and_files, root_folder=root_folder)
| 52.858871 | 135 | 0.723511 |
91935c47b776cba919f1fba89a36002aef21670e | 33,346 | py | Python | tests/pyfilter/operations/test_ipoverrides.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 136 | 2015-01-03T04:03:23.000Z | 2022-02-07T11:08:57.000Z | tests/pyfilter/operations/test_ipoverrides.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 11 | 2017-02-09T20:05:04.000Z | 2021-01-24T22:25:59.000Z | tests/pyfilter/operations/test_ipoverrides.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 26 | 2015-08-18T12:11:02.000Z | 2020-12-19T01:53:31.000Z | """Test the ht.pyfilter.operations.ipoverrides module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library
import argparse
import copy
# Third Party
import pytest
# Houdini Toolbox
from ht.pyfilter.manager import PyFilterManager
from ht.pyfilter.operations import ipoverrides
# Houdini
import hou
# Default values for every IpOverrides attribute, keyed by property name.  Used by the
# init_operation fixture to build a fully-populated operation instance; individual tests
# override only the entries they care about.
_DEFAULTS = {
    "bucket_size": None,
    "disable_aovs": False,
    "disable_blur": False,
    "disable_deep": False,
    "disable_displacement": False,
    "disable_matte": False,
    "disable_subd": False,
    "disable_tilecallback": False,
    "res_scale": None,
    "sample_scale": None,
    "transparent_samples": None,
}
# =============================================================================
# FIXTURES
# =============================================================================
@pytest.fixture
def init_operation(mocker):
    """Fixture to initialize an operation."""
    # Bypass the real __init__ so no PyFilterManager wiring happens during construction.
    mocker.patch.object(ipoverrides.IpOverrides, "__init__", lambda x, y: None)

    def _create(
        prop_map: dict = None, as_properties: bool = False
    ) -> ipoverrides.IpOverrides:
        """Function which instantiates the operation.

        :param prop_map: Map of property name:values to set.
        :param as_properties: Whether or not to set the values as properties.
        :return: A basic instantiated IpOverrides object.
        """
        op = ipoverrides.IpOverrides(None)

        if prop_map is None:
            prop_map = {}

        # Start from the full default map so every attribute/property exists.
        values = copy.deepcopy(_DEFAULTS)
        values.update(prop_map)

        for key, value in list(values.items()):
            if as_properties:
                # Install a PropertyMock on the *class*: a type value becomes a spec'd
                # mock of that type, any other value is returned as-is on access.
                if isinstance(value, type):
                    prop = mocker.PropertyMock(spec=value)

                else:
                    prop = mocker.PropertyMock(return_value=value)

                setattr(type(op), key, prop)

            else:
                # Plain attribute path: set the private backing attribute directly.
                setattr(op, "_{}".format(key), value)

        return op

    return _create
@pytest.fixture
def properties(mocker):
    """Fixture to handle mocking (get|set)_property calls."""
    patched_get = mocker.patch("ht.pyfilter.operations.ipoverrides.get_property")
    patched_set = mocker.patch("ht.pyfilter.operations.ipoverrides.set_property")

    class Properties:
        """Fake class for accessing and setting properties."""

        @property
        def mock_get(self):
            """Access the patched get_property mock."""
            return patched_get

        @property
        def mock_set(self):
            """Access the patched set_property mock."""
            return patched_set

    return Properties()
# =============================================================================
# TESTS
# =============================================================================
class Test_IpOverrides:
"""Test the ht.pyfilter.operations.ipoverrides.IpOverride class."""
def test___init__(self, mocker):
"""Test object initialization."""
mock_super_init = mocker.patch.object(ipoverrides.PyFilterOperation, "__init__")
mock_manager = mocker.MagicMock(spec=PyFilterManager)
op = ipoverrides.IpOverrides(mock_manager)
mock_super_init.assert_called_with(mock_manager)
assert op._bucket_size is None
assert not op._disable_aovs
assert not op._disable_blur
assert not op._disable_deep
assert not op._disable_displacement
assert not op._disable_matte
assert not op._disable_subd
assert op._res_scale is None
assert op._sample_scale is None
assert op._transparent_samples is None
# Properties
def test_bucket_size(self, init_operation, mocker):
"""Test the 'bucket_size' property."""
mock_value = mocker.MagicMock(spec=int)
op = init_operation({"bucket_size": mock_value})
assert op.bucket_size == mock_value
def test_disable_aovs(self, init_operation, mocker):
"""Test the 'disable_aovs' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation({"disable_aovs": mock_value})
assert op.disable_aovs == mock_value
def test_disable_blur(self, init_operation, mocker):
"""Test the 'disable_blur' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation({"disable_blur": mock_value})
assert op.disable_blur == mock_value
def test_disable_deep(self, init_operation, mocker):
"""Test the 'disable_deep' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation({"disable_deep": mock_value})
assert op.disable_deep == mock_value
def test_disable_displacement(self, init_operation, mocker):
"""Test the 'disable_displacement' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation({"disable_displacement": mock_value})
assert op.disable_displacement == mock_value
def test_disable_matte(self, init_operation, mocker):
"""Test the 'disable_matte' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation({"disable_matte": mock_value})
assert op.disable_matte == mock_value
def test_disable_subd(self, init_operation, mocker):
"""Test the 'disable_subd' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation({"disable_subd": mock_value})
assert op.disable_subd == mock_value
def test_disable_tilecallback(self, init_operation, mocker):
"""Test the 'disable_tilecallback' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation({"disable_tilecallback": mock_value})
assert op.disable_tilecallback == mock_value
def test_res_scale(self, init_operation, mocker):
"""Test the 'res_scale' property."""
mock_value = mocker.MagicMock(spec=float)
op = init_operation({"res_scale": mock_value})
assert op.res_scale == mock_value
def test_sample_scale(self, init_operation, mocker):
"""Test the 'sample_scale' property."""
mock_value = mocker.MagicMock(spec=float)
op = init_operation({"sample_scale": mock_value})
assert op.sample_scale == mock_value
def test_transparent_samples(self, init_operation, mocker):
"""Test the 'transparent_samples' property."""
mock_value = mocker.MagicMock(spec=int)
op = init_operation({"transparent_samples": mock_value})
assert op.transparent_samples == mock_value
# Static Methods
def test_build_arg_string(self):
"""Test arg string construction."""
result = ipoverrides.IpOverrides.build_arg_string()
assert result == ""
# Res scale
result = ipoverrides.IpOverrides.build_arg_string(res_scale=0.5)
assert result == "--ip-res-scale=0.5"
# Sample scale
result = ipoverrides.IpOverrides.build_arg_string(sample_scale=0.5)
assert result == "--ip-sample-scale=0.5"
# Disable blur
result = ipoverrides.IpOverrides.build_arg_string(disable_blur=True)
assert result == "--ip-disable-blur"
# Disable aovs
result = ipoverrides.IpOverrides.build_arg_string(disable_aovs=True)
assert result == "--ip-disable-aovs"
# Disable deeps
result = ipoverrides.IpOverrides.build_arg_string(disable_deep=True)
assert result == "--ip-disable-deep"
# Disable displacements
result = ipoverrides.IpOverrides.build_arg_string(disable_displacement=True)
assert result == "--ip-disable-displacement"
# Disable matte and phantom objects
result = ipoverrides.IpOverrides.build_arg_string(disable_matte=True)
assert result == "--ip-disable-matte"
# Disable subdivision surfaces
result = ipoverrides.IpOverrides.build_arg_string(disable_subd=True)
assert result == "--ip-disable-subd"
# Disable tilecallback
result = ipoverrides.IpOverrides.build_arg_string(disable_tilecallback=True)
assert result == "--ip-disable-tilecallback"
# Set the bucket size
result = ipoverrides.IpOverrides.build_arg_string(bucket_size=16)
assert result == "--ip-bucket-size=16"
# Set the stochastic samples
result = ipoverrides.IpOverrides.build_arg_string(transparent_samples=3)
assert result == "--ip-transparent-samples=3"
    def test_register_parser_args(self, mocker):
        """Test registering all the argument parser args."""
        mock_parser = mocker.MagicMock(spec=argparse.ArgumentParser)

        ipoverrides.IpOverrides.register_parser_args(mock_parser)

        # One add_argument call is expected per supported override flag.
        calls = [
            mocker.call(
                "--ip-res-scale", default=None, type=float, dest="ip_res_scale"
            ),
            mocker.call(
                "--ip-sample-scale", default=None, type=float, dest="ip_sample_scale"
            ),
            mocker.call(
                "--ip-disable-blur", action="store_true", dest="ip_disable_blur"
            ),
            mocker.call(
                "--ip-disable-aovs", action="store_true", dest="ip_disable_aovs"
            ),
            mocker.call(
                "--ip-disable-deep", action="store_true", dest="ip_disable_deep"
            ),
            mocker.call(
                "--ip-disable-displacement",
                action="store_true",
                dest="ip_disable_displacement",
            ),
            mocker.call(
                "--ip-disable-subd", action="store_true", dest="ip_disable_subd"
            ),
            mocker.call(
                "--ip-disable-tilecallback",
                action="store_true",
                dest="ip_disable_tilecallback",
            ),
            mocker.call(
                "--ip-disable-matte", action="store_true", dest="ip_disable_matte"
            ),
            mocker.call(
                "--ip-bucket-size",
                nargs="?",
                default=None,
                type=int,
                action="store",
                dest="ip_bucket_size",
            ),
            mocker.call(
                "--ip-transparent-samples",
                nargs="?",
                default=None,
                type=int,
                action="store",
                dest="ip_transparent_samples",
            ),
        ]
        mock_parser.add_argument.assert_has_calls(calls)
# Methods
# filter_camera
def test_filter_camera__res_scale(
self, patch_operation_logger, init_operation, properties, mocker
):
"""Test 'filter_camera' when scaling the resolution."""
mock_scale = mocker.patch(
"ht.pyfilter.operations.ipoverrides._scale_resolution"
)
op = init_operation({"res_scale": int}, as_properties=True)
op.filter_camera()
properties.mock_get.assert_called_with("image:resolution")
mock_scale.assert_called_with(properties.mock_get.return_value, op.res_scale)
properties.mock_set.assert_called_with(
"image:resolution", mock_scale.return_value
)
def test_filter_camera__sample_scale(
self, init_operation, properties, patch_operation_logger, mocker
):
"""Test 'filter_camera' when scaling the samples."""
mock_scale = mocker.patch("ht.pyfilter.operations.ipoverrides._scale_samples")
op = init_operation({"sample_scale": float}, as_properties=True)
op.filter_camera()
properties.mock_get.assert_called_with("image:samples")
mock_scale.assert_called_with(properties.mock_get.return_value, op.sample_scale)
properties.mock_set.assert_called_with("image:samples", mock_scale.return_value)
def test_filter_camera__bucket_size(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_camera' when setting the bucket size."""
op = init_operation({"bucket_size": int}, as_properties=True)
op.filter_camera()
properties.mock_set.assert_called_with("image:bucket", op.bucket_size)
def test_filter_camera__disable_blur(
self, init_operation, properties, patch_operation_logger, mocker
):
"""Test 'filter_camera' when disabling motion blur."""
op = init_operation({"disable_blur": True}, as_properties=True)
op.filter_camera()
properties.mock_set.has_calls(
[
mocker.call("renderer:blurquality", 0),
mocker.call("renderer:rayblurquality", 0),
]
)
def test_filter_camera__disable_deep(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_camera' when disabling motion deep images."""
op = init_operation({"disable_deep": True}, as_properties=True)
op.filter_camera()
properties.mock_set.assert_called_with("image:deepresolver", [])
def test_filter_camera__disable_tilecallback(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_camera' when disabling the tile callback."""
op = init_operation({"disable_tilecallback": True}, as_properties=True)
op.filter_camera()
properties.mock_set.assert_called_with("render:tilecallback", "")
def test_filter_camera__transparent_samples(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_camera' when setting the transparent samples."""
op = init_operation({"transparent_samples": int}, as_properties=True)
op.filter_camera()
properties.mock_set.assert_any_call(
"image:transparentsamples", op.transparent_samples
)
# filter_instance
def test_filter_instance__disable_displacement(
self, init_operation, patch_operation_logger, properties
):
"""Test 'filter_instance' when disabling displacements."""
op = init_operation({"disable_displacement": True}, as_properties=True)
op.filter_instance()
properties.mock_set.assert_called_with("object:displace", [])
def test_filter_instance__disable_subd(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_instance' when disabling subd's."""
op = init_operation({"disable_subd": True}, as_properties=True)
op.filter_instance()
properties.mock_set.assert_called_with("object:rendersubd", 0)
    def test_filter_instance__disable_matte_matte_object(
        self, init_operation, properties, patch_operation_logger
    ):
        """Test 'filter_instance' when disabling matte on an object which
        is rendering as a matte object.
        """
        op = init_operation({"disable_matte": True}, as_properties=True)

        # Pretend the object has 'object:matte' enabled.
        values = {"object:matte": True, "object:phantom": False}
        properties.mock_get.side_effect = values.get

        op.filter_instance()

        properties.mock_get.assert_called_with("object:matte")
        # Matte objects are dropped from the render entirely.
        properties.mock_set.assert_called_with("object:renderable", False)
    def test_filter_instance__disable_matte_phantom_object(
        self, init_operation, properties, patch_operation_logger, mocker
    ):
        """Test 'filter_instance' when disabling matte on an object which
        is rendering as a phantom object.
        """
        op = init_operation({"disable_matte": True}, as_properties=True)

        # 'object:matte' is off but 'object:phantom' is on.
        values = {"object:matte": False, "object:phantom": True}
        properties.mock_get.side_effect = values.get

        op.filter_instance()

        # Both flags are checked in order before the object is dropped.
        properties.mock_get.assert_has_calls(
            [mocker.call("object:matte"), mocker.call("object:phantom")]
        )
        properties.mock_set.assert_called_with("object:renderable", False)
    def test_filter_instance__disable_matte_surface_matte(
        self, init_operation, properties, patch_operation_logger, mocker
    ):
        """Test 'filter_instance' when disabling matte on an object which
        is has a matte shader applied.
        """
        op = init_operation({"disable_matte": True}, as_properties=True)

        # Neither object flag is set, but the surface shader is the matte shader.
        values = {
            "object:matte": False,
            "object:phantom": False,
            "object:surface": "opdef:/Shop/v_matte",
        }
        properties.mock_get.side_effect = values.get

        op.filter_instance()

        # All three properties are inspected before the object is dropped.
        properties.mock_get.assert_has_calls(
            [
                mocker.call("object:matte"),
                mocker.call("object:phantom"),
                mocker.call("object:surface"),
            ]
        )
        properties.mock_set.assert_called_with("object:renderable", False)
    def test_filter_instance__disable_matte_noop(
        self, init_operation, properties, patch_operation_logger, mocker
    ):
        """Test 'filter_instance' when disabling matte when the object is
        not a matte.
        """
        op = init_operation({"disable_matte": True}, as_properties=True)

        # No matte flags and a non-matte surface shader: nothing should change.
        values = {
            "object:matte": False,
            "object:phantom": False,
            "object:surface": "opdef:/Shop/v_thing",
        }
        properties.mock_get.side_effect = values.get

        op.filter_instance()

        properties.mock_get.assert_has_calls(
            [
                mocker.call("object:matte"),
                mocker.call("object:phantom"),
                mocker.call("object:surface"),
            ]
        )
        # The object must remain renderable.
        properties.mock_set.assert_not_called()
# filter_material
def test_filter_material__disable_displacement(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_material' when disabling displacement."""
op = init_operation({"disable_displacement": True}, as_properties=True)
op.filter_material()
properties.mock_set.assert_called_with("object:displace", [])
def test_filter_material__no_disable(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_material' when not disabling displacement."""
op = init_operation({"disable_displacement": False}, as_properties=True)
op.filter_material()
properties.mock_set.assert_not_called()
# filter_plane
def test_filter_plane__noop(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_plane' when not disabling."""
op = init_operation()
op.filter_plane()
properties.mock_get.assert_not_called()
properties.mock_set.assert_not_called()
def test_filter_plane__disable_aovs(
self, init_operation, properties, patch_operation_logger, mocker
):
"""Test 'filter_plane' when disabling a plane which can be disabled."""
properties.mock_get.return_value = mocker.MagicMock(spec=str)
op = init_operation({"disable_aovs": True}, as_properties=True)
op.filter_plane()
properties.mock_set.assert_called_with("plane:disable", 1)
def test_filter_plane__disable_aovs_cf(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_plane' when disabling just a 'Cf' plane."""
properties.mock_get.return_value = "Cf"
op = init_operation({"disable_aovs": True}, as_properties=True)
op.filter_plane()
properties.mock_set.assert_called_with("plane:disable", 1)
def test_filter_plane__disable_aovs_cf_af(
self, init_operation, properties, patch_operation_logger
):
"""Test 'filter_plane' when disabling just a 'Cf+Af' plane."""
properties.mock_get.return_value = "Cf+Af"
op = init_operation({"disable_aovs": True}, as_properties=True)
op.filter_plane()
properties.mock_set.assert_not_called()
# process_parsed_args
    def test_process_parsed_args(self, init_operation):
        """Test processing parsed args when all args are set."""
        namespace = argparse.Namespace()
        namespace.ip_res_scale = 0.5
        namespace.ip_sample_scale = 0.75
        namespace.ip_disable_aovs = True
        namespace.ip_disable_blur = True
        namespace.ip_disable_deep = True
        namespace.ip_disable_displacement = True
        namespace.ip_disable_matte = True
        namespace.ip_disable_subd = True
        namespace.ip_bucket_size = 16
        namespace.ip_transparent_samples = 3
        namespace.ip_disable_tilecallback = True

        op = init_operation()

        op.process_parsed_args(namespace)

        # Every attribute supplied on the namespace must be copied onto the
        # operation's private settings.
        assert op._res_scale == 0.5
        assert op._sample_scale == 0.75
        assert op._disable_aovs
        assert op._disable_blur
        assert op._disable_deep
        assert op._disable_displacement
        assert op._disable_matte
        assert op._disable_subd
        assert op._disable_tilecallback
        assert op._bucket_size == 16
        assert op._transparent_samples == 3
    def test_process_parsed_args__noop(self, init_operation):
        """Test processing parsed args when no args are set."""
        # None / False namespace values mean "no override requested".
        namespace = argparse.Namespace()
        namespace.ip_res_scale = None
        namespace.ip_sample_scale = None
        namespace.ip_bucket_size = None
        namespace.ip_transparent_samples = None
        namespace.ip_disable_aovs = False
        namespace.ip_disable_blur = False
        namespace.ip_disable_deep = False
        namespace.ip_disable_displacement = False
        namespace.ip_disable_matte = False
        namespace.ip_disable_subd = False
        namespace.ip_disable_tilecallback = False

        op = init_operation()

        # Pre-seed the operation with existing values that must survive the
        # no-op namespace.
        op._res_scale = 0.5
        op._sample_scale = 0.75
        op._bucket_size = 16
        op._transparent_samples = 3

        op._disable_aovs = False
        op._disable_blur = False
        op._disable_deep = False
        op._disable_displacement = False
        op._disable_matte = False
        op._disable_subd = False
        op._disable_tilecallback = False

        op.process_parsed_args(namespace)

        # The scalar settings must be left untouched (None means "keep").
        assert op._res_scale == 0.5
        assert op._sample_scale == 0.75
        assert op._bucket_size == 16
        assert op._transparent_samples == 3
# should_run
    def test_should_run(self, init_operation, properties):
        """Test whether or not the operation should run."""
        # The values used here (float/int/True) are placeholder sentinels:
        # should_run() only cares that an override attribute is set at all.
        op = init_operation({"res_scale": float}, as_properties=True)

        # Not ip, so it can't run.
        assert not op.should_run()

        # Need to mimic rendering to ip.
        properties.mock_get.return_value = "ip"

        op = init_operation(as_properties=True)

        # Can't run if ip but nothing else is set.
        assert not op.should_run()

        # Values to check
        data = dict(
            bucket_size=int,
            disable_aovs=True,
            disable_blur=True,
            disable_deep=True,
            disable_displacement=True,
            disable_matte=True,
            disable_subd=True,
            disable_tilecallback=True,
            res_scale=float,
            sample_scale=float,
            transparent_samples=int,
        )

        # Create an operation with each property set and ip set.
        for key, value in list(data.items()):
            op = init_operation({key: value}, as_properties=True)

            assert op.should_run()
@pytest.mark.parametrize(
    "resolution,scale,expected",
    [
        ((1920, 1080), 1.0, [1920, 1080]),
        ((1920, 1080), 0.5, [960, 540]),
        # Non-integral results are rounded to whole pixels by the helper.
        ((1920, 1080), 0.333, [639, 360]),
    ],
)
def test__scale_resolution(resolution, scale, expected):
    """Test the ht.pyfilter.operations.ipoverrides._scale_resolution."""
    assert ipoverrides._scale_resolution(resolution, scale) == expected
@pytest.mark.parametrize(
    "samples,scale,expected",
    [((10, 10), 1.0, [10, 10]), ((10, 10), 0.5, [5, 5]), ((10, 10), 0.333, [4, 4])],
)
def test__scale_sample_value(samples, scale, expected):
    """Test the ht.pyfilter.operations.ipoverrides._scale_samples."""
    # NOTE(review): the test function name says "_scale_sample_value" but the
    # helper actually exercised is "_scale_samples"; the docstring above has
    # been corrected to name the real target.
    assert ipoverrides._scale_samples(samples, scale) == expected
class Test_build_arg_string_from_node:
    """Test the ht.pyfilter.operations.ipoverrides.build_arg_string_from_node."""

    def test(self, mocker):
        """Test with scaling."""
        mock_build = mocker.patch(
            "ht.pyfilter.operations.ipoverrides.IpOverrides.build_arg_string"
        )

        mock_node = mocker.MagicMock()
        # With every parm evaluating to 0 the override is disabled, so an
        # empty argument string is expected.
        mock_node.evalParm.return_value = 0

        assert ipoverrides.build_arg_string_from_node(mock_node) == ""

        # Full parm table: overrides enabled, camera-res and transparency on.
        parm_data = {
            "enable_ip_override": 1,
            "ip_override_camerares": 1,
            "ip_res_fraction": 0.5,
            "ip_transparent": 1,
            "ip_transparent_samples": 3,
            "ip_sample_scale": 0.5,
            "ip_disable_blur": 1,
            "ip_disable_aovs": 1,
            "ip_disable_deep": 1,
            "ip_disable_displacement": 1,
            "ip_disable_matte": 1,
            "ip_disable_subd": 1,
            "ip_disable_tilecallback": 1,
            "ip_bucket_size": 16,
        }

        # Answer each evalParm() call from the table above.
        mock_node.evalParm.side_effect = lambda name: parm_data[name]

        assert (
            ipoverrides.build_arg_string_from_node(mock_node) == mock_build.return_value
        )

        mock_build.assert_called_with(
            res_scale=parm_data["ip_res_fraction"],
            sample_scale=parm_data["ip_sample_scale"],
            disable_blur=parm_data["ip_disable_blur"],
            disable_aovs=parm_data["ip_disable_aovs"],
            disable_deep=parm_data["ip_disable_deep"],
            disable_displacement=parm_data["ip_disable_displacement"],
            disable_matte=parm_data["ip_disable_matte"],
            disable_subd=parm_data["ip_disable_subd"],
            disable_tilecallback=parm_data["ip_disable_tilecallback"],
            bucket_size=parm_data["ip_bucket_size"],
            transparent_samples=parm_data["ip_transparent_samples"],
        )

    def test_no_scales(self, mocker):
        """Test with no scaling."""
        mock_build = mocker.patch(
            "ht.pyfilter.operations.ipoverrides.IpOverrides.build_arg_string"
        )

        mock_node = mocker.MagicMock()
        mock_node.evalParm.return_value = 0

        assert ipoverrides.build_arg_string_from_node(mock_node) == ""

        # Camera-res override and transparency are OFF here, so res_scale and
        # transparent_samples must be passed through as None.
        parm_data = {
            "enable_ip_override": 1,
            "ip_override_camerares": 0,
            "ip_transparent": 0,
            "ip_sample_scale": 0.5,
            "ip_disable_blur": 1,
            "ip_disable_aovs": 1,
            "ip_disable_deep": 1,
            "ip_disable_displacement": 1,
            "ip_disable_matte": 1,
            "ip_disable_subd": 1,
            "ip_disable_tilecallback": 1,
            "ip_bucket_size": 16,
        }

        mock_node.evalParm.side_effect = lambda name: parm_data[name]

        assert (
            ipoverrides.build_arg_string_from_node(mock_node) == mock_build.return_value
        )

        mock_build.assert_called_with(
            res_scale=None,
            sample_scale=parm_data["ip_sample_scale"],
            disable_blur=parm_data["ip_disable_blur"],
            disable_aovs=parm_data["ip_disable_aovs"],
            disable_deep=parm_data["ip_disable_deep"],
            disable_displacement=parm_data["ip_disable_displacement"],
            disable_subd=parm_data["ip_disable_subd"],
            disable_tilecallback=parm_data["ip_disable_tilecallback"],
            bucket_size=parm_data["ip_bucket_size"],
            transparent_samples=None,
            disable_matte=parm_data["ip_disable_matte"],
        )
def test_build_pixel_sample_scale_display(mocker):
    """Test the ht.pyfilter.operations.ipoverrides.build_pixel_sample_scale_display."""
    mock_scale = mocker.patch("ht.pyfilter.operations.ipoverrides._scale_samples")

    source_samples = (6, 6)
    target_samples = (3, 3)
    scale = 0.5

    mock_node = mocker.MagicMock()
    mock_node.evalParmTuple.return_value = source_samples
    mock_node.evalParm.return_value = scale

    mock_scale.return_value = target_samples

    result = ipoverrides.build_pixel_sample_scale_display(mock_node)

    # The ROP's sample tuple and scale parm must be fed to the scaling helper...
    mock_node.evalParmTuple.assert_called_with("vm_samples")
    mock_node.evalParm.assert_called_with("ip_sample_scale")

    mock_scale.assert_called_with(source_samples, scale)

    # ...and the scaled result rendered as "HxV".
    assert result == "{}x{}".format(target_samples[0], target_samples[1])
class Test_build_resolution_scale_display:
    """Test the ht.pyfilter.operations.ipoverrides.build_resolution_scale_display."""

    def test_no_camera(self, mocker):
        """Test when there is no target camera."""
        mock_node = mocker.MagicMock(spec=hou.RopNode)
        # parm("camera").evalAsNode() returning None means no camera is bound.
        mock_node.parm.return_value.evalAsNode.return_value = None

        result = ipoverrides.build_resolution_scale_display(mock_node)

        assert result == ""

        mock_node.parm.assert_called_with("camera")

    def test_no_override(self, mocker):
        """Test when there is no override being applied on the Mantra ROP."""
        mock_scale = mocker.patch(
            "ht.pyfilter.operations.ipoverrides._scale_resolution"
        )
        mock_scale.return_value = (960, 540)

        mock_camera = mocker.MagicMock(spec=hou.ObjNode)
        mock_camera.evalParmTuple.return_value = (1920, 1080)

        # ip_res_fraction is a string parm on the ROP; the code under test is
        # expected to convert it to a float before scaling.
        parm_values = {"override_camerares": False, "ip_res_fraction": "0.5"}

        mock_node = mocker.MagicMock(spec=hou.RopNode)
        mock_node.parm.return_value.evalAsNode.return_value = mock_camera
        mock_node.evalParm.side_effect = lambda name: parm_values[name]

        result = ipoverrides.build_resolution_scale_display(mock_node)

        assert result == "960x540"

        mock_node.parm.assert_called_with("camera")
        # The camera's own resolution is the scaling base.
        mock_scale.assert_called_with((1920, 1080), 0.5)

    def test_override_specific(self, mocker):
        """Test when there is a specific resolution override being applied on the Mantra ROP."""
        mock_scale = mocker.patch(
            "ht.pyfilter.operations.ipoverrides._scale_resolution"
        )
        mock_scale.return_value = (250, 250)

        mock_camera = mocker.MagicMock(spec=hou.ObjNode)
        mock_camera.evalParmTuple.return_value = (1920, 1080)

        parm_values = {
            "override_camerares": 1,
            "ip_res_fraction": "0.25",
            "res_fraction": "specific",
        }

        mock_node = mocker.MagicMock(spec=hou.RopNode)
        mock_node.parm.return_value.evalAsNode.return_value = mock_camera
        # The ROP's own override resolution replaces the camera resolution.
        mock_node.evalParmTuple.return_value = (1000, 1000)
        mock_node.evalParm.side_effect = lambda name: parm_values[name]

        result = ipoverrides.build_resolution_scale_display(mock_node)

        assert result == "250x250"

        mock_node.parm.assert_called_with("camera")
        mock_scale.assert_called_with((1000, 1000), 0.25)

    def test_override_scaled(self, mocker):
        """Test when there is a resolution scale override being applied on the Mantra ROP."""
        mock_scale = mocker.patch(
            "ht.pyfilter.operations.ipoverrides._scale_resolution"
        )
        # Two scaling passes: first the ROP's res_fraction, then the ip one.
        mock_scale.side_effect = ((960, 540), (480, 270))

        mock_camera = mocker.MagicMock(spec=hou.ObjNode)
        mock_camera.evalParmTuple.return_value = (1920, 1080)

        parm_values = {
            "override_camerares": 1,
            "ip_res_fraction": "0.5",
            "res_fraction": "0.5",
        }

        mock_node = mocker.MagicMock(spec=hou.RopNode)
        mock_node.parm.return_value.evalAsNode.return_value = mock_camera
        mock_node.evalParmTuple.return_value = (1000, 1000)
        mock_node.evalParm.side_effect = lambda name: parm_values[name]

        result = ipoverrides.build_resolution_scale_display(mock_node)

        assert result == "480x270"

        mock_node.parm.assert_called_with("camera")

        calls = [mocker.call((1920, 1080), 0.5), mocker.call((960, 540), 0.5)]
        mock_scale.assert_has_calls(calls)
def test_build_pyfilter_command_from_node(mocker):
    """Test the ht.pyfilter.operations.ipoverrides.build_pyfilter_command_from_node."""
    mock_build_arg = mocker.patch(
        "ht.pyfilter.operations.ipoverrides.build_arg_string_from_node"
    )
    mock_build_command = mocker.patch(
        "ht.pyfilter.operations.ipoverrides.build_pyfilter_command"
    )

    mock_node = mocker.MagicMock(spec=hou.RopNode)

    assert (
        ipoverrides.build_pyfilter_command_from_node(mock_node)
        == mock_build_command.return_value
    )

    mock_build_arg.assert_called_with(mock_node)
    # The generated arg string is split into a list before being handed to
    # the command builder.
    mock_build_command.assert_called_with(
        mock_build_arg.return_value.split.return_value
    )
def test_set_mantra_command(mocker):
    """Test the ht.pyfilter.operations.ipoverrides.set_mantra_command."""
    mock_node = mocker.MagicMock(spec=hou.RopNode)

    ipoverrides.set_mantra_command(mock_node)

    # The soho_pipecmd parm is pointed at a mantra invocation whose PyFilter
    # arguments are generated at render time via the backtick hscript
    # expression below.
    mock_node.parm.return_value.set.assert_called_with(
        "mantra `pythonexprs(\"__import__('ht.pyfilter.operations', globals(), locals(), ['ipoverrides']).ipoverrides.build_pyfilter_command_from_node(hou.pwd())\")`"
    )
    mock_node.parm.assert_called_with("soho_pipecmd")
| 33.1801 | 166 | 0.640137 |
7552489daa3307779c988a1f9c7667facf284ff0 | 1,661 | py | Python | docs/source/examples/3/sample.py | kumar-pratik/hi-ml | a108cf4ea244a76127adedc0ca60f0a5afdfb3e8 | [
"MIT"
] | 34 | 2021-08-18T13:27:36.000Z | 2022-03-26T01:25:36.000Z | docs/source/examples/3/sample.py | kumar-pratik/hi-ml | a108cf4ea244a76127adedc0ca60f0a5afdfb3e8 | [
"MIT"
] | 111 | 2021-08-18T13:19:46.000Z | 2022-03-30T05:57:01.000Z | docs/source/examples/3/sample.py | kumar-pratik/hi-ml | a108cf4ea244a76127adedc0ca60f0a5afdfb3e8 | [
"MIT"
] | 6 | 2021-09-13T12:07:58.000Z | 2022-03-24T16:31:06.000Z | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from argparse import ArgumentParser
from typing import List
from health_azure import submit_to_azure_if_needed
def sieve(n: int) -> List[int]:
    """
    A simple implementation of the http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes

    :param n: Maximum value to search up to, not included.
    :return: List of primes upto but not including n.
    """
    # Flag table: is_prime[i] stays True until i is struck out as composite.
    is_prime = [True] * n
    for candidate in range(2, int(n ** 0.5 + 1)):
        if not is_prime[candidate]:
            continue
        # Strike out every multiple, starting at candidate**2 (smaller
        # multiples were already handled by smaller primes).
        for multiple in range(candidate * candidate, n, candidate):
            is_prime[multiple] = False
    return [value for value in range(2, n) if is_prime[value]]
def main() -> None:
    """Submit this script to AzureML (if needed), then write primes to a file."""
    # NOTE(review): the AzureML submission happens *before* argparse runs, so
    # the script-specific arguments reach the remote run untouched --
    # presumably submit_to_azure_if_needed strips only its own arguments;
    # confirm against the health_azure documentation.
    run_info = submit_to_azure_if_needed(
        compute_cluster_name="lite-testing-ds2",
        wait_for_completion=True,
        wait_for_completion_show_output=True)
    parser = ArgumentParser()
    parser.add_argument("-n", "--count", type=int, default=100, required=False, help="Maximum value (not included)")
    parser.add_argument("-o", "--output", type=str, default="primes.txt", required=False, help="Output file name")
    args = parser.parse_args()
    primes = sieve(args.count)
    # run_info.output_folder appears to be a Path-like object; files written
    # there are collected as run outputs when executing inside AzureML.
    output = run_info.output_folder / args.output
    output.write_text("\n".join(map(str, primes)))


if __name__ == "__main__":
    main()
237cfacb65fb9d85dc75dc9c975c8a47b0e11161 | 14,190 | py | Python | mpf/core/device_manager.py | Briedis1975/mpf | 99a2d72f84537cb5dae8ef9a783bd885b25a5adb | [
"MIT"
] | null | null | null | mpf/core/device_manager.py | Briedis1975/mpf | 99a2d72f84537cb5dae8ef9a783bd885b25a5adb | [
"MIT"
] | null | null | null | mpf/core/device_manager.py | Briedis1975/mpf | 99a2d72f84537cb5dae8ef9a783bd885b25a5adb | [
"MIT"
] | null | null | null | """Contains the DeviceManager base class."""
import asyncio
from collections import OrderedDict
from typing import Callable, Tuple, List, Generator
from mpf.core.utility_functions import Util
from mpf.core.mpf_controller import MpfController
MYPY = False
if MYPY: # pragma: no cover
from mpf.core.device import Device # pylint: disable-msg=cyclic-import,unused-import
class DeviceManager(MpfController):

    """Manages all the devices in MPF."""

    config_name = "device_manager"

    __slots__ = ["_monitorable_devices", "collections", "device_classes"]

    def __init__(self, machine):
        """Initialize device manager."""
        super().__init__(machine)

        # collection_name -> {device_name: device} for monitorable devices
        self._monitorable_devices = {}

        self.collections = OrderedDict()
        self.device_classes = OrderedDict()  # collection_name: device_class

        # this has to happen after mode load
        self.machine.events.add_async_handler('init_phase_1',
                                              self._load_device_modules, priority=5)

        self.machine.events.add_handler('init_phase_2',
                                        self.create_machinewide_device_control_events,
                                        priority=2)

    def get_monitorable_devices(self):
        """Return all devices which are registered as monitorable."""
        return self._monitorable_devices

    def register_monitorable_device(self, device):
        """Register a monitorable device.

        Args:
        ----
            device: The device to register.
        """
        if device.collection not in self._monitorable_devices:
            self._monitorable_devices[device.collection] = {}
        self._monitorable_devices[device.collection][device.name] = device

    def notify_device_changes(self, device, notify, old, value):
        """Notify subscribers about changes in a registered device.

        Args:
        ----
            device: The device that changed.
            notify: Attribute name which changed.
            old: The old value.
            value: The new value.
        """
        self.machine.bcp.interface.notify_device_changes(device, notify, old, value)

    async def _load_device_modules(self, **kwargs):
        """Create, validate and initialise all machine-wide devices."""
        del kwargs
        # step 1: create devices in machine collection
        self.debug_log("Creating devices...")
        for device_type in self.machine.config['mpf']['device_modules']:
            device_cls = Util.string_to_class(device_type)      # type: Device

            collection_name, config = device_cls.get_config_info()

            self.device_classes[collection_name] = device_cls

            # create the collection
            collection = DeviceCollection(self.machine, collection_name,
                                          device_cls.config_section)

            self.collections[collection_name] = collection
            setattr(self.machine, collection_name, collection)

            # Get the config section for these devices
            config = self.machine.config.get(config, None)

            # create the devices
            if config:
                self.create_devices(collection_name, config)

            # create the default control events
            try:
                self._create_default_control_events(collection)
            except KeyError:
                pass

        self.machine.mode_controller.create_mode_devices()

        # step 2: load config and validate devices
        self.load_devices_config(validate=True)
        await self.machine.mode_controller.load_mode_devices()

        # step 3: initialise devices (mode devices will be initialised when mode is started)
        await self.initialize_devices()

    def stop_devices(self):
        """Stop all devices in the machine."""
        for device_type in self.machine.config['mpf']['device_modules']:
            device_cls = Util.string_to_class(device_type)
            collection_name, _ = device_cls.get_config_info()
            if not hasattr(self.machine, collection_name):
                continue
            for device in getattr(self.machine, collection_name).values():
                # not every device type implements stop_device()
                if hasattr(device, "stop_device"):
                    device.stop_device()

    def create_devices(self, collection_name, config):
        """Create devices for a collection.

        Args:
        ----
            collection_name: Name of the machine collection to populate.
            config: Mapping of device name -> device config for the collection.
        """
        cls = self.device_classes[collection_name]

        collection = getattr(self.machine, collection_name)

        # if this device class has a device_class_init classmethod, run it now
        if hasattr(cls, 'device_class_init'):
            # don't want to use try here in case the called method has an error
            cls.device_class_init(self.machine)

        # create the devices
        for device_name in config:

            if not config[device_name] and not cls.allow_empty_configs:
                self.raise_config_error("Device {}:'{}' has an empty config.".format(collection_name, device_name), 2,
                                        context=collection_name + "." + device_name)

            elif not isinstance(config[device_name], dict):
                self.raise_config_error("Device {}:'{}' does not have a valid config. Expected a dictionary.".format(
                    collection_name, device_name), 3, context=collection_name + "." + device_name)

            collection[device_name] = cls(self.machine, device_name)

    def load_devices_config(self, validate=True):
        """Load all devices.

        Args:
        ----
            validate: When True, run a validation pass over every device
                config before any config is loaded.
        """
        if validate:
            for device_type in self.machine.config['mpf']['device_modules']:

                device_cls = Util.string_to_class(device_type)

                collection_name, config_name = device_cls.get_config_info()

                if config_name not in self.machine.config:
                    continue

                # Get the config section for these devices
                collection = getattr(self.machine, collection_name)
                config = self.machine.config[config_name]
                if not config:
                    self.machine.config[config_name] = config = {}

                if not isinstance(config, dict):
                    self.raise_config_error("Format of collection {} is invalid.".format(collection_name), 1)

                # validate config
                for device_name in config:
                    config[device_name] = collection[device_name].prepare_config(config[device_name], False)
                    config[device_name] = collection[device_name].validate_and_parse_config(config[device_name], False)

        for device_type in self.machine.config['mpf']['device_modules']:

            device_cls = Util.string_to_class(device_type)

            collection_name, config_name = device_cls.get_config_info()

            if config_name not in self.machine.config:
                continue

            # Get the config section for these devices
            collection = getattr(self.machine, collection_name)
            config = self.machine.config[config_name]

            # load config
            for device_name in config:
                collection[device_name].load_config(config[device_name])

    async def initialize_devices(self):
        """Initialise all machine-wide devices.

        Mode devices are initialised separately when their mode starts.
        """
        futures = []
        for device_type in self.machine.config['mpf']['device_modules']:

            device_cls = Util.string_to_class(device_type)

            collection_name, config_name = device_cls.get_config_info()

            if config_name not in self.machine.config:
                continue

            # Get the config section for these devices
            collection = getattr(self.machine, collection_name)
            config = self.machine.config[config_name]

            # add machine wide
            for device_name in config:
                futures.append(collection[device_name].device_added_system_wide())

        # asyncio.wait() raises ValueError when given an empty collection
        # (e.g. a machine config with no devices), and passing bare
        # coroutines to it is deprecated since Python 3.8.  asyncio.gather()
        # accepts coroutines directly and propagates initialisation errors
        # instead of leaving them unretrieved in a future.
        if futures:
            await asyncio.gather(*futures)

    # pylint: disable-msg=too-many-nested-blocks
    def get_device_control_events(self, config) -> Generator[Tuple[str, Callable, int, "Device"], None, None]:
        """Scan a config dictionary for control_events.

        Yields events, methods, delays, and devices for all the devices and
        control_events in that config.

        Args:
        ----
            config: An MPF config dictionary (either machine-wide or mode-
                specific).

        Returns a generator of 4-item tuples
        ------------------------------------
        * The event name
        * The callback method of the device
        * The delay in ms
        * The device object
        """
        config_spec = self.machine.config_validator.get_config_spec()

        for collection in self.collections:
            config_section = self.collections[collection].config_section
            if config_section in config:
                for device, settings in iter(config[config_section].items()):

                    # every "*_events" key (except control_events itself) maps
                    # events to an "event_*" handler method on the device
                    control_events = [x for x in settings if
                                      x.endswith('_events') and x != "control_events"]

                    device_obj = self.collections[collection][device]

                    for control_event in control_events:
                        # get events from this device's config
                        if not isinstance(settings[control_event], dict):
                            if config_spec[config_section][control_event] == "ignore":
                                continue
                            raise AssertionError(
                                "Type of {}:{} should be event_handler|event_handler:ms| in config_spec".format(
                                    collection, control_event))
                        # try the new style first
                        try:
                            method = getattr(device_obj, "event_{}".format(control_event[:-7]))
                        except AttributeError:
                            raise AssertionError("Class {} needs to have method event_{} to handle {}".format(
                                device_obj, control_event[:-7], control_event
                            ))

                        for event, delay in settings[control_event].items():
                            yield (event,
                                   method,
                                   delay,
                                   device_obj)

    def create_machinewide_device_control_events(self, **kwargs):
        """Create machine wide control events."""
        del kwargs

        for event, method, delay, _ in (
                self.get_device_control_events(self.machine.config)):

            if delay:
                # delayed events go through an intermediate handler that
                # schedules the real callback on the machine delay manager
                self.machine.events.add_handler(
                    event=event,
                    handler=self._control_event_handler,
                    callback=method,
                    ms_delay=delay,
                    delay_mgr=self.machine.delay)
            else:
                self.machine.events.add_handler(
                    event=event,
                    handler=method)

    def _control_event_handler(self, callback, ms_delay, delay_mgr=None, **kwargs):
        """Schedule *callback* to run after *ms_delay* ms via *delay_mgr*."""
        del kwargs
        self.debug_log("_control_event_handler: callback: %s,", callback)

        delay_mgr.add(ms=ms_delay, callback=callback)

    def _create_default_control_events(self, device_list):
        """Register the default per-device and per-collection event handlers."""
        for device in device_list.values():

            event_prefix = device.class_label + '_' + device.name + '_'
            event_prefix2 = device.collection + '_'

            for method in (self.machine.config['mpf']['device_events']
                           [device.config_section]):
                self.machine.events.add_handler(event=event_prefix + method,
                                                handler=getattr(device,
                                                                method))
                self.machine.events.add_handler(event=event_prefix2 + method,
                                                handler=getattr(device,
                                                                method))
class DeviceCollection(dict):

    """A collection of Devices.

    One instance of this class exists per hardware device type (coils,
    lights, switches, ball devices, ...).  It is a dict mapping device
    name -> device object, with tag-based lookup and (deprecated)
    attribute-style access on top.
    """

    __slots__ = ["machine", "name", "config_section", "_tag_cache"]

    def __init__(self, machine, collection, config_section):
        """Initialise device collection."""
        super().__init__()
        self.machine = machine
        self.name = collection
        self.config_section = config_section
        # tag -> list of devices; invalidated on deletion.
        self._tag_cache = {}

    def __hash__(self):
        """Hash collection by its name and owning machine."""
        return hash((self.name, self.machine))

    def __delitem__(self, key):
        """Delete item for key and invalidate the tag cache."""
        self._tag_cache = {}
        return super().__delitem__(key)

    def __getattr__(self, attr):
        """Return device by key.

        This method is DEPRECATED and will be removed soon.
        """
        # Historic convenience access, e.g. self.coils.coilname.
        try:
            return self[attr]
        except KeyError:
            raise KeyError('Error: No device exists with the name:', attr)

    def __iter__(self):
        """Iterate collection.

        This method is DEPRECATED and will be removed soon. Use .values() instead.
        """
        yield from self.values()

    def items_tagged(self, tag) -> List["Device"]:
        """Return of list of device objects which have a certain tag.

        Args:
        ----
            tag: A string of the tag name which specifies what devices are
                returned.

        Returns a list of device objects. If no devices are found with that tag, it
        will return an empty list.
        """
        cached = self._tag_cache.get(tag)
        if cached is not None:
            return cached

        tagged = []
        for device in self.values():
            if hasattr(device, "tags") and tag in device.tags:
                tagged.append(device)

        self._tag_cache[tag] = tagged
        return tagged
| 38.876712 | 119 | 0.586963 |
c52eb626ce771b2d17ac43b40851f6c3f02ed8c9 | 204 | py | Python | utils.py | sourcery-ai-bot/quinread | 7d90fd8e2ba8eec53654b3fa57c230657683f044 | [
"MIT"
] | 6 | 2021-10-09T07:19:41.000Z | 2021-12-05T05:16:25.000Z | utils.py | sourcery-ai-bot/quinread | 7d90fd8e2ba8eec53654b3fa57c230657683f044 | [
"MIT"
] | 5 | 2021-11-13T15:34:53.000Z | 2021-11-28T18:07:10.000Z | utils.py | TheQuinbox/quinread | b1c5c0cbfdf866df51a04942d7f7f2dc36f4cc9a | [
"MIT"
] | null | null | null | def plural(number, singular, multiple):
return singular if number in [1, -1] else multiple
def count_words(content):
    """Return the number of whitespace-separated words in *content*.

    :param content: Text to count words in.
    :return: Word count as an int; an empty string counts as zero words.
    """
    # str.split() with no argument splits on any run of whitespace and drops
    # empty strings.  The previous split(" ") counted "a  b" as three words,
    # missed newline/tab separators, and counted "" as one word.
    return len(content.split())
c84e356c90df591b59aaf5570aa77ba2a50f5af8 | 2,063 | py | Python | py12306/log/order_log.py | chenfengfeng/py12306 | 532e1656883362fdaa21fd443c906cf84f4a4ae2 | [
"Apache-2.0"
] | 2 | 2019-01-18T02:04:19.000Z | 2019-01-18T02:04:22.000Z | py12306/log/order_log.py | robram9572/py12306 | 9b7f16d60a8ddf28803cfb40ff4ef93d7a35517f | [
"Apache-2.0"
] | null | null | null | py12306/log/order_log.py | robram9572/py12306 | 9b7f16d60a8ddf28803cfb40ff4ef93d7a35517f | [
"Apache-2.0"
] | 1 | 2019-01-22T01:59:11.000Z | 2019-01-22T01:59:11.000Z | from py12306.log.base import BaseLog
from py12306.helpers.func import *
@singleton
class OrderLog(BaseLog):
    """Log helper (singleton) for the ticket-ordering workflow.

    Holds the user-facing Chinese message templates used while submitting an
    order, waiting in the queue and paying, plus two convenience printers.
    """

    # If these class attributes are not re-declared here, log lines get
    # printed twice; the root cause is unknown.  (Translated from the
    # original Chinese comment.)
    logs = []
    thread_logs = {}
    quick_log = []

    # Message templates below are user-facing Chinese strings used at
    # runtime; they must not be altered.
    MESSAGE_REQUEST_INIT_DC_PAGE_FAIL = '请求初始化订单页面失败'
    MESSAGE_SUBMIT_ORDER_REQUEST_FAIL = '提交订单失败,错误原因 {} \n'
    MESSAGE_SUBMIT_ORDER_REQUEST_SUCCESS = '提交订单成功'
    MESSAGE_CHECK_ORDER_INFO_FAIL = '检查订单失败,错误原因 {} \n'
    MESSAGE_CHECK_ORDER_INFO_SUCCESS = '检查订单成功'
    MESSAGE_GET_QUEUE_INFO_SUCCESS = '获取排队信息成功,目前排队人数 {}, 余票还剩余 {} 张'
    MESSAGE_GET_QUEUE_COUNT_SUCCESS = '排队成功,你当前排在第 {} 位, 余票还剩余 {} 张'
    MESSAGE_GET_QUEUE_LESS_TICKET = '排队失败,目前排队人数已经超过余票张数'
    MESSAGE_GET_QUEUE_COUNT_FAIL = '排队失败,错误原因 {}'
    MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_SUCCESS = '# 提交订单成功!#'
    MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_ERROR = '出票失败,错误原因 {}'
    MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_FAIL = '提交订单失败,错误原因 {}'
    MESSAGE_QUERY_ORDER_WAIT_TIME_WAITING = '排队等待中,排队人数 {},预计还需要 {} 秒'
    MESSAGE_QUERY_ORDER_WAIT_TIME_FAIL = '排队失败,错误原因 {}'
    MESSAGE_QUERY_ORDER_WAIT_TIME_INFO = '第 {} 次排队,请耐心等待'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_TITLE = '车票购买成功!'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_CONTENT = '请及时登录12306,打开 \'未完成订单\',在30分钟内完成支付!'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_START_SEND = '正在发送语音通知, 第 {} 次'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_CONTENT = '你的车票 {} 到 {} 购买成功,请登录 12306 进行支付'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_EMAIL_CONTENT = '订单号 {},请及时登录12306,打开 \'未完成订单\',在30分钟内完成支付!'
    MESSAGE_JOB_CLOSED = '当前任务已结束'

    @classmethod
    def print_passenger_did_deleted(cls, passengers):
        """Print the passenger list remaining after over-quota passengers were dropped."""
        self = cls()
        # Render each passenger as "name(type)" for display.
        result = [passenger.get('name') + '(' + passenger.get('type_text') + ')' for passenger in passengers]
        self.add_quick_log('# 删减后的乘客列表 {} #'.format(', '.join(result)))
        self.flush()
        return self

    @classmethod
    def print_ticket_did_ordered(cls, order_id):
        """Print the purchase-success message containing the 12306 order id."""
        self = cls()
        self.add_quick_log('# 车票购买成功,订单号 {} #'.format(order_id))
        self.flush()
        return self
| 36.839286 | 109 | 0.712555 |
fc1f7f2d13ce9d7cdaf3bf5458e7dd73baff0b3e | 445 | py | Python | super_detector/migrations/0002_auto_20170611_0628.py | nagendraksrivastava/fake-product-detector-backend | e851481480368ecf9f7907700d6e03a0dc40c43e | [
"Unlicense",
"MIT"
] | 1 | 2020-10-21T08:58:06.000Z | 2020-10-21T08:58:06.000Z | super_detector/migrations/0002_auto_20170611_0628.py | nagendraksrivastava/fake-product-detector-backend | e851481480368ecf9f7907700d6e03a0dc40c43e | [
"Unlicense",
"MIT"
] | null | null | null | super_detector/migrations/0002_auto_20170611_0628.py | nagendraksrivastava/fake-product-detector-backend | e851481480368ecf9f7907700d6e03a0dc40c43e | [
"Unlicense",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-06-11 06:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make UserProfile.pin nullable (auto-generated by Django 1.9)."""

    dependencies = [
        ('super_detector', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='pin',
            # Only the null option changes; the field stays an IntegerField.
            field=models.IntegerField(null=True),
        ),
    ]
| 21.190476 | 49 | 0.613483 |
deeda51f96dddb7deef0d64045789c65127882f3 | 3,105 | py | Python | lanenet/model/model.py | thanit456/pytorch-lanenet | 4c5b2747e2b608ebfc5f0edff3601e34610e0029 | [
"MIT"
] | null | null | null | lanenet/model/model.py | thanit456/pytorch-lanenet | 4c5b2747e2b608ebfc5f0edff3601e34610e0029 | [
"MIT"
] | null | null | null | lanenet/model/model.py | thanit456/pytorch-lanenet | 4c5b2747e2b608ebfc5f0edff3601e34610e0029 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LaneNet model
https://arxiv.org/pdf/1807.01726.pdf
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .loss import DiscriminativeLoss
from .encoders import VGGEncoder
from .decoders import FCNDecoder
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class LaneNet(nn.Module):
    """LaneNet lane-detection network: shared encoder feeding a binary
    segmentation branch and a pixel-embedding (instance) branch."""

    def __init__(self, arch="VGG"):
        """Build the network.

        :param arch: Backbone architecture; only "VGG" is implemented
            ("ESPNet" and "ENNet" raise NotImplementedError).
        """
        super(LaneNet, self).__init__()
        # no of instances for segmentation
        # (embedding dimensionality of the instance branch output)
        self.no_of_instances = 5
        encode_num_blocks = 5
        in_channels = [3, 64, 128, 256, 512]
        out_channels = in_channels[1:] + [512]
        self._arch = arch
        if self._arch == 'VGG':
            self._encoder = VGGEncoder(encode_num_blocks, in_channels, out_channels)
            self._encoder.to(DEVICE)

            # Decoder fuses these encoder feature maps (FCN skip connections).
            decode_layers = ["pool5", "pool4", "pool3"]
            decode_channels = out_channels[:-len(decode_layers) - 1:-1]
            decode_last_stride = 8
            self._decoder = FCNDecoder(decode_layers, decode_channels, decode_last_stride)
            self._decoder.to(DEVICE)
        elif self._arch == 'ESPNet':
            raise NotImplementedError
        elif self._arch == 'ENNet':
            raise NotImplementedError

        # 1x1 conv projecting the 64-channel decoder output to the
        # per-pixel instance embedding.
        self._pix_layer = nn.Conv2d(in_channels=64, out_channels=self.no_of_instances, kernel_size=1, bias=False).to(
            DEVICE)
        self.relu = nn.ReLU().to(DEVICE)

    def forward(self, input_tensor):
        """Run a forward pass.

        :param input_tensor: input image batch -- presumably (B, 3, H, W);
            TODO confirm against the dataset loader.
        :return: dict with 'instance_seg_logits' (pixel embeddings),
            'binary_seg_pred' (argmax lane/背景 mask, shape (B, 1, H, W)) and
            'binary_seg_logits' (raw decoder logits).
        """
        encode_ret = self._encoder(input_tensor)

        decode_ret = self._decoder(encode_ret)

        decode_logits = decode_ret['logits']
        decode_logits = decode_logits.to(DEVICE)
        # Hard binary prediction: per-pixel argmax over the class dimension.
        binary_seg_ret = torch.argmax(F.softmax(decode_logits, dim=1), dim=1, keepdim=True)

        decode_deconv = decode_ret['deconv']
        pix_embedding = self.relu(self._pix_layer(decode_deconv))

        return {
            'instance_seg_logits': pix_embedding,
            'binary_seg_pred': binary_seg_ret,
            'binary_seg_logits': decode_logits
        }
def compute_loss(net_output, binary_label, instance_label):
    """Compute the weighted LaneNet training loss and the batch mean IoU.

    :param net_output: dict returned by LaneNet.forward() with keys
        'binary_seg_logits', 'instance_seg_logits' and 'binary_seg_pred'.
    :param binary_label: per-pixel binary ground truth for the segmentation
        branch (as expected by CrossEntropyLoss).
    :param instance_label: per-pixel instance ground truth for the
        discriminative loss.
    :return: (total_loss, binary_loss, instance_loss, binary_seg_pred, iou)
    """
    # Fixed weights for the three loss terms.
    k_binary = 0.7
    k_instance = 0.3
    k_dist = 1.0

    ce_loss_fn = nn.CrossEntropyLoss()
    binary_seg_logits = net_output["binary_seg_logits"]
    binary_loss = ce_loss_fn(binary_seg_logits, binary_label)

    pix_embedding = net_output["instance_seg_logits"]
    ds_loss_fn = DiscriminativeLoss(0.5, 1.5, 1.0, 1.0, 0.001)
    # reg_loss is intentionally unused; the unpack is required.
    var_loss, dist_loss, reg_loss = ds_loss_fn(pix_embedding, instance_label)
    binary_loss = binary_loss * k_binary
    instance_loss = var_loss * k_instance
    dist_loss = dist_loss * k_dist
    total_loss = binary_loss + instance_loss + dist_loss

    out = net_output["binary_seg_pred"]
    iou = 0
    batch_size = out.size()[0]
    for i in range(batch_size):
        PR = out[i].squeeze(0).nonzero().size()[0]
        GT = binary_label[i].nonzero().size()[0]
        TP = (out[i].squeeze(0) * binary_label[i]).nonzero().size()[0]
        union = PR + GT - TP
        # Guard against division by zero: when both the prediction and the
        # ground truth are empty the union is 0; count that as a perfect
        # match (IoU = 1) instead of raising ZeroDivisionError.
        iou += TP / union if union else 1.0
    iou = iou / batch_size
    return total_loss, binary_loss, instance_loss, out, iou
| 34.120879 | 117 | 0.654428 |
0f99b82fcbf25b718ef2beb1399a2e64565a54d9 | 2,263 | py | Python | rino/utils.py | rinocloud/rino | 34d4d6eb697f501c6ab8aa5d41a9435529342da6 | [
"MIT"
] | null | null | null | rino/utils.py | rinocloud/rino | 34d4d6eb697f501c6ab8aa5d41a9435529342da6 | [
"MIT"
] | null | null | null | rino/utils.py | rinocloud/rino | 34d4d6eb697f501c6ab8aa5d41a9435529342da6 | [
"MIT"
] | null | null | null | from git import Repo
from git.exc import InvalidGitRepositoryError
import os
import json
import click
import requests
def get_rino_folder(rinocloud, path):
    """Create (or fetch) a rinocloud folder named *path* via the REST API.

    Args:
        rinocloud: config object carrying ``api_key``, ``api_base`` and
            ``api_domain`` (shape assumed from usage — confirm with caller).
        path: folder name to create if it does not already exist.

    Returns:
        The ``requests.Response`` from the create-folder endpoint.

    Raises:
        requests.exceptions.ConnectionError: re-raised with a friendlier
            message naming the configured domain.
    """
    headers = {
        'Authorization': 'Token %s' % rinocloud.api_key,
        'X-Requested-With': 'XMLHttpRequest'
    }
    try:
        return requests.post(rinocloud.api_base + 'files/create_folder_if_not_exist/', json={
            'name': path
        }, headers=headers)
    except requests.exceptions.ConnectionError:
        raise requests.exceptions.ConnectionError("Could not connect to specified domain %s" % rinocloud.api_domain)
def clean_metadata(meta):
    """Return a copy of *meta* with the server-managed bookkeeping fields removed."""
    reserved = {"id", "created_on", "updated_on", "etag", "filepath", "name", "versions"}
    return {key: value for key, value in meta.items() if key not in reserved}
def get_git_repo():
    """Return a ``git.Repo`` for the current directory, or ``None`` when the
    cwd is not inside a git repository.
    """
    try:
        repo = Repo(os.getcwd())
        # Touching the config reader validates the repository layout;
        # it raises InvalidGitRepositoryError for a broken/absent repo.
        repo.config_reader()
    except InvalidGitRepositoryError:
        # Fix: dropped the unused `as e` binding from the original.
        repo = None
    return repo
def get_git_meta(repo):
    """Extract remote/branch/commit details from *repo*.

    Args:
        repo: a ``git.Repo`` instance, or a falsy value.

    Returns:
        ``None`` when *repo* is falsy or has no remote configured; otherwise
        a dict with the default remote's name/urls and the current HEAD's
        branch, commit sha and commit message.
    """
    git_metadata = None
    if repo:
        try:
            git_remote = repo.remote()
            git_metadata = {
                'remote_name': git_remote.name,
                'remote_urls': list(git_remote.urls),
                'branch': repo.active_branch.name,
                'commit': repo.head.commit.hexsha,
                'message': repo.head.commit.message
            }
        except ValueError:
            # repo.remote() raises ValueError when no remote is configured.
            # Fix: corrected the typos in the echoed message
            # ("Cant ... informatoin" -> "Can't ... information").
            click.echo("Can't add git information. git has no remote set.")
    return git_metadata
def make_hidden_json_file(path):
    """Return the path of the hidden JSON sidecar file for *path*.

    ``dir/data.txt`` -> ``dir/.data.json``. Everything from the first dot of
    the basename onward is treated as the extension (so ``a.tar.gz`` maps to
    ``.a.json``), matching the original first-dot behaviour.
    """
    dirname = os.path.dirname(path)
    fname = os.path.basename(path)
    # Fix: the original used fname.index('.'), which raised ValueError for
    # names without a dot. split('.', 1)[0] keeps the same first-dot
    # semantics but returns the whole name when there is no extension.
    fname_no_ext = fname.split('.', 1)[0]
    return os.path.join(
        dirname,
        '.' + fname_no_ext + '.json'
    )
def save_notebook_meta(path, obj):
    """Serialize ``obj._prep_metadata()`` to the hidden sidecar for *path*.

    The metadata is written as pretty-printed JSON with sorted keys.
    """
    m = obj._prep_metadata()
    # Fix: use a context manager so the handle is closed even if the
    # JSON serialization raises.
    with open(make_hidden_json_file(path), 'w') as mfile:
        mfile.write(json.dumps(m, indent=4, sort_keys=True))
def get_notebook_meta(path):
    """Load the hidden JSON sidecar metadata for *path*.

    Returns:
        ``(metadata, sidecar_path)`` when the sidecar exists, otherwise
        ``(None, None)``.
    """
    mfile_path = make_hidden_json_file(path)
    if not os.path.isfile(mfile_path):
        return None, None
    # Fix: context manager guarantees the handle is closed even when the
    # JSON is malformed and json.loads raises.
    with open(mfile_path, 'r') as mfile:
        metadata = json.loads(mfile.read())
    return metadata, mfile_path
| 26.623529 | 116 | 0.626602 |
1612882091e906b5d29715bd2dd0747546746ae0 | 1,686 | py | Python | cirq/testing/deprecation.py | nlbao/Cirq | 142507d08f0548f460616af1d0b76a35f0f1b07e | [
"Apache-2.0"
] | null | null | null | cirq/testing/deprecation.py | nlbao/Cirq | 142507d08f0548f460616af1d0b76a35f0f1b07e | [
"Apache-2.0"
] | null | null | null | cirq/testing/deprecation.py | nlbao/Cirq | 142507d08f0548f460616af1d0b76a35f0f1b07e | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
from cirq.testing import assert_logs
# Environment flag set while the context is active; presumably consulted by
# cirq's deprecation machinery to permit deprecated calls under test.
ALLOW_DEPRECATION_IN_TEST = 'ALLOW_DEPRECATION_IN_TEST'
@contextmanager
def assert_deprecated(*msgs: str, deadline: str, allow_multiple_warnings: bool = False):
    """Context manager that permits (and asserts) deprecation warnings in tests.

    Usage:

    >>> with assert_deprecated("use cirq.x instead", deadline="v0.9"):
    >>>     # do something deprecated

    Args:
        msgs: messages that must match the captured warnings.
        deadline: expected removal deadline, formatted vX.Y (minor versions only).
        allow_multiple_warnings: accept more than one warning when True;
            defaults to False and should typically stay that way.
    """
    os.environ[ALLOW_DEPRECATION_IN_TEST] = 'True'
    expected_count = None if allow_multiple_warnings else 1
    try:
        with assert_logs(*msgs, deadline, count=expected_count):
            yield True
    finally:
        # Always clear the flag, even if the body raised.
        os.environ.pop(ALLOW_DEPRECATION_IN_TEST, None)
| 38.318182 | 99 | 0.729537 |
7480f20e0bba775ab02a9b178bb4e49f993c62a3 | 1,106 | py | Python | mylights.py | brettonw/mywizlight | d619bf3398c1f3dc3c3e0cef7be9aeea0ac197ca | [
"MIT"
] | null | null | null | mylights.py | brettonw/mywizlight | d619bf3398c1f3dc3c3e0cef7be9aeea0ac197ca | [
"MIT"
] | null | null | null | mylights.py | brettonw/mywizlight | d619bf3398c1f3dc3c3e0cef7be9aeea0ac197ca | [
"MIT"
] | null | null | null | import sys;
from pywizlight.bulb import PilotBuilder, PilotParser, wizlight
# Known bulbs on the LAN, keyed by human-friendly name.
# Fix: the original literal repeated the "nook" entry eleven times with the
# same value; duplicate keys in a dict literal collapse to one anyway, so the
# redundant copies were removed (resulting dict is identical).
lightIpsByName = {
    "office": "192.168.1.217",
    "nook": "192.168.1.230",
};
# Reverse lookup: IP address -> friendly name.
lightNamesByIp = {v: k for k, v in lightIpsByName.items()};
def makeLight (name):
    """Look up *name* in lightIpsByName and return a wizlight for its IP."""
    address = lightIpsByName[name];
    print ("Light: {} ({})".format (name, address));
    return wizlight (address);
def getLight ():
    """Resolve the light named on the command line (by name or by IP).

    Returns a wizlight instance, or None when no/unknown light was given.
    """
    if len (sys.argv) <= 1:
        print ("ERROR: No light specified");
        return None;
    # Second CLI argument selects the light, by friendly name or IP address.
    requested = sys.argv[1];
    if requested in lightIpsByName:
        return makeLight (requested);
    if requested in lightNamesByIp:
        return makeLight (lightNamesByIp[requested]);
    print ("Unknown light ({})".format (requested));
    return None;
| 29.105263 | 63 | 0.563291 |
063d34250c2508fff47d0257c9d5c6c6b9bb3f42 | 3,478 | py | Python | tests/redical-tests.py | atatsu/redical | 2c8128e58214ae3ab717addfb358b2c6e9ed2297 | [
"BSD-3-Clause"
] | null | null | null | tests/redical-tests.py | atatsu/redical | 2c8128e58214ae3ab717addfb358b2c6e9ed2297 | [
"BSD-3-Clause"
] | null | null | null | tests/redical-tests.py | atatsu/redical | 2c8128e58214ae3ab717addfb358b2c6e9ed2297 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import pytest
from redical import create_redical, PipelineError, TransactionError, WatchError
# Run every coroutine test in this module under pytest-asyncio.
pytestmark = [pytest.mark.asyncio]
# There will probably be a lot of duplicated tests here (specifically
# test cases pulled straight from the connection-specific and pool-specific
# tests) to ensure the "higher-level" functionality plays nicely with the
# expected behaviors of the "lower-level" functionality.
async def test_error_func(redical):
    """`execute` should raise whatever exception `error_func` maps the server error to."""
    def error_func(exc):
        return TypeError('no!')
    with pytest.raises(TypeError, match='no!'):
        await redical.execute('hset', 'mykey', error_func=error_func)
# ---------------------------------------------------------------------------
# Pipelines
async def test_pipeline(redical):
    """Futures issued inside a pipeline resolve to their results after the block exits."""
    async with redical as pipe:
        fut1 = pipe.set('foo', 'bar')
        fut2 = pipe.set('bar', 'baz')
        fut3 = pipe.get('foo')
        fut4 = pipe.get('bar')
    assert True is await fut1
    assert True is await fut2
    assert 'bar' == await fut3
    assert 'baz' == await fut4
async def test_multiple_pipelines_prevented(redis_uri):
    """Nesting a pipeline inside an open pipeline must raise PipelineError."""
    redical = await create_redical(redis_uri)
    async with redical:
        with pytest.raises(PipelineError):
            async with redical:
                pass
    # Clean up the connection created locally for this test.
    redical.close()
    await redical.wait_closed()
async def test_context_sanity_check(redical):
    """
    More geared towards the pool-based redical instance: two concurrent
    pipelines on the same instance must not see each other's commands.
    """
    if not redical.resource.supports_multiple_pipelines:
        return
    # t1 writes 'bar' and waits until t2 has run before checking its read.
    async def t1(event):
        async with redical as pipe:
            pipe.set('foo', 'bar')
            fut = pipe.get('foo')
            await event.wait()
        assert 'bar' == await fut
    # t2 writes 'baz' and signals t1 once its pipeline has been built.
    async def t2(event):
        async with redical as pipe:
            pipe.set('foo', 'baz')
            fut = pipe.get('foo')
            event.set()
        assert 'baz' == await fut
    event = asyncio.Event()
    await asyncio.gather(t1(event), t2(event))
async def test_pipeline_disallow_close(redical):
    """close() inside an open pipeline must raise PipelineError."""
    async with redical as pipe:
        with pytest.raises(PipelineError, match='Do not close from within pipeline'):
            pipe.close()
async def test_pipeline_disallow_wait_closed(redical):
    """wait_closed() inside an open pipeline must raise PipelineError."""
    async with redical as pipe:
        with pytest.raises(PipelineError, match='Do not close from within pipeline'):
            await pipe.wait_closed()
# ---------------------------------------------------------------------------
# Transactions
async def test_transaction(redical):
    """Futures issued inside a transaction pipeline resolve after the blocks exit."""
    async with redical.transaction() as tr:
        async with tr as pipe:
            fut1 = pipe.set('foo', 'bar')
            fut2 = pipe.set('bar', 'baz')
            fut3 = pipe.get('foo')
            fut4 = pipe.get('bar')
    assert True is await fut1
    assert True is await fut2
    assert 'bar' == await fut3
    assert 'baz' == await fut4
async def test_transaction_watch_error(redical, conn):
    """A watched key modified by another connection aborts the transaction with WatchError."""
    await redical.set('mykey', 1)
    async with redical.transaction('mykey') as tr:
        val = int(await tr.get('mykey'))
        val += 1
        with pytest.raises(WatchError, match='Transaction aborted'):
            async with tr as pipe:
                # Out-of-band write on a second connection invalidates the WATCH.
                await conn.execute('SET', 'mykey', 'foo')
                fut = pipe.set('mykey', val)
    # The competing write won; the transaction's write never applied.
    assert 'foo' == await redical.get('mykey')
    with pytest.raises(WatchError, match='Transaction aborted'):
        await fut
async def test_transaction_disallow_close(redical):
    """close() inside an open transaction must raise TransactionError."""
    async with redical.transaction() as tr:
        with pytest.raises(TransactionError, match='Do not close from within transaction'):
            tr.close()
async def test_transaction_disallow_wait_closed(redical):
    """wait_closed() inside an open transaction must raise TransactionError."""
    async with redical.transaction() as tr:
        with pytest.raises(TransactionError, match='Do not close from within transaction'):
            await tr.wait_closed()
| 27.385827 | 87 | 0.691777 |
0305a6233e566c66adf8a317bf8cfd5a08ef71ac | 1,147 | py | Python | pdm/cli/commands/sync.py | frafra/pdm | 12c5c4f91bbb7260be7d93f3e3914ba708309032 | [
"MIT"
] | 1 | 2021-12-16T07:22:47.000Z | 2021-12-16T07:22:47.000Z | pdm/cli/commands/sync.py | frafra/pdm | 12c5c4f91bbb7260be7d93f3e3914ba708309032 | [
"MIT"
] | 2 | 2021-05-01T11:43:58.000Z | 2021-05-03T17:52:57.000Z | pdm/cli/commands/sync.py | frafra/pdm | 12c5c4f91bbb7260be7d93f3e3914ba708309032 | [
"MIT"
] | null | null | null | import argparse
from pdm.cli import actions
from pdm.cli.commands.base import BaseCommand
from pdm.cli.options import clean_group, dry_run_option, groups_group, install_group
from pdm.project import Project
class Command(BaseCommand):
    """Synchronize the current working set with lock file"""
    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Register CLI options for `pdm sync`."""
        # Reuse the shared option groups so sync stays consistent with
        # the other install-like commands.
        groups_group.add_to_parser(parser)
        dry_run_option.add_to_parser(parser)
        parser.add_argument(
            "-r",
            "--reinstall",
            action="store_true",
            help="Force reinstall existing dependencies",
        )
        clean_group.add_to_parser(parser)
        install_group.add_to_parser(parser)
    def handle(self, project: Project, options: argparse.Namespace) -> None:
        """Delegate the actual synchronization to the shared do_sync action."""
        actions.do_sync(
            project,
            groups=options.groups,
            dev=options.dev,
            default=options.default,
            dry_run=options.dry_run,
            clean=options.clean,
            no_editable=options.no_editable,
            no_self=options.no_self,
            reinstall=options.reinstall,
        )
| 31.861111 | 84 | 0.648649 |
fd8187243c017abf478dc25fdce5b549c92430d1 | 12,875 | py | Python | src/transformers/training_args_tf.py | johntiger1/transformers | 38fadb6faf30c103531fa5fcd52b3f110e7582d0 | [
"Apache-2.0"
] | 6 | 2021-08-20T03:14:08.000Z | 2021-11-14T18:31:51.000Z | src/transformers/training_args_tf.py | johntiger1/transformers | 38fadb6faf30c103531fa5fcd52b3f110e7582d0 | [
"Apache-2.0"
] | 2 | 2022-01-13T04:20:10.000Z | 2022-03-12T01:04:07.000Z | src/transformers/training_args_tf.py | johntiger1/transformers | 38fadb6faf30c103531fa5fcd52b3f110e7582d0 | [
"Apache-2.0"
] | 3 | 2021-09-19T08:20:42.000Z | 2022-02-19T16:32:40.000Z | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from typing import Tuple
from .file_utils import cached_property, is_tf_available, tf_required
from .training_args import TrainingArguments
from .utils import logging
# Module-level logger using the transformers logging wrapper.
logger = logging.get_logger(__name__)
# Only import TensorFlow when it is actually installed.
if is_tf_available():
    import tensorflow as tf
@dataclass
class TFTrainingArguments(TrainingArguments):
    """
    TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
    itself**.
    Using :class:`~transformers.HfArgumentParser` we can turn this class into `argparse
    <https://docs.python.org/3/library/argparse.html#module-argparse>`__ arguments that can be specified on the command
    line.
    Parameters:
        output_dir (:obj:`str`):
            The output directory where the model predictions and checkpoints will be written.
        overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
            If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
            :obj:`output_dir` points to a checkpoint directory.
        do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's
            intended to be used by your training/evaluation scripts instead. See the `example scripts
            <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
        do_eval (:obj:`bool`, `optional`):
            Whether to run evaluation on the validation set or not. Will be set to :obj:`True` if
            :obj:`evaluation_strategy` is different from :obj:`"no"`. This argument is not directly used by
            :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
            the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
            details.
        do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to run predictions on the test set or not. This argument is not directly used by
            :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
            the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
            details.
        evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
            The evaluation strategy to adopt during training. Possible values are:
            * :obj:`"no"`: No evaluation is done during training.
            * :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
            * :obj:`"epoch"`: Evaluation is done at the end of each epoch.
        per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
            The batch size per GPU/TPU core/CPU for training.
        per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
            The batch size per GPU/TPU core/CPU for evaluation.
        gradient_accumulation_steps: (:obj:`int`, `optional`, defaults to 1):
            Number of updates steps to accumulate the gradients for, before performing a backward/update pass.
            .. warning::
                When using gradient accumulation, one step is counted as one step with backward pass. Therefore,
                logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training
                examples.
        learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
            The initial learning rate for Adam.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            The weight decay to apply (if not zero).
        adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):
            The beta1 hyperparameter for the Adam optimizer.
        adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):
            The beta2 hyperparameter for the Adam optimizer.
        adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
            The epsilon hyperparameter for the Adam optimizer.
        max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
            Maximum gradient norm (for gradient clipping).
        num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):
            Total number of training epochs to perform.
        max_steps (:obj:`int`, `optional`, defaults to -1):
            If set to a positive number, the total number of training steps to perform. Overrides
            :obj:`num_train_epochs`.
        warmup_steps (:obj:`int`, `optional`, defaults to 0):
            Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
        logging_dir (:obj:`str`, `optional`):
            `TensorBoard <https://www.tensorflow.org/tensorboard>`__ log directory. Will default to
            `runs/**CURRENT_DATETIME_HOSTNAME**`.
        logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to log and evaluate the first :obj:`global_step` or not.
        logging_steps (:obj:`int`, `optional`, defaults to 500):
            Number of update steps between two logs.
        save_steps (:obj:`int`, `optional`, defaults to 500):
            Number of updates steps before two checkpoint saves.
        save_total_limit (:obj:`int`, `optional`):
            If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
            :obj:`output_dir`.
        no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to not use CUDA even when it is available or not.
        seed (:obj:`int`, `optional`, defaults to 42):
            Random seed for initialization.
        fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training.
        fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
            For :obj:`fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
            on the `Apex documentation <https://nvidia.github.io/apex/amp.html>`__.
        local_rank (:obj:`int`, `optional`, defaults to -1):
            During distributed training, the rank of the process.
        tpu_num_cores (:obj:`int`, `optional`):
            When training on TPU, the number of TPU cores (automatically passed by launcher script).
        debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to activate the trace to record computation graphs and profiling information or not.
        dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
            or not.
        eval_steps (:obj:`int`, `optional`, defaults to 1000):
            Number of update steps before two evaluations.
        past_index (:obj:`int`, `optional`, defaults to -1):
            Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
            make use of the past hidden states for their predictions. If this argument is set to a positive int, the
            ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
            at the next training step under the keyword argument ``mems``.
        tpu_name (:obj:`str`, `optional`):
            The name of the TPU the process is running on.
        run_name (:obj:`str`, `optional`):
            A descriptor for the run. Notably used for wandb logging.
        xla (:obj:`bool`, `optional`):
            Whether to activate the XLA compilation or not.
    """
    # NOTE(review): annotated `str` but defaults to None — Optional[str]
    # would be more precise.
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    poly_power: float = field(
        default=1.0,
        metadata={"help": "Power for the Polynomial decay LR scheduler."},
    )
    xla: bool = field(default=False, metadata={"help": "Whether to activate the XLA compilation or not"})
    @cached_property
    @tf_required
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", int]:
        """Build (once) the tf.distribute strategy for this run: TPU, multi-GPU,
        single GPU, or CPU, honoring the no_cuda / fp16 / xla flags."""
        logger.info("Tensorflow: setting up strategy")
        if self.xla:
            tf.config.optimizer.set_jit(True)
        gpus = tf.config.list_physical_devices("GPU")
        # Set to float16 at first
        if self.fp16:
            policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16")
            tf.keras.mixed_precision.experimental.set_policy(policy)
        if self.no_cuda:
            strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
        else:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                # No TPU available; fall back to GPU/CPU selection below.
                tpu = None
            if tpu:
                # Set to bfloat16 in case of TPU
                if self.fp16:
                    policy = tf.keras.mixed_precision.experimental.Policy("mixed_bfloat16")
                    tf.keras.mixed_precision.experimental.set_policy(policy)
                tf.config.experimental_connect_to_cluster(tpu)
                tf.tpu.experimental.initialize_tpu_system(tpu)
                strategy = tf.distribute.TPUStrategy(tpu)
            elif len(gpus) == 0:
                strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
            elif len(gpus) == 1:
                strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
            elif len(gpus) > 1:
                # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
                strategy = tf.distribute.MirroredStrategy()
            else:
                raise ValueError("Cannot find the proper strategy please check your environment properties.")
        return strategy
    @property
    @tf_required
    def strategy(self) -> "tf.distribute.Strategy":
        """
        The strategy used for distributed training.
        """
        return self._setup_strategy
    @property
    @tf_required
    def n_replicas(self) -> int:
        """
        The number of replicas (CPUs, GPUs or TPU cores) used in this training.
        """
        return self._setup_strategy.num_replicas_in_sync
    @property
    def train_batch_size(self) -> int:
        """
        The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
        """
        if self.per_gpu_train_batch_size:
            logger.warning(
                "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
                "version. Using `--per_device_train_batch_size` is preferred."
            )
        # Deprecated per-GPU value takes precedence when explicitly set.
        per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
        return per_device_batch_size * self.n_replicas
    @property
    def eval_batch_size(self) -> int:
        """
        The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
        """
        if self.per_gpu_eval_batch_size:
            logger.warning(
                "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
                "version. Using `--per_device_eval_batch_size` is preferred."
            )
        # Deprecated per-GPU value takes precedence when explicitly set.
        per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
        return per_device_batch_size * self.n_replicas
    @property
    @tf_required
    def n_gpu(self) -> int:
        """
        The number of replicas (CPUs, GPUs or TPU cores) used in this training.
        """
        warnings.warn(
            "The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.",
            FutureWarning,
        )
        return self._setup_strategy.num_replicas_in_sync
| 49.903101 | 137 | 0.647223 |
4cb72af6e34f22a26c5db62d4670fb0ee28bdf2d | 302 | py | Python | tests/test_core.py | wolfinger/pysurfline | f0c98ea06ce9f26f5ba9e3b94ac98963150f7ddc | [
"MIT"
] | null | null | null | tests/test_core.py | wolfinger/pysurfline | f0c98ea06ce9f26f5ba9e3b94ac98963150f7ddc | [
"MIT"
] | null | null | null | tests/test_core.py | wolfinger/pysurfline | f0c98ea06ce9f26f5ba9e3b94ac98963150f7ddc | [
"MIT"
] | 1 | 2022-03-05T20:12:10.000Z | 2022-03-05T20:12:10.000Z | """
test core classes.
"""
import pytest
from pysurfline import URLBuilder
def test_URLBuilder():
    """URLBuilder should assemble the wave-forecast endpoint with spotId as a query parameter."""
    params={"spotId":"5842041f4e65fad6a7708890"}
    u=URLBuilder(type="wave",params=params)
    assert u.url == "https://services.surfline.com/kbyg/spots/forecasts/wave?spotId=5842041f4e65fad6a7708890"
35f90c3c6d648a57e5ed332bfc6270f26172d995 | 380 | py | Python | notifications_plus/serializers.py | DjangoStudyTeam/django-notifications-plus | 2c2516d9f99ec5210e45f09c79accaacbdd290fb | [
"MIT"
] | null | null | null | notifications_plus/serializers.py | DjangoStudyTeam/django-notifications-plus | 2c2516d9f99ec5210e45f09c79accaacbdd290fb | [
"MIT"
] | null | null | null | notifications_plus/serializers.py | DjangoStudyTeam/django-notifications-plus | 2c2516d9f99ec5210e45f09c79accaacbdd290fb | [
"MIT"
] | null | null | null | from rest_framework import serializers
from notifications_plus import get_notification_model
# Resolve the concrete notification model once at import time.
# NOTE(review): get_notification_model suggests a swappable-model setup — confirm.
NotificationModel = get_notification_model()
class NotificationListSerializer(serializers.ModelSerializer):
    """Serializer used for listing notifications (content, read state, timestamps)."""
    class Meta:
        model = NotificationModel
        fields = [
            "content",
            "unread",
            "created_at",
            "recipient",
        ]
| 22.352941 | 62 | 0.657895 |
dbba14ab37e13046716b902eac3d5451751e98f5 | 2,508 | py | Python | guessingGameApp/views.py | marjohnson/w11D1-Django | 630786c9f84f2c32a3d889cd965d3f5455d3f363 | [
"MIT"
] | null | null | null | guessingGameApp/views.py | marjohnson/w11D1-Django | 630786c9f84f2c32a3d889cd965d3f5455d3f363 | [
"MIT"
] | null | null | null | guessingGameApp/views.py | marjohnson/w11D1-Django | 630786c9f84f2c32a3d889cd965d3f5455d3f363 | [
"MIT"
] | 8 | 2021-04-12T23:45:38.000Z | 2021-06-12T20:33:48.000Z | from django.shortcuts import render, redirect
count = 1
FOOTER = {
'Created by Amazon Career Choice Houston Class',
'© 2021'
}
def gameHome(request):
    """Render the landing page where players enter names and a number range.

    Fix: removed two unreachable statements that followed the return in the
    original (`count = 1` and `request.session.flush()`) — dead code.
    """
    context = {
        'footer': FOOTER
    }
    return render(request, 'game.html', context)
def clearGame(request):
    """Wipe all session state and send the player back to the setup page."""
    request.session.clear()
    return redirect('/gameApp/')
def setup_game(request):
    """Initialise session state for a new round.

    Reads POST fields ``number_range``, ``username1`` and ``username2`` and
    seeds the session with the guessable numbers 1..number_range inclusive.
    """
    upper = int(request.POST['number_range'])
    request.session['username1'] = request.POST['username1']
    request.session['username2'] = request.POST['username2']
    # Idiom fix: list(range(...)) replaces the manual append loop; a list
    # (not a range) keeps the session value JSON-serializable.
    request.session['number_range'] = list(range(1, upper + 1))
    request.session['current_user'] = count  # module-level 1: player 1 goes first
    request.session['game_over'] = False
    return redirect('/gameApp/game')
def game(request):
    """Render the guessing page with both players and the current number range."""
    context = {
        "user1": request.session['username1'],
        "user2": request.session['username2'],
        "number_range": request.session['number_range'],
        "current_user":request.session['current_user'],
        'footer': FOOTER
    }
    return render(request,"thegame.html",context)
def process_game(request):
    """Record player 1's pick, then compare player 2's guess against it."""
    # need to cast this to integer, (issue in class)
    current_user = int(request.POST["current_user"])
    if current_user == 1:
        # Player 1 just chose: advance the turn and stash the choice.
        request.session['current_user'] = count + 1
        chosen_number = request.POST['chosen_number']
        request.session['user1_choice'] = chosen_number
        return redirect('/gameApp/game')
    else:
        # Player 2's guess; both values are POST strings, so this is a
        # string comparison — consistent because both come from forms.
        chosen_number = request.POST['chosen_number']
        if request.session['user1_choice'] == chosen_number:
            results = f"You won the game! Want to play again?"
            request.session['game_over'] = True
        else:
            results = f"You lost the game"
        request.session['results'] = results
        return redirect('/gameApp/results')
def results(request):
    """Render the win/lose page using the outcome stored by process_game."""
    context = {
        "results": request.session['results'],
        "game_over": request.session['game_over'],
        'footer': FOOTER
    }
    return render(request,"gameResults.html",context)
def splitOdds(request):
    """Swap the players, reset the turn, and shrink the number range.

    NOTE(review): `range(1, len//2)` iterates len//2 - 1 times, so this pops
    one FEWER element than half the list — looks like an off-by-one if the
    intent was to halve the range. Confirm intended behaviour before fixing.
    """
    request.session['current_user'] = 1
    # Swap usernames so the other player picks first next round.
    temp = request.session['username1']
    request.session['username1'] = request.session['username2']
    request.session['username2'] = temp
    old_arr = request.session['number_range']
    for i in range(1,int(len(old_arr)/2)):
        request.session['number_range'].pop()
    return redirect('/gameApp/game')
| 26.125 | 63 | 0.640351 |
1a6ab3c5b3915563f75ec67a8945e4ab095a3136 | 2,698 | py | Python | mccole/check.py | gvwilson/mccole-old | 5d724a64e7e91d39d72947798f5ee38bfdf96a23 | [
"MIT"
] | 1 | 2022-01-08T04:10:46.000Z | 2022-01-08T04:10:46.000Z | mccole/check.py | gvwilson/mccole | 5d724a64e7e91d39d72947798f5ee38bfdf96a23 | [
"MIT"
] | 43 | 2022-01-21T11:04:39.000Z | 2022-02-11T21:11:54.000Z | mccole/check.py | gvwilson/mccole-old | 5d724a64e7e91d39d72947798f5ee38bfdf96a23 | [
"MIT"
] | 1 | 2022-01-23T18:52:23.000Z | 2022-01-23T18:52:23.000Z | """Check consistency of project."""
import re
# Limits enforced on included code blocks.
MAX_CODE_LINES = 120
MAX_LINE_LENGTH = 120
def check(options, config, xref, seen):
    """Check various aspects of project."""
    # Flag -> checker, in the same order the original tested them.
    dispatch = (
        ("bib", lambda: _check_bib(config, seen)),
        ("code", lambda: _check_code(config)),
        ("gloss", lambda: _check_gloss(config, seen)),
    )
    for flag, run in dispatch:
        if flag in options.check:
            run()
# ----------------------------------------------------------------------
def _check_bib(config, seen):
    """Check consistency of bibliography: unused keys, ordering, key format."""
    _show_unused("biblography", config.bib_keys - seen.cite)
    # Expected key shape: AuthorName + 4-digit year + optional suffix letter.
    key = re.compile(r"^[A-Z][A-Za-z]+\d{4}[a-z]?$")
    previous = None
    for entry in config.bib_data:
        # Alphabetic order by key (ignoring case).
        if previous and (previous["ID"].lower() > entry["ID"].lower()):
            print(f"Bibliography entry {entry['ID']} out of order.")
        previous = entry
        # Keys are Name and 4-digit year.
        if not key.match(entry["ID"]):
            print(f"Badly-formatted bibliography key {entry['ID']}.")
def _check_code(config):
    """Check code inclusions: language class, block length, and line length."""
    # Matches titled <pre><code> blocks in each page's rendered HTML.
    pre = re.compile(
        r'<pre\s+title="(.*?)"><code\s+class="(.*?)">(.*?)</code></pre>', re.DOTALL
    )
    lang = re.compile(r"^language-.+$")
    for info in config.pages:
        for (title, cls, body) in pre.findall(info.html):
            body = body.split("\n")
            # Code class.
            if not lang.search(cls):
                print(
                    f"Code block {title} in {info.src} has unrecognized class {cls}."
                )
            # Number of lines.
            if len(body) > MAX_CODE_LINES:
                print(
                    f"Code block {title} in {info.src} has {len(body)} lines (> {MAX_CODE_LINES})."
                )
            # Line length.
            long_lines = [x for x in body if len(x) > MAX_LINE_LENGTH]
            if long_lines:
                print(
                    f"Code block {title} in {info.src} has {len(long_lines)} long lines (> {MAX_LINE_LENGTH})."
                )
def _check_gloss(config, seen):
    """Check consistency of glossary: unused keys and alphabetical ordering."""
    _show_unused("glossary", config.gloss_keys - seen.gloss_ref)
    previous = None
    # Entries are multilingual; compare terms in the configured language.
    lang = config.lang
    for entry in config.gloss_data:
        # Alphabetic order by key (ignoring case).
        if previous and (previous[lang]["term"].lower() > entry[lang]["term"].lower()):
            print(f"Glossary entry {entry[lang]['key']} out of order.")
        previous = entry
def _show_unused(kind, unused):
if not unused:
return
print(f"Unused {kind} keys:")
for key in sorted(unused):
print(f"- {key}")
| 29.977778 | 111 | 0.547072 |
903ebffccda979ed92f3e639f32c5d904c45f3fd | 399 | py | Python | projawwards/wsgi.py | Kipkorir2017/Proj_Awwards | 3b5f898b725e14f28448019f85306845ecefe3a2 | [
"MIT"
] | null | null | null | projawwards/wsgi.py | Kipkorir2017/Proj_Awwards | 3b5f898b725e14f28448019f85306845ecefe3a2 | [
"MIT"
] | null | null | null | projawwards/wsgi.py | Kipkorir2017/Proj_Awwards | 3b5f898b725e14f28448019f85306845ecefe3a2 | [
"MIT"
] | null | null | null | """
WSGI config for projawwards project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projawwards.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
d04b7b3221080e4571c498d0128f2b158f5a0c0f | 2,478 | py | Python | get_strat_names.py | UW-Macrostrat/Global-Geologic-Units | a79c837a30d377b7e91805517f41f61902b9ef7a | [
"CC-BY-4.0"
] | 2 | 2018-08-22T20:52:56.000Z | 2021-01-28T04:40:18.000Z | get_strat_names.py | UW-Macrostrat/Global-Geologic-Units | a79c837a30d377b7e91805517f41f61902b9ef7a | [
"CC-BY-4.0"
] | null | null | null | get_strat_names.py | UW-Macrostrat/Global-Geologic-Units | a79c837a30d377b7e91805517f41f61902b9ef7a | [
"CC-BY-4.0"
] | 2 | 2018-08-22T15:26:08.000Z | 2018-09-19T16:27:18.000Z | # =====================================
#
# get_strat_names.py
#
# JULIA WILCOTS
# 08/21/18
#
# assemble a list of geologic unit flag words ('Formation', 'Limestone') from Macrostrat.
# Will use this list as flags in future global-geologic-units app.
# ======================================
import urllib2, csv, sys
# function from stromatolites app. probably a different (faster?) sol'n out there, but this
# works for now.
def download_csv( url ):
    """Fetch a CSV from *url* and return it as a dict of column tuples.

    Each header maps to a tuple of that column's values, plus a 'headers'
    key listing all field names sorted. (Python 2: uses urllib2.)
    """
    #return variable
    dump_dict = {}
    #get strat_names from Macrostrat API
    dump = urllib2.urlopen( url )
    dump = csv.reader(dump)
    #unpack downloaded CSV as list of tuples (transpose rows -> columns)
    #--> length of VARIABLE == number of fields
    #--> length of VARIABLE[i] == number of rows
    #--> VARIABLE[i][0] = header name
    cols = list(zip(*dump))
    #key names correspond to field names (headers in the CSV file)
    for field in cols:
        dump_dict[field[0]]=field[1:]
    dump_dict['headers'] = sorted(dump_dict.keys())
    return dump_dict
# import unit names from macrostrat
#strat_dict = download_csv( 'https://macrostrat.org/api/defs/strat_names?all&format=csv' )
# test w/ fewer lines (single reference subset):
strat_dict = download_csv( 'https://macrostrat.org/api/defs/strat_names?ref_id=1&format=csv' )
strat_names = strat_dict['strat_name_long'] # we want things like 'Formation' and 'Granite'
# add in macrostrat lithologies
lith_dict = download_csv( 'https://dev.macrostrat.org/api/defs/lithologies?all&format=csv' )
lith_names = lith_dict['name']
#print len(lith_names) # 184
#print len(strat_names) # 9297
all_names = strat_dict['strat_name_long'] and all_names or None  # (see next line)
| 30.219512 | 119 | 0.686441 |
551a3542462eb8238206bb2a2f94c2e79a4da211 | 1,893 | py | Python | tests/test_hello.py | steingabelgaard/reportlab | b9a537e8386fb4b4b80e9ec89e0cdf392dbd6f61 | [
"BSD-3-Clause"
] | 55 | 2019-09-21T02:45:18.000Z | 2021-12-10T13:38:51.000Z | tests/test_hello.py | cnauroth/reportlab | 377d4ff58491dc6de48551e730c3d7f72db783e5 | [
"BSD-3-Clause"
] | 4 | 2019-09-26T03:16:50.000Z | 2021-12-10T13:40:49.000Z | tests/test_hello.py | cnauroth/reportlab | 377d4ff58491dc6de48551e730c3d7f72db783e5 | [
"BSD-3-Clause"
] | 26 | 2019-09-25T03:54:30.000Z | 2022-03-21T14:03:12.000Z | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__="""most basic test possible that makes a PDF.
Useful if you want to test that a really minimal PDF is healthy,
since the output is about the smallest thing we can make."""
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import unittest
from reportlab.pdfgen.canvas import Canvas
from reportlab.platypus.paraparser import _greekConvert
class HelloTestCase(unittest.TestCase):
    "Simplest test that makes PDF"

    def test(self):
        # Render a minimal one-page PDF, exercising non-ASCII document metadata.
        c = Canvas(outputfile('test_hello.pdf'))
        #Author with Japanese text
        c.setAuthor(b'\xe3\x83\x9b\xe3\x83\x86\xe3\x83\xab\xe3\x83\xbbe\xe3\x83\x91\xe3\x83\xb3\xe3\x83\x95\xe3\x83\xac\xe3\x83\x83\xe3\x83\x88')
        #Subject with Arabic magic
        c.setSubject(u'\u0643\u0644\u0627\u0645 \u0639\u0631\u0628\u064a')
        c.setFont('Helvetica-Bold', 36)
        c.drawString(100,700, 'Hello World!')
        # Second line: the same greeting run through the Greek symbol converter.
        c.drawString(100,700-36*1.2, _greekConvert('Hello Brave New World')+'!')
        c.save()

    def test_rl_config_reset(self):
        # rl_config._reset() must roll module-level registries back to their
        # startup state, discarding entries added afterwards.
        from reportlab import rl_config
        from reportlab.pdfbase import pdfmetrics, _fontdata
        tfd = pdfmetrics._typefaces
        fbn = _fontdata.fontsByName
        # Pollute both registries with sentinel keys.
        tfd[' a ']=1
        fbn[' b ']=1
        ntfd = len(tfd)
        nfbn = len(fbn)
        from reportlab.lib import sequencer
        seq = sequencer.getSequencer()
        seq._dingo = 1
        rl_config._reset()
        # The reset should have removed the sentinels again.
        assert not hasattr(seq,'_dingo')
        assert ' a ' not in tfd and len(tfd)<ntfd
        # NOTE(review): ' b ' was the key inserted into fbn, yet ' a ' is
        # checked here — confirm whether this assertion tests what it intends.
        assert ' a ' not in fbn and len(fbn)<nfbn
def makeSuite():
    """Build the unittest suite for this module (used by the test runner)."""
    return makeSuiteForClasses(HelloTestCase)

#noruntests
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| 35.055556 | 145 | 0.684099 |
f8a5675594562c37abe362534dce20d4c8c301e2 | 1,942 | py | Python | test/test4.py | Dolgalad/xsd2tkform | 27f0d5bd1d9b6816982c18c45323ff7d1191efca | [
"MIT"
] | null | null | null | test/test4.py | Dolgalad/xsd2tkform | 27f0d5bd1d9b6816982c18c45323ff7d1191efca | [
"MIT"
] | null | null | null | test/test4.py | Dolgalad/xsd2tkform | 27f0d5bd1d9b6816982c18c45323ff7d1191efca | [
"MIT"
] | null | null | null | from xsd_complextype import XSDComplexTypeFrame
from scrollframe import ScrollFrame
from lxml import etree
import tkinter as tk
if __name__=="__main__":
    print("Testing complex types")
    # load the XSD schema
    xsd_schema = etree.parse("spase-2.3.1.xsd")
    # store all simple and complex type definitions keyed by their names
    simple_types = {e.attrib["name"]:e for e in xsd_schema.iter("{*}simpleType")}
    complex_types = {e.attrib["name"]:e for e in xsd_schema.iter("{*}complexType")}

    # create mainwindow and populate
    mainwindow = tk.Tk()
    mainframe = tk.Frame(mainwindow)
    scrollFrame = ScrollFrame(mainframe) # add a new scrollable frame.

    # populate window with all simpleTypes found in the XSD schema
    # NOTE(review): c starts at 1 and the loop breaks once it hits 0, so only
    # the FIRST complexType is actually rendered — confirm this is intended.
    c=1
    form_frame = None
    for complex_type in xsd_schema.iter("{*}complexType"):
        form_frame = XSDComplexTypeFrame(parent = scrollFrame.viewPort,\
                element = complex_type,\
                simple_types=simple_types,\
                complex_types=complex_types)
        form_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
        c-=1
        if c==0:
            break

    scrollFrame.pack(side="top", fill="both", expand=True)
    mainframe.pack(side="top", fill="both", expand=True)

    # add a submit and cancel button at bottom
    action_frame = tk.Frame(scrollFrame.viewPort)

    def save_tree(t):
        # Serialize the form's XML tree to "mytree.xml" on disk.
        with open("mytree.xml","wb") as f:
            a=etree.tostring(t, pretty_print=True)
            f.write(a)
            f.close()  # NOTE(review): redundant — the `with` block already closes f

    submit_button = tk.Button(action_frame, text="Submit", command=lambda form=form_frame: save_tree(form.get_content()))
    cancel_button = tk.Button(action_frame, text="Cancel", command=mainwindow.quit)
    submit_button.pack(side=tk.LEFT, fill=tk.X, expand=1)
    cancel_button.pack(side=tk.RIGHT, fill=tk.X, expand=1)
    action_frame.pack(side=tk.TOP, fill=tk.X, expand=1)

    # start event loop
    mainwindow.mainloop()
| 33.482759 | 121 | 0.660659 |
4eedbadf3d91cee9eca5013dbc56bc32b49d29a5 | 9,853 | py | Python | Software/Python/grove_mini_motor_driver/grove_mini_motor_driver.py | rovercode/GrovePi | 9fccb3cc339de073652a95caba33210aac1d70ec | [
"MIT"
] | null | null | null | Software/Python/grove_mini_motor_driver/grove_mini_motor_driver.py | rovercode/GrovePi | 9fccb3cc339de073652a95caba33210aac1d70ec | [
"MIT"
] | null | null | null | Software/Python/grove_mini_motor_driver/grove_mini_motor_driver.py | rovercode/GrovePi | 9fccb3cc339de073652a95caba33210aac1d70ec | [
"MIT"
] | null | null | null | from sys import platform
import datetime
# Library written for Python 3!
# Fixed I2C addresses of the two DRV8830 channels on the Grove board.
left_channel = 0x60
right_channel = 0x62
# function for returning a SMBus object
# checks the Rpi version before it selects the bus
def getNewSMBus():
    """Create and return an SMBus handle appropriate for the current platform.

    On Windows IoT ('uwp') uses winrt_smbus; otherwise uses smbus (falling
    back to smbus2) and picks the bus number from the Raspberry Pi board
    revision reported by RPi.GPIO.
    """
    bus = None
    if platform == 'uwp':
        import winrt_smbus as smbus
        bus = smbus.SMBus(1)
    else:
        try:
            import smbus
        except:
            import smbus2 as smbus
        import RPi.GPIO as GPIO
        revision = GPIO.RPI_REVISION
        # Board revisions 2 and 3 expose I2C on bus 1; earlier boards on bus 0.
        if revision == 2 or revision == 3:
            bus = smbus.SMBus(1)
        else:
            bus = smbus.SMBus(0)
    return bus
# function for returning a formatted time date
def getTime():
    """Return the current local time formatted as 'MM-Mon-YYYY HH:MM:SS.ffffff'."""
    now = datetime.datetime.now()
    return now.strftime("%m-%b-%Y %H:%M:%S.%f")
# function for mapping a value which goes from
# left_min to left_max to right_min & right_max
def translateValues(value, left_min, left_max, right_min, right_max):
    """Linearly map `value` from [left_min, left_max] onto [right_min, right_max].

    The result is a float; `value` need not lie inside the source range
    (values outside it extrapolate linearly).
    """
    # Fraction of the way through the source interval (0.0 .. 1.0).
    fraction = float(value - left_min) / float(left_max - left_min)
    # Project that fraction onto the destination interval.
    return right_min + fraction * (right_max - right_min)
# class for the DRV8830 driver
# the Grove Mini Motor driver is made of 2 DRV8830 drivers
# each of it controlling a motor channel
# the driver communicates via I2C
class DRV8830:
    """Controls one DRV8830 H-bridge motor channel over I2C (SMBus)."""

    # the constructor takes an I2C address and an optional SMBus object
    # if the SMBus object is not provided, it will create one on its own
    def __init__(self, channel_address, _bus = None):
        self.address = channel_address
        if _bus is None:
            self.bus = getNewSMBus()
        else:
            self.bus = _bus

        # fault register address and bit positions for each fault condition
        self.FAULT_REG = 0x01
        self.CLEAR = 0x80
        self.ILIMIT = 0x10
        self.OTS = 0x08
        self.UVLO = 0x04
        self.OCP = 0x02
        self.FAULT = 0x01

        # control register address and byte commands for it
        self.CONTROL_REG = 0x00
        self.STANDBY = 0x00
        self.REVERSE = 0x01
        self.FORWARD = 0x02
        self.BRAKE = 0x03

        # minimum speed in hexa
        self.MIN_SPEED = 0x06
        # maximum speed in hexa
        self.MAX_SPEED = 0x3F

        # dictionary for the fault register
        self.FAULT_TABLE = {
            self.ILIMIT : 'extended current limit event',
            self.OTS : 'overtemperature condition',
            self.UVLO : 'undervoltage lockout',
            self.OCP : 'overcurrent event',
            self.FAULT : 'unknown condition'
        }
        self.FAULT_TABLE_KEYS = list(self.FAULT_TABLE.keys())

    # function for actuating the motor on the given address
    # state might be = {STANDBY, REVERSE, FORWARD, BRAKE}
    # percentage_speed can be = 0 -> 100 (represents the percentage of the maximum available thrust)
    def motorWrite(self, state, percentage_speed = 0):
        """Write a drive command (state + speed) to the control register.

        Raises:
            RuntimeWarning: if the chip reported any fault after the write;
                the message joins all fault descriptions with '; '.
        """
        # Map 0-100% onto the chip's speed range, then pack the 6-bit speed
        # and the 2-bit state into a single control byte.
        calculated_speed = int(translateValues(percentage_speed, 0, 100, self.MIN_SPEED, self.MAX_SPEED))
        register_value = (calculated_speed << 2) + state
        self.bus.write_byte_data(self.address, self.CONTROL_REG, register_value)

        fault_strings = self.__readFaults()
        # Clear any latched faults so the next write starts from a clean slate.
        self.bus.write_byte_data(self.address, self.FAULT_REG, self.CLEAR)

        # in case we detect a fault
        # raise a RuntimeWarning with the detailed information
        if not fault_strings is None:
            result = "; ".join(fault_strings)
            raise RuntimeWarning(result)

    # private function for reading fault reports from the DRV8830 driver
    # returns a list of strings and each of the strings is about an encountered fault
    def __readFaults(self):
        faults_data = self.bus.read_byte_data(self.address, self.FAULT_REG)
        string = None
        for fault_key in self.FAULT_TABLE_KEYS:
            # Each set bit in the fault register maps to one description.
            if faults_data & fault_key > 0:
                if string is None:
                    string = []
                string.append(self.FAULT_TABLE[fault_key])
        # Returns None when no fault bit was set.
        return string

    # whenever the object is freed, we shutdown the motors
    # if the motors aren't shut down, then they will continue to work/spin
    # even if you "reboot" the motor driver
    def __del__(self):
        self.motorWrite(self.STANDBY)
# class for managing the 2 DRV8830 drivers the Grove Mini Motor Driver has
class MiniMotorDriver:
    """High-level interface to the Grove Mini Motor Driver (two DRV8830 channels)."""

    # obviously, we need the two I2C addresses of the DRV8830 drivers
    # the 2 addresses are: 0x60 & 0x62
    def __init__(self, ch1, ch2, _bus = None):
        # NOTE(review): self.bus is never forwarded to the DRV8830 children,
        # so each channel opens its own SMBus handle — confirm this is intended.
        if _bus is None:
            self.bus = getNewSMBus()
        else:
            self.bus = _bus
        self.left_motor = DRV8830(ch1)
        self.right_motor = DRV8830(ch2)
        self.display_faults = False
        # variables for the implementation I'm bringing on 24th of April
        #self.wheel_track = 0.01
        #self.wheel_diameter = 0.05
        #self.max_wheel_rpm = 1.2

    # private function for printing in a nicely formatted way the strings
    def __print(self, *strings):
        # Each argument gets wrapped in brackets: "[a][b][c]".
        message_string = ""
        for string in strings:
            message_string += "[" + string + "]"
        print(message_string)

    # enable / disable displaying driver operations / statuses / warnings
    def setDisplayFaults(self, choice = True):
        self.display_faults = choice

    # private function which is centered on raising exceptions
    # you don't need to care about it
    # leave it to the pro: haha!
    def __writeMotor(self, motor, state, fail_description, speed = 0):
        # Forwards the command to the channel; fault RuntimeWarnings are
        # swallowed, optionally logged when display_faults is enabled.
        try:
            motor.motorWrite(state, speed)
        except RuntimeWarning as message:
            if self.display_faults:
                self.__print(getTime(), fail_description, str(message))

    # command the 2 motors to go forward
    # speed = 0-100 %
    def moveForward(self, speed):
        self.__print(getTime(), "forward", "speed = " + str(speed) + "%")
        self.__writeMotor(self.left_motor, self.left_motor.FORWARD, "left motor warning", speed)
        self.__writeMotor(self.right_motor, self.right_motor.FORWARD, "right motor warning", speed)

    # command the 2 motors to go backwards
    # speed = 0-100%
    def moveBackwards(self, speed):
        self.__print(getTime(), "reverse", "speed = " + str(speed) + "%")
        self.__writeMotor(self.left_motor, self.left_motor.REVERSE, "left motor warning", speed)
        self.__writeMotor(self.right_motor, self.right_motor.REVERSE, "right motor warning", speed)

    # command the left motor to go in one of the set directions at a certain speed
    # direction = {'FORWARD', 'REVERSE'}
    # speed = 0-100%
    def setLeftMotor(self, direction, speed):
        # Unknown direction strings are silently ignored.
        if direction == "FORWARD":
            self.__print(getTime(), "left motor", "speed = " + str(speed) + "%")
            self.__writeMotor(self.left_motor, self.left_motor.FORWARD, "left motor warning", speed)
        elif direction == "REVERSE":
            self.__print(getTime(), "left motor", "speed = " + str(speed) + "%")
            self.__writeMotor(self.left_motor, self.left_motor.REVERSE, "left motor warning", speed)

    # command the right motor to go in one of the set directions at a certain speed
    # direction = {'FORWARD', 'REVERSE'}
    # speed = 0-100%
    def setRightMotor(self, direction, speed):
        # Unknown direction strings are silently ignored.
        if direction == "FORWARD":
            self.__print(getTime(), "right motor", "speed = " + str(speed) + "%")
            self.__writeMotor(self.right_motor, self.right_motor.FORWARD, "right motor warning", speed)
        elif direction == "REVERSE":
            self.__print(getTime(), "right motor", "speed = " + str(speed) + "%")
            self.__writeMotor(self.right_motor, self.right_motor.REVERSE, "right motor warning", speed)

    # command which forces the left motor to stop ASAP
    # it uses counter acts with an electromotive force in order to stop the motor from spinning faster
    # might raise some warnings depending on how good the power supply is
    def stopLeftMotor(self):
        self.__print(getTime(), "left motor", "stop")
        self.__writeMotor(self.left_motor, self.left_motor.BRAKE, "left motor warning")

    # command which forces the right motor to stop ASAP
    # it uses counter acts with an electromotive force in order to stop the motor from spinning faster
    # might raise some warnings depending on how good the power supply is
    def stopRightMotor(self):
        self.__print(getTime(), "right motor", "stop")
        self.__writeMotor(self.right_motor, self.right_motor.BRAKE, "right motor warning")

    # command which forces both the motor to stop ASAP
    # it uses counter acts with an electromotive force in order to stop the motors from spinning faster
    # might raise some warnings depending on how good the power supply is
    def stopMotors(self):
        self.stopLeftMotor()
        self.stopRightMotor()

    # command which kills the power to the motors
    def disableMotors(self):
        self.__print(getTime(), "standby motors")
        self.__writeMotor(self.left_motor, self.left_motor.STANDBY, "left motor warning")
        self.__writeMotor(self.right_motor, self.right_motor.STANDBY, "right motor warning")

    """
    Will be implemented on Monday

    def setWheelTrack(self, centimeters):
        self.wheel_track = centimeters / 100

    def setWheelDiameter(self, centimeters):
        self.wheel_diameter = centimeters / 100

    def setWheelMaxRPM(self, RPM):
        self.max_wheel_rpm = RPM

    def bangLeft(self, degrees, direction, speed):
        # code
        #

    def bangRight(self, degrees, direction,speed):
        # code
        #

    def rotateOnTheSpot(self, degrees, orientation):
        # code
        #
    """
| 37.181132 | 105 | 0.653507 |
b4afe832b517c3067323a5b59a29cb1694ee27b7 | 6,904 | py | Python | initial_report/report.py | maxhully/initial-report | 6f38104639434f04b279baa231b31bb82cf4daf6 | [
"MIT"
] | null | null | null | initial_report/report.py | maxhully/initial-report | 6f38104639434f04b279baa231b31bb82cf4daf6 | [
"MIT"
] | null | null | null | initial_report/report.py | maxhully/initial-report | 6f38104639434f04b279baa231b31bb82cf4daf6 | [
"MIT"
] | null | null | null | import itertools
import math
from collections import namedtuple
import geopandas
import pandas
from networkx import connected_components
import maup
from gerrychain import Graph
from maup.repair import holes_of_union
from .plot import bar_chart, choropleth, graph_plot, histogram, overlap_plot
class Report:
    """A titled group of ReportItem entries."""

    def __init__(self, title, items):
        self.title = title
        self.items = items

    @property
    def slug(self):
        """URL-friendly id: lowercased title words joined by hyphens."""
        words = self.title.split()
        return "-".join(map(str.lower, words))
class ReportItem:
    """One labelled report entry: a value, optional image and status icons."""

    _STATUS_ICONS = {True: "✅", False: "❌"}

    def __init__(
        self, name, number="", image="", *, success=None, warning=None, description=None
    ):
        self.name = name
        # Non-string numbers are rendered with thousands separators.
        self.number = number if isinstance(number, str) else "{:,}".format(number)
        self.image = image
        indicator = ""
        if success is not None:
            indicator += self._STATUS_ICONS.get(success, "")
        if warning:
            indicator += "⚠️"
        self.indicator = indicator
        self.description = description
def get_degrees(adj, index):
    """Node degrees implied by an adjacency series, reindexed to `index`.

    Counts how often each node id appears on either side of the (i, j)
    MultiIndex of `adj`; nodes absent from every pair get degree 0.
    """
    endpoints = list(adj.index.get_level_values(0)) + list(
        adj.index.get_level_values(1)
    )
    counts = pandas.Series(endpoints).value_counts()
    return counts.reindex(index).fillna(0)
def which_component(components, node):
    """Index of the first component containing `node`, or None if absent."""
    return next(
        (
            position
            for position, members in enumerate(components)
            if node in members
        ),
        None,
    )
def connectivity_report(graph, geometries):
    """Build the 'Connectivity' report.

    Always reports whether the graph is connected; when it is not, also
    tags each geometry with its component id and renders a choropleth
    of the connected components.
    """
    components = list(connected_components(graph))
    n_components = len(components)
    connected = n_components == 1

    items = [
        ReportItem("Is Connected", "Yes" if connected else "No", success=connected)
    ]

    if not connected:
        # Tag every geometry with the id of the component it belongs to.
        geometries["_component_id"] = geometries.index.map(
            lambda node: which_component(components, node)
        )
        plot = choropleth(
            geometries, column="_component_id", cmap="tab20", linewidth=0.25
        )
        items.append(
            ReportItem(
                "Connected Components",
                number=n_components,
                success=connected,
                image=plot,
            )
        )

    return Report("Connectivity", items)
def graph_report(geometries, adj):
    """Summarise the adjacency graph: overview plots plus node/edge counts."""
    items = [
        ReportItem("Plot", image=choropleth(geometries, linewidth=0.5)),
        ReportItem("Graph Plot", image=graph_plot(geometries, adj)),
        ReportItem("Nodes", len(geometries)),
        ReportItem("Edges", len(adj)),
    ]
    return Report("Graph", items)
def degree_report(geometries, adj):
    """Report on node degrees: mean, outlier count, histogram and choropleth."""
    degrees = get_degrees(adj, geometries.index)
    geometries["degree"] = degrees

    # Outliers: degree more than three standard deviations above the mean.
    threshold = degrees.mean() + 3 * degrees.std()
    degree_outliers = (degrees > threshold).sum()

    mean_item = ReportItem("Mean Degree", "{:,.4f}".format(degrees.mean()))
    outlier_item = ReportItem(
        "Degree Outliers",
        number="{:,} ({:,.2f}%)".format(
            degree_outliers, 100 * degree_outliers / len(geometries)
        ),
        description=(
            "Nodes with degree more than 3 standard deviations above the mean degree."
        ),
    )
    histogram_item = ReportItem(
        "Degree Histogram",
        image=histogram(degrees, bins=range(0, math.ceil(degrees.max()))),
    )
    choropleth_item = ReportItem(
        "Degree Choropleth",
        image=choropleth(
            geometries,
            cmap="inferno",
            column="degree",
            linewidth=0,
            legend=True,
        ),
    )

    return Report(
        "Node Degrees",
        [mean_item, outlier_item, histogram_item, choropleth_item],
    )
def topology_report(geometries, adj):
    """Report topology problems: invalid geometries, islands, overlaps, gaps.

    Args:
        geometries: GeoSeries/GeoDataFrame of the units being checked.
        adj: adjacency GeoSeries indexed by (i, j) pairs; entries with
            positive area indicate overlapping neighbours.

    Returns:
        Report whose items each carry a count, an overlay plot and a
        success flag (success means "none found").
    """
    overlaps = adj[adj.area > 0]
    gaps = holes_of_union(geometries)
    # Islands are nodes that appear in no adjacency pair at all.
    adjacent_nodes = set(i for pair in adj.index for i in pair)
    islands = geometries.loc[list(set(geometries.index) - adjacent_nodes)]
    # FIX: invert the boolean validity mask with `~` (the idiomatic pandas
    # inversion operator) instead of unary `-`, which is not supported on
    # boolean arrays in modern numpy/pandas.
    invalid = geometries.loc[~geometries.is_valid]
    return Report(
        "Topology",
        [
            ReportItem(
                "Invalid Geometries",
                len(invalid),
                overlap_plot(geometries, invalid),
                success=len(invalid) == 0,
            ),
            ReportItem(
                "Islands",
                len(islands),
                overlap_plot(geometries, islands),
                success=len(islands) == 0,
            ),
            ReportItem(
                "Overlaps",
                len(overlaps),
                overlap_plot(geometries, overlaps),
                success=len(overlaps) == 0,
            ),
            ReportItem(
                "Gaps",
                len(gaps),
                overlap_plot(geometries, gaps),
                success=len(gaps) == 0,
            ),
            # ReportItem("Area Histogram", image=histogram(geometries.area, bins=40)),
        ],
    )
def population_report(geometries, population):
    """Report on the population column: zero-population share, histogram, map."""
    geometries["population"] = population
    num_zero_pop = (population < 1).sum()

    zero_pop_item = ReportItem(
        "Zero-Population Nodes",
        number="{:,} ({:,.2f}%)".format(
            num_zero_pop, 100 * num_zero_pop / len(geometries)
        ),
        # Warn when more than 10% of nodes carry no population at all.
        warning=num_zero_pop > (len(geometries) * 0.1),
        description=(
            "Nodes with zero total population. "
            "A warning appears if more than 10% of nodes have zero population."
        ),
    )
    histogram_item = ReportItem(
        "Population Histogram", image=histogram(population, bins=40)
    )
    choropleth_item = ReportItem(
        "Population Choropleth",
        image=choropleth(
            geometries,
            cmap="cividis",
            column="population",
            linewidth=0,
            legend=True,
        ),
    )

    return Report("Population", [zero_pop_item, histogram_item, choropleth_item])
def generate_reports(geometries, population=None):
    """Run every report on `geometries`.

    The population report is appended only when a population series is given.
    """
    adj = maup.adjacencies(geometries, warn_for_overlaps=False, warn_for_islands=False)
    adj.crs = geometries.crs
    graph = Graph(list(adj.index))

    reports = [graph_report(geometries, adj)]
    reports.append(degree_report(geometries, adj))
    reports.append(connectivity_report(graph, geometries))
    reports.append(topology_report(geometries, adj))
    if population is not None:
        reports.append(population_report(geometries, population))
    return reports
| 29.887446 | 94 | 0.539977 |
c326ff407e6a843a2e4c3484afcb6fecfd55391f | 531 | py | Python | src/pybind/feat/wave_reader_pybind_test.py | aadps/kaldi | cd351bb31c98f9d540c409478cbf2c5fef1853ca | [
"Apache-2.0"
] | null | null | null | src/pybind/feat/wave_reader_pybind_test.py | aadps/kaldi | cd351bb31c98f9d540c409478cbf2c5fef1853ca | [
"Apache-2.0"
] | null | null | null | src/pybind/feat/wave_reader_pybind_test.py | aadps/kaldi | cd351bb31c98f9d540c409478cbf2c5fef1853ca | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2019 Microsoft Corporation (author: Xingyu Na)
# Apache 2.0
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import numpy as np
import kaldi
class TestWaveData(unittest.TestCase):
    """Smoke test for the kaldi WaveData pybind wrapper."""

    def test_duration(self):
        # 16000 samples at a 16 kHz sample rate should last exactly 1 second.
        waveform = kaldi.FloatMatrix(1, 16000)
        wave_data = kaldi.feat.WaveData(samp_freq=16000, data=waveform)
        self.assertEqual(1, wave_data.Duration())


if __name__ == '__main__':
    unittest.main()
| 19.666667 | 71 | 0.713748 |
3a599dbb4f843064f5d58cee674f677fc56d771a | 912 | py | Python | src/pew/cache.py | dylanmccall/pyeverywhere | 2b23b4a70d5b7b958716596688d9989afa024e8d | [
"Apache-2.0"
] | 7 | 2016-06-08T22:18:44.000Z | 2022-01-16T16:53:33.000Z | src/pew/cache.py | dylanmccall/pyeverywhere | 2b23b4a70d5b7b958716596688d9989afa024e8d | [
"Apache-2.0"
] | 21 | 2015-09-02T19:20:16.000Z | 2021-10-24T00:44:29.000Z | src/pew/cache.py | dylanmccall/pyeverywhere | 2b23b4a70d5b7b958716596688d9989afa024e8d | [
"Apache-2.0"
] | 12 | 2018-04-24T02:54:33.000Z | 2021-09-13T09:35:22.000Z | import logging
import os
import pickle
import sys
# Default on-disk cache location; a separate "_test" file is used while the
# test suite runs so real application data is never touched.
cache_filename = "app_data"
if "--test" in sys.argv or ("PEW_RUN_TESTS" in os.environ and os.environ["PEW_RUN_TESTS"] == "1"):
    cache_filename += "_test"
def set_cache_dir(cache_dir):
    """Relocate the cache file into `cache_dir`, keeping its base name."""
    global cache_filename
    cache_filename = os.path.join(cache_dir, os.path.basename(cache_filename))


# Lazily-loaded in-memory cache; populated on the first get_cache() call.
cache = None
def get_cache():
    """Return the in-memory cache, loading it from disk on first use.

    Falls back to an empty dict when no cache file exists, and ensures the
    keys callers rely on are present with sensible defaults.
    """
    global cache
    if cache is None:
        if os.path.exists(cache_filename):
            # Context manager so the file handle is always closed (the old
            # pickle.load(open(...)) form leaked the handle).
            with open(cache_filename, "rb") as f:
                cache = pickle.load(f)
    if cache is None:
        cache = {}
    # create any keys we're looking for with defaults so we don't have to
    # constantly test if keys exist
    if "characters" not in cache:
        cache["characters"] = {}
    return cache
def save_cache():
    """Persist the in-memory cache to disk with pickle."""
    global cache
    # Lazy %-style args so the message is only formatted if the level is on.
    logging.info("Saving cache to %r", cache_filename)
    # Context manager so the file handle is flushed and closed (the old
    # pickle.dump(..., open(...)) form leaked the handle).
    with open(cache_filename, "wb") as f:
        pickle.dump(cache, f)
7151a415eef98c7eea726a4ee0eae6a387095d13 | 4,448 | py | Python | examples/tensorflow/real_time.py | IanTBlack/picamera2 | 4d31a56cdb0d8360e71927e754fc6bef50bec360 | [
"BSD-2-Clause"
] | 71 | 2022-02-15T14:24:34.000Z | 2022-03-29T16:36:46.000Z | examples/tensorflow/real_time.py | IanTBlack/picamera2 | 4d31a56cdb0d8360e71927e754fc6bef50bec360 | [
"BSD-2-Clause"
] | 37 | 2022-02-16T12:35:45.000Z | 2022-03-31T13:18:42.000Z | examples/tensorflow/real_time.py | IanTBlack/picamera2 | 4d31a56cdb0d8360e71927e754fc6bef50bec360 | [
"BSD-2-Clause"
] | 15 | 2022-02-16T12:12:57.000Z | 2022-03-31T15:17:58.000Z | #!/usr/bin/python3
# Copyright (c) 2022 Raspberry Pi Ltd
# Author: Alasdair Allan <alasdair@raspberrypi.com>
# SPDX-License-Identifier: BSD-3-Clause
# A TensorFlow Lite example for Picamera2 on Raspberry Pi OS Bullseye
#
# Install necessary dependences before starting,
#
# $ sudo apt update
# $ sudo apt install build-essentials
# $ sudo apt install libatlas-base-dev
# $ sudo apt install python3-pip
# $ pip3 install tflite-runtime
# $ pip3 install opencv-python==4.4.0.46
# $ pip3 install pillow
# $ pip3 install numpy
#
# and run from the command line,
#
# $ python3 real_time.py --model mobilenet_v2.tflite --label coco_labels.txt
import tflite_runtime.interpreter as tflite
import sys
import os
import argparse
import cv2
import numpy as np
from PIL import Image
from PIL import ImageFont, ImageDraw
from picamera2 import Picamera2, Preview, MappedArray
# Stream resolutions: full-size main stream for display, low-res for inference.
normalSize = (640, 480)
lowresSize = (320, 240)

# Latest detection boxes, shared between the inference loop and the
# camera draw callback.
rectangles = []
def ReadLabelFile(file_path):
    """Parse a label file of '<id> <label>' lines into an {id: label} dict.

    Blank or malformed lines (fewer than two fields) are skipped instead of
    raising IndexError, so trailing newlines in the file are harmless.

    Args:
        file_path: path to the whitespace-separated label file.

    Returns:
        dict mapping integer class id to its label string.
    """
    ret = {}
    with open(file_path, 'r') as f:
        # Stream line by line instead of materialising the whole file.
        for line in f:
            pair = line.strip().split(maxsplit=1)
            if len(pair) < 2:
                # Skip blank/malformed lines rather than crashing.
                continue
            ret[int(pair[0])] = pair[1].strip()
    return ret
def DrawRectangles(request):
    """Camera post-callback: draw the latest detection boxes on the main stream.

    Reads the module-level `rectangles` list (low-res coordinates) and scales
    each box by 2 to match the main stream (640x480 vs the 320x240 lores
    stream), padding by 5 px on every side.
    """
    with MappedArray(request, "main") as m:
        for rect in rectangles:
            rect_start = (int(rect[0] * 2) - 5, int(rect[1] * 2) - 5)
            rect_end = (int(rect[2] * 2) + 5, int(rect[3] * 2) + 5)
            # Green outline (BGRA-style 4-tuple for the mapped frame).
            cv2.rectangle(m.array, rect_start, rect_end, (0, 255, 0, 0))
def InferenceTensorFlow(image, model, output, label=None):
    """Run object detection on a greyscale frame with a TFLite model.

    Repopulates the module-level `rectangles` list with [xmin, ymin, xmax,
    ymax] boxes (in input-image coordinates) for every detection whose score
    exceeds 0.5, and prints each detection's label/score.

    Args:
        image: 2-D greyscale frame from the camera's lores stream.
        model: path to the TFLite detection model file.
        output: unused here — drawing happens in the camera callback.
        label: optional path to a label file mapping class ids to names.
    """
    global rectangles

    if label:
        labels = ReadLabelFile(label)
    else:
        labels = None

    interpreter = tflite.Interpreter(model_path=model, num_threads=4)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Model input size; NHWC layout, so shape[1] = height and shape[2] = width.
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    floating_model = False
    if input_details[0]['dtype'] == np.float32:
        floating_model = True

    rgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    initial_h, initial_w, channels = rgb.shape

    picture = cv2.resize(rgb, (width, height))

    input_data = np.expand_dims(picture, axis=0)
    if floating_model:
        # Normalise uint8 pixels to [-1, 1] for floating-point models.
        input_data = (np.float32(input_data) - 127.5) / 127.5

    interpreter.set_tensor(input_details[0]['index'], input_data)

    interpreter.invoke()

    # SSD-style outputs: boxes, class ids, scores, and the detection count.
    detected_boxes = interpreter.get_tensor(output_details[0]['index'])
    detected_classes = interpreter.get_tensor(output_details[1]['index'])
    detected_scores = interpreter.get_tensor(output_details[2]['index'])
    num_boxes = interpreter.get_tensor(output_details[3]['index'])

    rectangles = []
    for i in range(int(num_boxes)):
        top, left, bottom, right = detected_boxes[0][i]
        classId = int(detected_classes[0][i])
        score = detected_scores[0][i]
        if score > 0.5:
            # NOTE(review): ymin is computed from `bottom` and ymax from `top`,
            # so the vertical pair looks swapped. That is harmless for
            # cv2.rectangle drawing, but confirm before consuming these
            # coordinates anywhere order-sensitive.
            xmin = left * initial_w
            ymin = bottom * initial_h
            xmax = right * initial_w
            ymax = top * initial_h
            if labels:
                print(labels[classId], 'score = ', score)
            else:
                print('score = ', score)
            box = [xmin, ymin, xmax, ymax]
            rectangles.append(box)
def main():
    """Parse CLI args, start the camera and run detection on every lores frame."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='Path of the detection model.', required=True)
    parser.add_argument('--label', help='Path of the labels file.')
    parser.add_argument('--output', help='File path of the output image.')
    args = parser.parse_args()

    if (args.output):
        output_file = args.output
    else:
        output_file = 'out.jpg'

    if (args.label):
        label_file = args.label
    else:
        label_file = None

    picam2 = Picamera2()
    picam2.start_preview(Preview.QTGL)
    # Full-size main stream for display, YUV420 lores stream for inference.
    config = picam2.preview_configuration(main={"size": normalSize},
                                          lores={"size": lowresSize, "format": "YUV420"})
    picam2.configure(config)

    stride = picam2.stream_configuration("lores")["stride"]
    # Draw the detection rectangles onto each main-stream frame after capture.
    picam2.post_callback = DrawRectangles

    picam2.start()

    while True:
        buffer = picam2.capture_buffer("lores")
        # The first stride * height bytes of a YUV420 buffer are the Y
        # (luminance/grey) plane, which is all the detector needs.
        grey = buffer[:stride * lowresSize[1]].reshape((lowresSize[1], stride))
        result = InferenceTensorFlow(grey, args.model, output_file, label_file)


if __name__ == '__main__':
    main()
| 29.263158 | 89 | 0.649955 |
eaab114f53cf572c31b92d9e0c2d5c3e04f1e052 | 2,275 | py | Python | verbcalc/core/translator/numbers.py | LucaMellini/VerbCalc | fc87ae5d9dbc420fe644703febdbcb15a2da6e03 | [
"BSD-3-Clause"
] | null | null | null | verbcalc/core/translator/numbers.py | LucaMellini/VerbCalc | fc87ae5d9dbc420fe644703febdbcb15a2da6e03 | [
"BSD-3-Clause"
] | 9 | 2021-12-06T10:28:59.000Z | 2022-03-31T10:30:38.000Z | verbcalc/core/translator/numbers.py | LucaMellini/VerbCalc | fc87ae5d9dbc420fe644703febdbcb15a2da6e03 | [
"BSD-3-Clause"
] | null | null | null | """
Handles literal numbers.
"""
class Numbers:
    """
    Translates written-out English numbers ("twenty one") into integers.

    Attributes:
        word_int_map: maps every known word to a (scale, increment) pair
            consumed by the accumulator in text_to_int.
    """

    def __init__(self):
        self._units = ["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", ]
        self._tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
        self._scales = ["hundred", "thousand", "million", "billion", "trillion"]
        self.word_int_map = self.create_dict()

    def create_dict(self):
        """
        Returns: a dictionary mapping every literal word to its (scale, value)
            pair, where the pair (a, b) means "multiply the running total by a,
            then add b" — e.g. {"five": (1, 5)}, {"hundred": (100, 0)}.
        """
        mapping = {word: (1, value) for value, word in enumerate(self._units)}
        mapping.update(
            {word: (1, value * 10) for value, word in enumerate(self._tens)}
        )
        # "hundred" scales by 10**2; every later scale word by 10**(3*power).
        mapping.update(
            {word: (10 ** (power * 3 or 2), 0) for power, word in enumerate(self._scales)}
        )
        # "and" is a no-op connector ("one hundred and five").
        mapping["and"] = (1, 0)
        return mapping

    def text_to_int(self, txt):
        """
        Args:
            txt: the literal number to convert

        Returns: the integer value of txt; unknown words are ignored.
        """
        total = 0
        current = 0
        for word in txt.split():
            entry = self.word_int_map.get(word)
            if entry is None:
                continue
            scale, increment = entry
            current = current * scale + increment
            # Words beyond "hundred" close out a group (e.g. "two thousand").
            if scale > 100:
                total += current
                current = 0
        return total + current

    def is_string_an_integer(self, txt):
        """
        Args:
            txt: a string to be tested

        Returns: True when txt is a run of digits, optionally prefixed by '-'.
        """
        if txt.startswith('-'):
            return txt[1:].isdigit()
        return txt.isdigit()
| 28.08642 | 214 | 0.547692 |
e7d4fff9879266464cfbd4a34715de8e41e4daad | 8,505 | py | Python | src/iterative_training/utils/data_generators/base.py | Janowie/iterative_training | eb89c4e64a08b388f871cc975bc952fb8d26f58d | [
"MIT"
] | null | null | null | src/iterative_training/utils/data_generators/base.py | Janowie/iterative_training | eb89c4e64a08b388f871cc975bc952fb8d26f58d | [
"MIT"
] | null | null | null | src/iterative_training/utils/data_generators/base.py | Janowie/iterative_training | eb89c4e64a08b388f871cc975bc952fb8d26f58d | [
"MIT"
] | null | null | null | import typing
import tensorflow as tf
import numpy as np
import pandas as pd
import math
from random import randint, sample
class BaseDataGenerator(tf.keras.utils.Sequence):
    """
    Feeds pre-generated positive/negative training samples to a Keras model
    in batches mixed according to a fixed class ratio. It does not create
    new samples itself.
    """

    def __init__(self,
                 x_set_positive, y_set_positive,
                 x_set_negative, y_set_negative,
                 class_ratio,
                 batch_size):
        self.x_positive = x_set_positive
        self.y_positive = y_set_positive
        self.x_negative = x_set_negative
        self.y_negative = y_set_negative

        # Split each batch between the classes proportionally to class_ratio.
        pos_ratio, neg_ratio = class_ratio
        per_ratio_unit = batch_size / (pos_ratio + neg_ratio)
        self.positive_in_batch = int(pos_ratio * per_ratio_unit)
        self.negative_in_batch = int(neg_ratio * per_ratio_unit)

        self.batch_size = batch_size

    def __len__(self):
        """
        Returns the total number of batches per epoch.
        """
        total_samples = len(self.y_positive) + len(self.y_negative)
        return math.ceil(total_samples / self.batch_size)

    @staticmethod
    def __get_slice__(arr, idx, num):
        # The idx-th consecutive chunk of `num` samples.
        start = idx * num
        return arr[start: start + num]

    def __getitem__(self, idx):
        """
        Returns batch `idx`, combining positive and negative samples in the
        configured class ratio and shuffling them deterministically (seeded
        with idx so x and y receive the same permutation).
        """
        pos_x = self.__get_slice__(self.x_positive, idx, self.positive_in_batch)
        pos_y = self.__get_slice__(self.y_positive, idx, self.positive_in_batch)
        neg_x = self.__get_slice__(self.x_negative, idx, self.negative_in_batch)
        neg_y = self.__get_slice__(self.y_negative, idx, self.negative_in_batch)

        batch_x = np.concatenate([pos_x, neg_x], axis=0)
        np.random.seed(idx)
        np.random.shuffle(batch_x)

        batch_y = np.concatenate([pos_y, neg_y], axis=0)
        np.random.seed(idx)
        np.random.shuffle(batch_y)

        return batch_x, batch_y
class BaseDataCreator:
    """
    This base class provides base methods for creating samples.
    """

    @staticmethod
    def get_mutation_rate(mode):
        """
        Per-nucleotide mutation probabilities (length 22) for `mode`:
            - canonical_perfect: positions 2-7 never mutate, rest always do
            - canonical_20: positions 2-7 mutate with p=0.2, rest always do
            - non_canonical: a 4 nt stretch starting in [0, 6] never mutates
              and a 4-6 nt stretch starting in [12, 15] mutates with p=0.2;
              all other positions always mutate
            - noise: every position mutates
        :return: numpy array of mutation probabilities
        """
        rates = np.ones(22)
        if mode == "canonical_perfect":
            rates[2:8] = 0
        elif mode == "canonical_20":
            rates[2:8] = 0.2
        elif mode == "non_canonical":
            # Same randint() calls in the same order as before, so seeded
            # random streams reproduce identical rate vectors.
            seed_start = randint(0, 6)
            comp_start = randint(12, 15)
            rates[seed_start:seed_start + 4] = 0
            rates[comp_start:comp_start + randint(4, 6)] = 0.2
        # "noise" (and any unknown mode) keeps the all-ones vector.
        return rates
@staticmethod
def create_target(mirna, mutation_rate, target_len=50):
"""
Function to create target sequence based on miRNA sequence and mutation rate.
:param mirna: - miRNA sequence
:param mutation_rate: - array with probabilities of mutation of mirna sequence
:param target_len: int -> length of generated target mRNA
:return: mRNA target; reverse complement miRNA based on probabilities,
pad to the length of TARGET_LEN
"""
alphabet = ["A", "C", "G", "T"]
complementarity = {
"A": "T",
"T": "A",
"C": "G",
"G": "C"
}
tmp_mrna = ""
for i in range(len(mirna)):
if randint(0, 100) / 100 <= mutation_rate[i]:
choice = set(alphabet).difference(set(mirna[i]))
tmp_mrna = tmp_mrna + sample(list(choice), 1)[0]
else:
tmp_mrna = tmp_mrna + mirna[i]
mrna = ""
for nt in tmp_mrna:
mrna = complementarity[nt] + mrna
random_sequence = ''.join(np.random.choice(alphabet, target_len - len(mirna), replace=True))
random_point = randint(0, target_len - len(mirna))
mrna = random_sequence[0:random_point] + mrna + random_sequence[random_point:(target_len - len(mirna))]
return mrna, random_point
def make_dataset(self,
mirna_df=None,
store_dataset=None,
n=1,
target_len=50,
mutation_mode=None,
include_mutation_mode=False,
include_seed_start=False,
mirna_column_name='Mature sequence',
**kwargs):
"""
Main function of the program. Go through input miRNA file and for each sequence based on given mode
create N artificial targets. Output miRNAs and mRNAs to two separate tsv files.
:param mirna_df: pandas.DataFrame with mirnas
:param store_dataset: str => path where created dataset should be stored (if None is passed, dataset will not
be stored).
:param n: int => number of samples created from each mirna
:param target_len: int => len of output mrna target sequence
:param mutation_mode: str => either specific mode or "positive_class"
:param include_mutation_mode: mutation applied to each sequence returned with it
:param include_seed_start: seed start (starting position) returned with it
:param mirna_column_name: str => column name to use from mirna_df
:return: pandas.DataFrame
"""
output = {
"mirna": [],
"mrna": []
}
if include_mutation_mode is True:
output['mutation'] = []
if include_seed_start is True:
output['seed_start'] = []
percent = lambda a, b: a / b * 100
for i, index_row in enumerate(mirna_df.iterrows()):
_, row = index_row
mode = None
if mutation_mode == "negative_class":
mode = "noise"
elif mutation_mode == "positive_class":
p = percent(i, len(mirna_df))
if p < 20:
mode = "canonical_perfect"
elif 20 <= p < 50:
mode = "canonical_20"
elif 50 <= p < 80:
mode = "non_canonical"
else:
mode = "noise"
if len(row[mirna_column_name]) <= 22:
for _ in range(n):
mutation_rate = self.get_mutation_rate(mode)
# Replace U => T
mirna = row[mirna_column_name].replace('U', 'T')
output['mirna'].append(mirna)
target, seed_start = self.create_target(mirna, mutation_rate, target_len=target_len)
output['mrna'].append(target)
if include_mutation_mode is True:
output['mutation'].append(mode)
if include_seed_start is True:
output['seed_start'].append(seed_start)
# Create pd.DataFrame from data
df = pd.DataFrame(data=output)
if store_dataset is not None:
df.to_csv(path_or_buf=store_dataset, index=False)
return df
| 36.818182 | 117 | 0.583186 |
e708e6f57ef4ef88525347d73d57db3e2e29314e | 16,450 | py | Python | setup.py | msbaines/apex | 5633f6dbf7952026264e3aba42413f06752b0515 | [
"BSD-3-Clause"
] | 1 | 2022-01-02T19:36:53.000Z | 2022-01-02T19:36:53.000Z | setup.py | msbaines/apex | 5633f6dbf7952026264e3aba42413f06752b0515 | [
"BSD-3-Clause"
] | null | null | null | setup.py | msbaines/apex | 5633f6dbf7952026264e3aba42413f06752b0515 | [
"BSD-3-Clause"
] | null | null | null | import torch
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print('\nWarning: Torch did not find available GPUs on this system.\n',
'If your intention is to cross-compile, this is not an error.\n'
'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n'
'Volta (compute capability 7.0), and Turing (compute capability 7.5).\n'
'If you wish to cross-compile for a single specific architecture,\n'
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("torch.__version__ = ", torch.__version__)
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" +
"The latest stable release can be obtained from https://pytorch.org/")
cmdclass = {}
ext_modules = []
extras = {}
if "--pyprof" in sys.argv:
with open('requirements.txt') as f:
required_packages = f.read().splitlines()
extras['pyprof'] = required_packages
try:
sys.argv.remove("--pyprof")
except:
pass
else:
warnings.warn("Option --pyprof not specified. Not installing PyProf dependencies!")
if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv:
if TORCH_MAJOR == 0:
raise RuntimeError("--cpp_ext requires Pytorch 1.0 or later, "
"found torch.__version__ = {}".format(torch.__version__))
from torch.utils.cpp_extension import BuildExtension
cmdclass['build_ext'] = BuildExtension
if "--cpp_ext" in sys.argv:
from torch.utils.cpp_extension import CppExtension
sys.argv.remove("--cpp_ext")
ext_modules.append(
CppExtension('apex_C',
['csrc/flatten_unflatten.cpp',]))
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    """Verify that the nvcc under *cuda_dir* matches the CUDA version the
    installed PyTorch binaries were built with.

    Runs ``nvcc -V``, extracts the "release X.Y" version, and compares its
    major/minor against ``torch.version.cuda``.  Raises RuntimeError on a
    mismatch.
    """
    nvcc_report = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    tokens = nvcc_report.split()
    # The token right after "release" is the version, e.g. "10.1,".
    version_parts = tokens[tokens.index("release") + 1].split(".")
    bare_metal_major = version_parts[0]
    bare_metal_minor = version_parts[1][0]

    torch_cuda_parts = torch.version.cuda.split(".")
    torch_binary_major = torch_cuda_parts[0]
    torch_binary_minor = torch_cuda_parts[1]

    print("\nCompiling cuda extensions with")
    print(nvcc_report + "from " + cuda_dir + "/bin\n")

    if bare_metal_major != torch_binary_major or bare_metal_minor != torch_binary_minor:
        raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " +
                           "not match the version used to compile Pytorch binaries. " +
                           "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) +
                           "In some cases, a minor-version mismatch will not cause later errors: " +
                           "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
                           "You can try commenting out this check (at your own risk).")
# Set up macros for forward/backward compatibility hack around
# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e
# and
# https://github.com/NVIDIA/apex/issues/456
# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac
# Each macro is appended to the compiler flags of every extension below.
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
    version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
    version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
    version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5

# --cuda_ext: core fused CUDA kernels (amp_C, syncbn, fused layer norm).
# Requires an nvcc whose version matches the one torch was built with.
if "--cuda_ext" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--cuda_ext")

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)

        ext_modules.append(
            CUDAExtension(name='amp_C',
                          sources=['csrc/amp_C_frontend.cpp',
                                   'csrc/multi_tensor_sgd_kernel.cu',
                                   'csrc/multi_tensor_scale_kernel.cu',
                                   'csrc/multi_tensor_axpby_kernel.cu',
                                   'csrc/multi_tensor_l2norm_kernel.cu',
                                   'csrc/multi_tensor_lamb_stage_1.cu',
                                   'csrc/multi_tensor_lamb_stage_2.cu',
                                   'csrc/multi_tensor_adam.cu',
                                   'csrc/multi_tensor_novograd.cu',
                                   'csrc/multi_tensor_lamb.cu'],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-lineinfo',
                                                      '-O3',
                                                      # '--resource-usage',
                                                      '--use_fast_math'] + version_dependent_macros}))
        ext_modules.append(
            CUDAExtension(name='syncbn',
                          sources=['csrc/syncbn.cpp',
                                   'csrc/welford.cu'],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-O3'] + version_dependent_macros}))

        ext_modules.append(
            CUDAExtension(name='fused_layer_norm_cuda',
                          sources=['csrc/layer_norm_cuda.cpp',
                                   'csrc/layer_norm_cuda_kernel.cu'],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-maxrregcount=50',
                                                      '-O3',
                                                      '--use_fast_math'] + version_dependent_macros}))

# --bnp: contrib group batch norm extension.
if "--bnp" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--bnp")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--bnp was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='bnp',
                          sources=['apex/contrib/csrc/groupbn/batch_norm.cu',
                                   'apex/contrib/csrc/groupbn/ipc.cu',
                                   'apex/contrib/csrc/groupbn/interface.cpp',
                                   'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': [] + version_dependent_macros,
                                              'nvcc':['-DCUDA_HAS_FP16=1',
                                                      '-D__CUDA_NO_HALF_OPERATORS__',
                                                      '-D__CUDA_NO_HALF_CONVERSIONS__',
                                                      '-D__CUDA_NO_HALF2_OPERATORS__'] + version_dependent_macros}))

# --xentropy: contrib fused cross-entropy extension.
if "--xentropy" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--xentropy")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--xentropy was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='xentropy_cuda',
                          sources=['apex/contrib/csrc/xentropy/interface.cpp',
                                   'apex/contrib/csrc/xentropy/xentropy_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
                                              'nvcc':['-O3'] + version_dependent_macros}))

# --deprecated_fused_adam: legacy fused Adam optimizer extension.
if "--deprecated_fused_adam" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--deprecated_fused_adam")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--deprecated_fused_adam was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        ext_modules.append(
            CUDAExtension(name='fused_adam_cuda',
                          sources=['apex/contrib/csrc/optimizers/fused_adam_cuda.cpp',
                                   'apex/contrib/csrc/optimizers/fused_adam_cuda_kernel.cu'],
                          include_dirs=[os.path.join(this_dir, 'csrc')],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '--use_fast_math'] + version_dependent_macros}))

# --fast_multihead_attn: contrib multihead attention kernels.  Pulls in the
# cutlass submodule and targets Volta (sm_70) only.
if "--fast_multihead_attn" in sys.argv:
    from torch.utils.cpp_extension import CUDAExtension
    sys.argv.remove("--fast_multihead_attn")

    from torch.utils.cpp_extension import BuildExtension
    cmdclass['build_ext'] = BuildExtension

    if torch.utils.cpp_extension.CUDA_HOME is None:
        raise RuntimeError("--fast_multihead_attn was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
    else:
        # NOTE(review): return code of the submodule fetch is not checked.
        subprocess.run(["git", "submodule", "update", "--init", "apex/contrib/csrc/multihead_attn/cutlass"])
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros}))
        ext_modules.append(
            CUDAExtension(name='fast_self_multihead_attn_norm_add',
                          sources=['apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add.cpp',
                                   'apex/contrib/csrc/multihead_attn/self_multihead_attn_norm_add_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros}))
        ext_modules.append(
            CUDAExtension(name='fast_encdec_multihead_attn',
                          sources=['apex/contrib/csrc/multihead_attn/encdec_multihead_attn.cpp',
                                   'apex/contrib/csrc/multihead_attn/encdec_multihead_attn_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros}))
        ext_modules.append(
            CUDAExtension(name='fast_encdec_multihead_attn_norm_add',
                          sources=['apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add.cpp',
                                   'apex/contrib/csrc/multihead_attn/encdec_multihead_attn_norm_add_cuda.cu'],
                          extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,
                                              'nvcc':['-O3',
                                                      '-gencode', 'arch=compute_70,code=sm_70',
                                                      '-I./apex/contrib/csrc/multihead_attn/cutlass/',
                                                      '-U__CUDA_NO_HALF_OPERATORS__',
                                                      '-U__CUDA_NO_HALF_CONVERSIONS__',
                                                      '--expt-relaxed-constexpr',
                                                      '--expt-extended-lambda',
                                                      '--use_fast_math'] + version_dependent_macros}))

setup(
    name='apex',
    version='0.1',
    # NOTE(review): 'tests' appears twice in the exclude tuple.
    packages=find_packages(exclude=('build',
                                    'csrc',
                                    'include',
                                    'tests',
                                    'dist',
                                    'docs',
                                    'tests',
                                    'examples',
                                    'apex.egg-info',)),
    description='PyTorch Extensions written by NVIDIA',
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    extras_require=extras,
)
| 58.333333 | 290 | 0.544802 |
e9858a6fe81738095c44a3b2cf122eabd21a8fc8 | 7,300 | py | Python | packages/winpanda/extra/src/winpanda/core/package/manifest.py | sergiimatus/dcos | 801b28dbeee83d615dbad08eed2ee1b80ef3b6e8 | [
"Apache-2.0"
] | null | null | null | packages/winpanda/extra/src/winpanda/core/package/manifest.py | sergiimatus/dcos | 801b28dbeee83d615dbad08eed2ee1b80ef3b6e8 | [
"Apache-2.0"
] | null | null | null | packages/winpanda/extra/src/winpanda/core/package/manifest.py | sergiimatus/dcos | 801b28dbeee83d615dbad08eed2ee1b80ef3b6e8 | [
"Apache-2.0"
] | null | null | null | """Panda package management for Windows.
DC/OS package manifest type definition.
"""
import json
from pathlib import Path
from .id import PackageId
from common import logger
from common.storage import ISTOR_NODE, IStorNodes
from core import exceptions as cr_exc
from core.rc_ctx import ResourceContext
from core import utils as cr_utl
LOG = logger.get_logger(__name__)
class PackageManifest:
    """Package manifest container.

    Aggregates a package's ID, its resource-rendering context and the three
    package descriptors (info, extra install options, system service
    options), loading any descriptor not supplied by the caller from the
    local package repository.
    """
    _pkginfo_fpath = 'pkginfo.json'
    _pkg_extcfg_fpath = 'etc/{pkg_name}.extra.j2'
    _pkg_svccfg_fpath = 'etc/{pkg_name}.nssm.j2'

    def __init__(self, pkg_id, istor_nodes, cluster_conf,
                 pkg_info=None, pkg_extcfg=None, pkg_svccfg=None):
        """Constructor.

        :param pkg_id:       PackageId, package ID
        :param istor_nodes:  IStorNodes, DC/OS installation storage nodes
                             (set of pathlib.Path objects)
        :param cluster_conf: dict, configparser.ConfigParser.read_dict()
                             compatible data. DC/OS cluster setup parameters
        :param pkg_info:     dict, package info descriptor from DC/OS package
                             build system
        :param pkg_extcfg:   dict, extra package installation options
        :param pkg_svccfg:   dict, package system service options
                             (configparser.ConfigParser.read_dict()
                             compatible)
        """
        assert isinstance(pkg_id, PackageId), (
            f'Argument: pkg_id:'
            f' Got {type(pkg_id).__name__} instead of PackageId'
        )
        assert isinstance(istor_nodes, IStorNodes), (
            f'Argument: istor_nodes:'
            f' Got {type(istor_nodes).__name__} instead of IStorNodes'
        )
        assert isinstance(cluster_conf, dict), (
            f'Argument: cluster_conf:'
            f' Got {type(cluster_conf).__name__} instead of dict'
        )
        self._pkg_id = pkg_id
        self._istor_nodes = istor_nodes
        self._context = ResourceContext(istor_nodes, cluster_conf, pkg_id)

        # Descriptors not supplied by the caller are loaded from the local
        # package repository.
        self._pkg_info = pkg_info if pkg_info is not None else (
            self._load_pkg_info()
        )
        self._pkg_extcfg = pkg_extcfg if pkg_extcfg is not None else (
            self._load_pkg_extcfg()
        )
        self._pkg_svccfg = pkg_svccfg if pkg_svccfg is not None else (
            self._load_pkg_svccfg()
        )
        # TODO: Add content verification (jsonschema) for self.body. Raise
        #       ValueError, if conformance was not confirmed.

    def __str__(self):
        return str(self.body)

    @property
    def body(self):
        """dict: consolidated manifest content."""
        return {
            'pkg_id': self._pkg_id.pkg_id,
            'context': self._context.as_dict(),
            'pkg_info': self._pkg_info,
            'pkg_extcfg': self._pkg_extcfg,
            'pkg_svccfg': self._pkg_svccfg,
        }

    @property
    def pkg_id(self):
        """PackageId: package ID."""
        return self._pkg_id

    @property
    def istor_nodes(self):
        """IStorNodes: DC/OS installation storage nodes."""
        # Fixed: this property previously returned self._pkg_id
        # (copy/paste bug).
        return self._istor_nodes

    @property
    def pkg_info(self):
        """dict: package info descriptor."""
        return self._pkg_info

    @property
    def pkg_extcfg(self):
        """dict: extra package installation options."""
        return self._pkg_extcfg

    @property
    def pkg_svccfg(self):
        """dict: package system service options."""
        return self._pkg_svccfg

    def _load_pkg_info(self):
        """Load package info descriptor from a file.

        :return: dict, package info descriptor ({} if the file is absent)
        """
        fpath = getattr(self._istor_nodes, ISTOR_NODE.PKGREPO).joinpath(
            self._pkg_id.pkg_id, self._pkginfo_fpath
        )
        try:
            pkg_info = cr_utl.rc_load_json(
                fpath, emheading='Package info descriptor',
                render=True, context=self._context
            )
        except cr_exc.RCNotFoundError:
            # A missing descriptor is not an error - fall back to empty.
            pkg_info = {}
        return pkg_info

    def _load_pkg_extcfg(self):
        """Load package extra installation options from a file.

        :return: dict, package extra installation options descriptor ({} if
                 the file is absent)
        """
        fpath = getattr(self._istor_nodes, ISTOR_NODE.PKGREPO).joinpath(
            self._pkg_id.pkg_id, self._pkg_extcfg_fpath.format(
                pkg_name=self._pkg_id.pkg_name
            )
        )
        try:
            pkg_extcfg = cr_utl.rc_load_yaml(
                fpath, emheading='Package inst extra descriptor',
                render=True, context=self._context
            )
        except cr_exc.RCNotFoundError:
            pkg_extcfg = {}
        return pkg_extcfg

    def _load_pkg_svccfg(self):
        """Load package system service options from a file.

        :return: dict, package system service descriptor ({} if the file is
                 absent)
        """
        fpath = getattr(self._istor_nodes, ISTOR_NODE.PKGREPO).joinpath(
            self._pkg_id.pkg_id, self._pkg_svccfg_fpath.format(
                pkg_name=self._pkg_id.pkg_name
            )
        )
        try:
            pkg_svccfg = cr_utl.rc_load_ini(
                fpath, emheading='Package service descriptor',
                render=True, context=self._context
            )
        except cr_exc.RCNotFoundError:
            pkg_svccfg = {}
        return pkg_svccfg

    def json(self):
        """Construct JSON representation of the manifest."""
        return json.dumps(self.body, indent=4, sort_keys=True)

    @classmethod
    def load(cls, fpath):
        """Load package manifest from a file.

        :param fpath: pathlib.Path, path to a JSON-formatted manifest file.
        :return: PackageManifest, reconstructed manifest.
        :raise: cr_exc.RCInvalidError if the file content is malformed.
        """
        m_body = cr_utl.rc_load_json(fpath, emheading='Package manifest')

        try:
            manifest = cls(
                pkg_id=PackageId(pkg_id=m_body.get('pkg_id')),
                istor_nodes=IStorNodes(
                    **{
                        k: Path(v) for k, v in m_body.get(
                            'context'
                        ).get('istor_nodes').items()
                    }
                ),
                cluster_conf=m_body.get('context').get('cluster_conf'),
                pkg_info=m_body.get('pkg_info'),
                pkg_extcfg=m_body.get('pkg_extcfg'),
                pkg_svccfg=m_body.get('pkg_svccfg'),
            )
            LOG.debug(f'Package manifest: Load: {fpath}')
        except (ValueError, AssertionError) as e:
            err_msg = (f'Package manifest: Load:'
                       f' {fpath}: {type(e).__name__}: {e}')
            raise cr_exc.RCInvalidError(err_msg) from e

        return manifest

    def save(self):
        """Save package manifest to a file within the active packages index.

        :raise: cr_exc.RCError on filesystem/serialization failure.
        """
        fpath = getattr(self._istor_nodes, ISTOR_NODE.PKGACTIVE).joinpath(
            f'{self._pkg_id.pkg_id}.json'
        )
        try:
            with fpath.open(mode='w') as fp:
                json.dump(self.body, fp)
        except (OSError, RuntimeError) as e:
            err_msg = f'Package manifest: Save: {type(e).__name__}: {e}'
            raise cr_exc.RCError(err_msg) from e

        LOG.debug(f'Package manifest: Save: {fpath}')
| 33.181818 | 79 | 0.583151 |
a8992d1fe53d14202610e6910fd801e8f9508ac8 | 123 | py | Python | nullcompressor.py | greenteadigital/pycrypto | e6aa3bfbcf22e95fd15b710311b05e82bae8bdec | [
"MIT"
] | 1 | 2015-10-26T16:25:55.000Z | 2015-10-26T16:25:55.000Z | nullcompressor.py | greenteadigital/pycrypto | e6aa3bfbcf22e95fd15b710311b05e82bae8bdec | [
"MIT"
] | null | null | null | nullcompressor.py | greenteadigital/pycrypto | e6aa3bfbcf22e95fd15b710311b05e82bae8bdec | [
"MIT"
] | null | null | null |
class NullCompressor(object):
    """No-op compressor that passes data through unchanged."""

    def __init__(self):
        # Advertised algorithm name, mirroring real compressor objects.
        self.__name__ = 'none'

    def compress(self, _input):
        """Return *_input* untouched (identity "compression")."""
        return _input
90fb6d550103fb886ef8fcb04d8a2de4b431ee49 | 21,654 | py | Python | brewtils/test/fixtures.py | Jake-Ross/brewtils | bed6ab82dc354ea825a64e9e48a01a6479bf5060 | [
"MIT"
] | null | null | null | brewtils/test/fixtures.py | Jake-Ross/brewtils | bed6ab82dc354ea825a64e9e48a01a6479bf5060 | [
"MIT"
] | null | null | null | brewtils/test/fixtures.py | Jake-Ross/brewtils | bed6ab82dc354ea825a64e9e48a01a6479bf5060 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import copy
from datetime import datetime
import pytest
import pytz
from brewtils.models import (
Choices,
Command,
CronTrigger,
DateTrigger,
Event,
Garden,
Instance,
IntervalTrigger,
Job,
LoggingConfig,
Operation,
Parameter,
PatchOperation,
Principal,
Queue,
Request,
RequestFile,
RequestTemplate,
Resolvable,
LegacyRole,
Runner,
System,
)
@pytest.fixture
def system_id():
    """Canonical system ObjectId string shared across fixtures."""
    return "584f11af55a38e64799f1234"


@pytest.fixture
def ts_dt():
    """Jan 1, 2016 as a naive datetime."""
    return datetime(2016, 1, 1)


@pytest.fixture
def ts_epoch():
    """Jan 1, 2016 UTC as epoch milliseconds."""
    return 1451606400000


@pytest.fixture
def ts_dt_utc(ts_epoch):
    """Jan 1, 2016 UTC as timezone-aware datetime."""
    return datetime.fromtimestamp(ts_epoch / 1000, tz=pytz.utc)


@pytest.fixture
def ts_epoch_eastern():
    """Jan 1, 2016 US/Eastern as epoch milliseconds."""
    return 1451624160000


@pytest.fixture
def ts_dt_eastern():
    """Jan 1, 2016 US/Eastern as timezone-aware datetime."""
    return datetime(2016, 1, 1, tzinfo=pytz.timezone("US/Eastern"))


@pytest.fixture
def ts_2_dt(ts_2_epoch):
    """Feb 2, 2017 as a naive datetime."""
    # NOTE(review): the ts_2_epoch argument is unused here; it only forces
    # that fixture to exist.
    return datetime(2017, 2, 2)


@pytest.fixture
def ts_2_epoch():
    """Feb 2, 2017 UTC as epoch milliseconds."""
    return 1485993600000


@pytest.fixture
def ts_2_dt_utc(ts_2_epoch):
    """Feb 2, 2017 UTC as timezone-aware datetime."""
    return datetime.fromtimestamp(ts_2_epoch / 1000, tz=pytz.utc)


@pytest.fixture
def choices_dict():
    """Choices as a dictionary."""
    return {
        "display": "select",
        "strict": True,
        "type": "static",
        "value": ["choiceA", "choiceB"],
        "details": {},
    }


@pytest.fixture
def bg_choices(choices_dict):
    """Choices as a model."""
    return Choices(**choices_dict)


@pytest.fixture
def nested_parameter_dict():
    """Nested Parameter as a dictionary."""
    return {
        "key": "nested",
        "type": "Any",
        "multi": False,
        "display_name": "nested",
        "optional": True,
        "default": None,
        "description": None,
        "choices": None,
        "parameters": [],
        "nullable": True,
        "maximum": None,
        "minimum": None,
        "regex": None,
        "form_input_type": None,
        "type_info": {},
    }


@pytest.fixture
def parameter_dict(nested_parameter_dict, choices_dict):
    """Non-nested parameter as a dictionary."""
    return {
        "key": "message",
        "type": "Any",
        "multi": False,
        "display_name": "display",
        "optional": True,
        "default": "default",
        "description": "desc",
        "choices": choices_dict,
        "parameters": [nested_parameter_dict],
        "nullable": False,
        "maximum": 10,
        "minimum": 1,
        "regex": ".*",
        "form_input_type": None,
        "type_info": {},
    }


@pytest.fixture
def bg_parameter(parameter_dict, bg_choices):
    """Parameter based on the parameter_dict"""
    dict_copy = copy.deepcopy(parameter_dict)
    # Swap the dict sub-structures for their model equivalents before
    # constructing the top-level Parameter.
    dict_copy["parameters"] = [Parameter(**dict_copy["parameters"][0])]
    dict_copy["choices"] = bg_choices
    return Parameter(**dict_copy)
@pytest.fixture
def command_dict(parameter_dict, system_id):
    """A command represented as a dictionary."""
    return {
        "name": "speak",
        "description": "desc",
        "parameters": [parameter_dict],
        "command_type": "ACTION",
        "output_type": "STRING",
        "hidden": False,
        "schema": {},
        "form": {},
        "template": "<html></html>",
        "icon_name": "icon!",
        "metadata": {"meta": "data"},
    }


@pytest.fixture
def bg_command(command_dict, bg_parameter, system_id):
    """A command as a model."""
    dict_copy = copy.deepcopy(command_dict)
    dict_copy["parameters"] = [bg_parameter]
    return Command(**dict_copy)


@pytest.fixture
def command_dict_2(command_dict):
    """A second command represented as a dictionary."""
    dict_copy = copy.deepcopy(command_dict)
    dict_copy["name"] = "speak2"
    return dict_copy


@pytest.fixture
def bg_command_2(command_dict_2, bg_parameter, system_id):
    """A second command as a model."""
    dict_copy = copy.deepcopy(command_dict_2)
    dict_copy["parameters"] = [bg_parameter]
    return Command(**dict_copy)


@pytest.fixture
def instance_dict(ts_epoch):
    """An instance represented as a dictionary."""
    return {
        "id": "584f11af55a38e64799fd1d4",
        "name": "default",
        "description": "desc",
        "status": "RUNNING",
        "icon_name": "icon!",
        "queue_type": "rabbitmq",
        "queue_info": {
            "admin": {"name": "admin.abc.0-0-1.default.ai39fk0ji4", "args": {}},
            "request": {"name": "abc.0-0-1.default", "args": {}},
            "connection": {
                "host": "localhost",
                "port": 5672,
                "user": "guest",
                "password": "guest",
                "virtual_host": "/",
                "ssl": {"enabled": False},
            },
            "url": "amqp://guest:guest@localhost:5672",
        },
        "status_info": {"heartbeat": ts_epoch},
        "metadata": {"meta": "data"},
    }


@pytest.fixture
def bg_instance(instance_dict, ts_dt):
    """An instance as a model."""
    dict_copy = copy.deepcopy(instance_dict)
    # Model heartbeats are datetimes, not epoch millis.
    dict_copy["status_info"]["heartbeat"] = ts_dt
    return Instance(**dict_copy)


@pytest.fixture
def system_dict(instance_dict, command_dict, command_dict_2, system_id):
    """A system represented as a dictionary."""
    return {
        "name": "system",
        "description": "desc",
        "version": "1.0.0",
        "id": system_id,
        "max_instances": 1,
        "instances": [instance_dict],
        "commands": [command_dict, command_dict_2],
        "icon_name": "fa-beer",
        "display_name": "non-offensive",
        "metadata": {"some": "stuff"},
        "namespace": "ns",
        "local": True,
        "template": "<html>template</html>",
    }


@pytest.fixture
def bg_system(system_dict, bg_instance, bg_command, bg_command_2):
    """A system as a model."""
    dict_copy = copy.deepcopy(system_dict)
    dict_copy["instances"] = [bg_instance]
    dict_copy["commands"] = [bg_command, bg_command_2]
    return System(**dict_copy)


@pytest.fixture
def bg_system_2(system_dict, bg_instance, bg_command, bg_command_2):
    """A system with a different version."""
    dict_copy = copy.deepcopy(system_dict)
    dict_copy["version"] = "2.0.0"
    dict_copy["instances"] = [bg_instance]
    dict_copy["commands"] = [bg_command, bg_command_2]
    return System(**dict_copy)
@pytest.fixture
def child_request_dict(ts_epoch):
    """A child request represented as a dictionary."""
    return {
        "system": "child_system",
        "system_version": "1.0.0",
        "instance_name": "default",
        "namespace": "ns",
        "command": "say",
        "id": "58542eb571afd47ead90d25f",
        "parameters": {},
        "comment": "bye!",
        "output": "nested output",
        "output_type": "STRING",
        "status": "CREATED",
        "hidden": True,
        "command_type": "ACTION",
        "created_at": ts_epoch,
        "updated_at": ts_epoch,
        "status_updated_at": ts_epoch,
        "error_class": None,
        "metadata": {"child": "stuff"},
        "has_parent": True,
        "requester": "user",
    }


@pytest.fixture
def child_request(child_request_dict, ts_dt):
    """A child request as a model."""
    dict_copy = copy.deepcopy(child_request_dict)
    # Model timestamps are datetimes, not epoch millis.
    dict_copy["created_at"] = ts_dt
    dict_copy["updated_at"] = ts_dt
    dict_copy["status_updated_at"] = ts_dt
    return Request(**dict_copy)


@pytest.fixture
def parent_request_dict(ts_epoch):
    """A parent request represented as a dictionary."""
    return {
        "system": "parent_system",
        "system_version": "1.0.0",
        "instance_name": "default",
        "namespace": "ns",
        "command": "say",
        "id": "58542eb571afd47ead90d25d",
        "parent": None,
        "parameters": {},
        "comment": "bye!",
        "output": "nested output",
        "output_type": "STRING",
        "status": "CREATED",
        "command_type": "ACTION",
        "created_at": ts_epoch,
        "hidden": False,
        "updated_at": ts_epoch,
        "status_updated_at": ts_epoch,
        "error_class": None,
        "metadata": {"parent": "stuff"},
        "has_parent": False,
        "requester": "user",
    }


@pytest.fixture
def parent_request(parent_request_dict, ts_dt):
    """A parent request as a model."""
    dict_copy = copy.deepcopy(parent_request_dict)
    dict_copy["created_at"] = ts_dt
    dict_copy["updated_at"] = ts_dt
    dict_copy["status_updated_at"] = ts_dt
    return Request(**dict_copy)


@pytest.fixture
def request_template_dict():
    """Request template as a dictionary."""
    return {
        "system": "system",
        "system_version": "1.0.0",
        "instance_name": "default",
        "namespace": "ns",
        "command": "speak",
        "command_type": "ACTION",
        "parameters": {"message": "hey!"},
        "comment": "hi!",
        "metadata": {"request": "stuff"},
        "output_type": "STRING",
    }


@pytest.fixture
def bg_request_template(request_template_dict):
    """Request template as a bg model."""
    return RequestTemplate(**request_template_dict)


@pytest.fixture
def request_dict(parent_request_dict, child_request_dict, ts_epoch):
    """A request represented as a dictionary."""
    return {
        "system": "system",
        "system_version": "1.0.0",
        "instance_name": "default",
        "namespace": "ns",
        "command": "speak",
        "id": "58542eb571afd47ead90d25e",
        "parent": parent_request_dict,
        "children": [child_request_dict],
        "parameters": {"message": "hey!"},
        "comment": "hi!",
        "output": "output",
        "output_type": "STRING",
        "status": "CREATED",
        "hidden": False,
        "command_type": "ACTION",
        "created_at": ts_epoch,
        "updated_at": ts_epoch,
        "status_updated_at": ts_epoch,
        "error_class": "ValueError",
        "metadata": {"request": "stuff"},
        "has_parent": True,
        "requester": "user",
    }


@pytest.fixture
def bg_request(request_dict, parent_request, child_request, ts_dt):
    """A request as a model."""
    dict_copy = copy.deepcopy(request_dict)
    # Swap nested dicts for models and epoch millis for datetimes.
    dict_copy["parent"] = parent_request
    dict_copy["children"] = [child_request]
    dict_copy["created_at"] = ts_dt
    dict_copy["updated_at"] = ts_dt
    dict_copy["status_updated_at"] = ts_dt
    return Request(**dict_copy)
@pytest.fixture
def patch_dict_no_envelop():
    """A single bare patch operation as a dictionary."""
    return dict(operation="replace", path="/status", value="RUNNING")
@pytest.fixture
def patch_dict_no_envelop2():
    """A second bare patch operation as a dictionary."""
    return dict(operation="replace2", path="/status2", value="RUNNING2")
@pytest.fixture
def patch_dict(patch_dict_no_envelop):
    """A single patch wrapped in its operations envelope."""
    return dict(operations=[patch_dict_no_envelop])
@pytest.fixture
def patch_many_dict(patch_dict_no_envelop, patch_dict_no_envelop2):
    """Two patches wrapped in one operations envelope."""
    return dict(operations=[patch_dict_no_envelop, patch_dict_no_envelop2])
@pytest.fixture
def bg_patch(patch_dict_no_envelop):
    """The first patch, instantiated as a model object."""
    patch = PatchOperation(**patch_dict_no_envelop)
    return patch
@pytest.fixture
def bg_patch2(patch_dict_no_envelop2):
    """The second patch, instantiated as a model object."""
    patch = PatchOperation(**patch_dict_no_envelop2)
    return patch
@pytest.fixture
def logging_config_dict():
    """Dictionary form of a logging configuration."""
    return dict(
        level="INFO",
        handlers={"stdout": {"foo": "bar"}},
        formatters={"default": {"format": LoggingConfig.DEFAULT_FORMAT}},
    )
@pytest.fixture
def bg_logging_config(logging_config_dict):
    """The logging configuration, instantiated as a model object."""
    config = LoggingConfig(**logging_config_dict)
    return config
@pytest.fixture
def event_dict(ts_epoch, request_dict):
    """Dictionary form of a request-created event."""
    return dict(
        name="REQUEST_CREATED",
        namespace="ns",
        garden="beer",
        metadata={"extra": "info"},
        timestamp=ts_epoch,
        payload_type="Request",
        payload=request_dict,
        error=False,
        error_message=None,
    )
@pytest.fixture
def bg_event(event_dict, ts_dt, bg_request):
    """The event, instantiated as a model with a model payload."""
    data = copy.deepcopy(event_dict)
    data.update(timestamp=ts_dt, payload=bg_request)
    return Event(**data)
@pytest.fixture
def queue_dict(system_id):
    """Dictionary form of a queue."""
    return dict(
        name="echo.1-0-0.default",
        system="echo",
        version="1.0.0",
        instance="default",
        system_id=system_id,
        display="foo.1-0-0.default",
        size=3,
    )
@pytest.fixture
def bg_queue(queue_dict):
    """The queue, instantiated as a model object."""
    queue = Queue(**queue_dict)
    return queue
@pytest.fixture
def principal_dict(legacy_role_dict):
    """Dictionary form of a principal with one legacy role."""
    return dict(
        id="58542eb571afd47ead90d24f",
        username="admin",
        roles=[legacy_role_dict],
        permissions=["bg-all"],
        preferences={"theme": "dark"},
        metadata={"foo": "bar"},
    )
@pytest.fixture
def bg_principal(principal_dict, bg_role):
    """The principal, instantiated as a model with model roles."""
    data = copy.deepcopy(principal_dict)
    data.update(roles=[bg_role])
    return Principal(**data)
@pytest.fixture
def legacy_role_dict():
    """Dictionary form of a legacy admin role."""
    return dict(
        id="58542eb571afd47ead90d26f",
        name="bg-admin",
        description="The admin role",
        permissions=["bg-all"],
    )
@pytest.fixture
def bg_role(legacy_role_dict):
    """The legacy role, instantiated (from a deep copy) as a model object."""
    return LegacyRole(**copy.deepcopy(legacy_role_dict))
@pytest.fixture
def job_dict(ts_epoch, request_template_dict, date_trigger_dict):
    """Dictionary form of a date-triggered job."""
    return dict(
        name="job_name",
        id="58542eb571afd47ead90d26a",
        trigger_type="date",
        trigger=date_trigger_dict,
        request_template=request_template_dict,
        misfire_grace_time=3,
        coalesce=True,
        next_run_time=ts_epoch,
        success_count=0,
        error_count=0,
        status="RUNNING",
        max_instances=3,
        timeout=30,
    )
@pytest.fixture
def job_id_list_dict(job_dict):
    """Dictionary wrapping a single-element list of job IDs."""
    return dict(ids=[job_dict["id"]])
@pytest.fixture
def job_dfn_list_dict(job_dict):
    """Dictionary wrapping a single-element list of job definitions."""
    return dict(jobs=[job_dict])
@pytest.fixture
def cron_job_dict(job_dict, cron_trigger_dict):
    """The base job dict, re-targeted at a cron trigger."""
    data = copy.deepcopy(job_dict)
    data.update(trigger_type="cron", trigger=cron_trigger_dict)
    return data
@pytest.fixture
def interval_job_dict(job_dict, interval_trigger_dict):
    """The base job dict, re-targeted at an interval trigger."""
    data = copy.deepcopy(job_dict)
    data.update(trigger_type="interval", trigger=interval_trigger_dict)
    return data
@pytest.fixture
def job_ids_dict(job_dict):
    """Dictionary wrapping the (deep-copied) job's ID list."""
    return dict(ids=[copy.deepcopy(job_dict)["id"]])
@pytest.fixture
def job_dict_for_import(job_dict):
    """The job dict with server-generated fields stripped, as used for import."""
    dropped = ("id", "next_run_time", "success_count", "error_count")
    return {k: v for k, v in copy.deepcopy(job_dict).items() if k not in dropped}
@pytest.fixture
def bg_job(job_dict, ts_dt, bg_request_template, bg_date_trigger):
    """The date job, instantiated as a model object."""
    data = copy.deepcopy(job_dict)
    data.update(
        next_run_time=ts_dt,
        trigger=bg_date_trigger,
        request_template=bg_request_template,
    )
    return Job(**data)
@pytest.fixture
def bg_cron_job(cron_job_dict, bg_request_template, bg_cron_trigger, ts_dt):
    """The cron job, instantiated as a model object."""
    data = copy.deepcopy(cron_job_dict)
    data.update(
        next_run_time=ts_dt,
        trigger=bg_cron_trigger,
        request_template=bg_request_template,
    )
    return Job(**data)
@pytest.fixture
def bg_interval_job(interval_job_dict, bg_request_template, bg_interval_trigger, ts_dt):
    """The interval job, instantiated as a model object."""
    data = copy.deepcopy(interval_job_dict)
    data.update(
        next_run_time=ts_dt,
        trigger=bg_interval_trigger,
        request_template=bg_request_template,
    )
    return Job(**data)
@pytest.fixture
def bg_job_ids(job_dict):
    """Single-element list holding the job's ID as a string."""
    return [str(copy.deepcopy(job_dict)["id"])]
@pytest.fixture
def bg_job_defns_list(job_dict_for_import):
    """Single-element list of job model definitions."""
    jobs = [Job(**job_dict_for_import)]
    return jobs
@pytest.fixture
def interval_trigger_dict(ts_epoch, ts_2_epoch):
    """Dictionary form of an interval trigger."""
    return dict(
        weeks=1,
        days=1,
        hours=1,
        minutes=1,
        seconds=1,
        start_date=ts_epoch,
        end_date=ts_2_epoch,
        timezone="utc",
        jitter=1,
        reschedule_on_finish=False,
    )
@pytest.fixture
def bg_interval_trigger(interval_trigger_dict, ts_dt, ts_2_dt):
    """The interval trigger, instantiated as a model object."""
    data = copy.deepcopy(interval_trigger_dict)
    data.update(start_date=ts_dt, end_date=ts_2_dt)
    return IntervalTrigger(**data)
@pytest.fixture
def request_file_dict():
    """Dictionary form of a request file."""
    return dict(storage_type="gridfs", filename="request_filename")
@pytest.fixture
def cron_trigger_dict(ts_epoch, ts_2_epoch):
    """Dictionary form of a cron trigger."""
    return dict(
        year="2020",
        month="*/1",
        day="*/1",
        week="*/1",
        day_of_week="*/1",
        hour="*/1",
        minute="*/1",
        second="*/1",
        start_date=ts_epoch,
        end_date=ts_2_epoch,
        timezone="utc",
        jitter=1,
    )
@pytest.fixture
def bg_cron_trigger(cron_trigger_dict, ts_dt, ts_2_dt):
    """The cron trigger, instantiated as a model object."""
    data = copy.deepcopy(cron_trigger_dict)
    data.update(start_date=ts_dt, end_date=ts_2_dt)
    return CronTrigger(**data)
@pytest.fixture
def date_trigger_dict(ts_epoch):
    """Dictionary form of a date trigger."""
    return dict(run_date=ts_epoch, timezone="utc")
@pytest.fixture
def bg_date_trigger(date_trigger_dict, ts_dt):
    """The date trigger, instantiated as a model object."""
    data = copy.deepcopy(date_trigger_dict)
    data.update(run_date=ts_dt)
    return DateTrigger(**data)
@pytest.fixture
def bg_request_file(request_file_dict):
    """The request file, instantiated as a model object."""
    request_file = RequestFile(**request_file_dict)
    return request_file
@pytest.fixture
def garden_dict(ts_epoch, system_dict):
    """Dictionary form of a garden containing one system."""
    return dict(
        id="123f11af55a38e64799fa1c1",
        name="garden",
        status="RUNNING",
        status_info={},
        namespaces=[system_dict["namespace"]],
        systems=[system_dict],
        connection_type="http",
        connection_params={},
    )
@pytest.fixture
def bg_garden(garden_dict, bg_system):
    """The garden, instantiated as a model with model systems."""
    data = copy.deepcopy(garden_dict)
    data.update(systems=[bg_system])
    return Garden(**data)
@pytest.fixture
def operation_dict(ts_epoch, request_dict):
    """Dictionary form of a request-create operation."""
    return dict(
        model=request_dict,
        model_type="Request",
        args=[request_dict["id"]],
        kwargs={"extra": "kwargs"},
        target_garden_name="child",
        source_garden_name="parent",
        operation_type="REQUEST_CREATE",
    )
@pytest.fixture
def bg_operation(operation_dict, bg_request):
    """The operation, instantiated as a model with a model payload."""
    data = copy.deepcopy(operation_dict)
    data.update(model=bg_request)
    return Operation(**data)
@pytest.fixture
def runner_dict(instance_dict):
    """Dictionary form of a runner."""
    return dict(
        id="EIBqyAVAyP",
        name="system-1.0.0",
        path="system-1.0.0",
        instance_id=instance_dict["id"],
        stopped=False,
        dead=False,
        restart=True,
    )
@pytest.fixture
def bg_runner(runner_dict):
    """The runner, instantiated as a model object."""
    runner = Runner(**runner_dict)
    return runner
@pytest.fixture
def resolvable_dict():
    """Dictionary form of a bytes resolvable."""
    return dict(
        id="60996b9dc021bf0d4add8b67",
        type="bytes",
        storage="gridfs",
        details={"random": "detail"},
    )
@pytest.fixture
def bg_resolvable(resolvable_dict):
    """The resolvable, instantiated as a model object."""
    resolvable = Resolvable(**resolvable_dict)
    return resolvable
@pytest.fixture
def resolvable_chunk_dict():
    """Dictionary form of a base64 chunk resolvable (no id of its own)."""
    return dict(
        type="base64",
        storage="gridfs",
        details={"file_id": "60996b9dc021bf0d4add8b67"},
    )
@pytest.fixture
def bg_resolvable_chunk(resolvable_chunk_dict):
    """The chunk resolvable, instantiated as a model object."""
    chunk = Resolvable(**resolvable_chunk_dict)
    return chunk
| 26.057762 | 88 | 0.622195 |
9a0906b745ab31c4fa50f9d5b804101e1d4b5c65 | 1,157 | py | Python | centreonapi/centreon.py | jeromemartin/centreon-sdk-python | 4ac5e1c4af71cc52f5edc13318a1aa96ebe5e9bb | [
"Apache-2.0"
] | 1 | 2022-01-07T09:37:55.000Z | 2022-01-07T09:37:55.000Z | centreonapi/centreon.py | jeromemartin/centreon-sdk-python | 4ac5e1c4af71cc52f5edc13318a1aa96ebe5e9bb | [
"Apache-2.0"
] | null | null | null | centreonapi/centreon.py | jeromemartin/centreon-sdk-python | 4ac5e1c4af71cc52f5edc13318a1aa96ebe5e9bb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from centreonapi.webservice.configuration.host import Hosts
from centreonapi.webservice.configuration.host import HostTemplates
from centreonapi.webservice.configuration.service import Services
from centreonapi.webservice.configuration.service import ServiceTemplates
from centreonapi.webservice.configuration.poller import Pollers
from centreonapi.webservice.configuration.hostgroups import HostGroups
from centreonapi.webservice.configuration.command import Commands
from centreonapi.webservice.configuration.resourcecfg import ResourceCFGs
from centreonapi.webservice import Webservice
class Centreon(object):
    """Top-level client facade for the Centreon web-service API.

    Instantiating this class authenticates the shared ``Webservice``
    singleton, then exposes one accessor object per configuration
    resource family (hosts, services, pollers, ...).
    """
    def __init__(self, url=None, username=None, password=None, check_ssl=True):
        # Authenticate once; every accessor below reuses this singleton.
        Webservice.getInstance(
            url,
            username,
            password,
            check_ssl
        )
        # One accessor per Centreon configuration object family.
        self.hosts = Hosts()
        self.services = Services()
        self.servicetemplates = ServiceTemplates()
        self.pollers = Pollers()
        self.hostgroups = HostGroups()
        self.hosttemplates = HostTemplates()
        self.commands = Commands()
        self.resourcecfgs = ResourceCFGs()
48fde4671089c32269180a49d9c041c2db42ecc4 | 2,418 | py | Python | share/qt/extract_strings_qt.py | trungnq1510/bitcoin | 4702a08358f9996f2a40cb5cba03c9177d4d991f | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | trungnq1510/bitcoin | 4702a08358f9996f2a40cb5cba03c9177d4d991f | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | trungnq1510/bitcoin | 4702a08358f9996f2a40cb5cba03c9177d4d991f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2012-2019 The Bitnamicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/bitnamicoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """Parse xgettext's 'po' output into a list of (msgid, msgstr) tuples.

    Each tuple element is the list of quoted string lines that make up the
    (possibly multi-line) msgid or msgstr.
    """
    entries = []
    cur_id = []
    cur_str = []
    reading_id = False
    reading_str = False
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            # A new message begins; flush the previous one if complete.
            if reading_str:
                entries.append((cur_id, cur_str))
                reading_str = False
            reading_id = True
            cur_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            cur_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line of a multi-line msgid/msgstr.
            if reading_id:
                cur_id.append(line)
            if reading_str:
                cur_str.append(line)
    if reading_str:
        entries.append((cur_id, cur_str))
    return entries
files = sys.argv[1:]

# xgettext -n --keyword=_ $FILES
XGETTEXT = os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    sys.exit(1)

# Run xgettext over all input files and parse its PO-format output.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()

messages = parse_po(out.decode('utf-8'))

# (fix) use a context manager so the output file is closed even if a
# write fails part-way through.
with open(OUT_CPP, 'w', encoding="utf8") as f:
    f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
    f.write('static const char UNUSED *bitnamicoin_strings[] = {\n')
    f.write('QT_TRANSLATE_NOOP("bitnamicoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
    # Deterministic output: sort by msgid, skip the empty PO header entry.
    messages.sort(key=operator.itemgetter(0))
    for (msgid, msgstr) in messages:
        if msgid != EMPTY:
            f.write('QT_TRANSLATE_NOOP("bitnamicoin-core", %s),\n' % ('\n'.join(msgid)))
    f.write('};\n')
| 28.116279 | 105 | 0.631514 |
e61fba84208671c1dc139750e4ada7d2bd1fbe7a | 1,039 | py | Python | misc.py | AJAYK-01/Telegram_VC_Bot | 970dc601fb636d76347eb8d17b12e198c91a5721 | [
"MIT"
] | null | null | null | misc.py | AJAYK-01/Telegram_VC_Bot | 970dc601fb636d76347eb8d17b12e198c91a5721 | [
"MIT"
] | null | null | null | misc.py | AJAYK-01/Telegram_VC_Bot | 970dc601fb636d76347eb8d17b12e198c91a5721 | [
"MIT"
] | null | null | null | HELP_TEXT = """__**I Can Play Music In The Voice Chat**__
**/skip** __Skip The Current Playing Music.__
**/play** __Service_Or_Default (Services: youtube/saavn/deezer, Default: youtube) Song_Name | Reply_To_Audio__
**/joinvc** __Join Voice Chat.__
**/leavevc** __Leave Voice Chat.__
**/listvc** __List Joined Voice Chats.__
**/volume [1-200]** __Adjust Volume.__
**/pause** __Pause Music.__
**/resume** __Resume Music.__
**/stop** __Stop Music.__
**/start** __Play Last Music.__
**/replay** __Replay Current Music.__
**/theme** __Change Curently Playing Theme.__
**/queue plformat/Nothing** __Shows Queue List. If you send plformat with command you will get it in playlist format.__
**/delqueue** __Deletes Queue List and Playlist.__
**/playlist** __Start Playing Playlist.__
**/lyric** __Get Lyric of Currently Playing Music. It is possible to get wrong lyrics. To increase trueness possibility use deezer or saavn__"""
# Markdown links for the bot's repository and support group (sent by /repo).
REPO_TEXT = (
    "[Github](https://github.com/thehamkercat/Telegram_vc_bot)"
    + " | [Group](t.me/TGVCSUPPORT)"
)
e1c6131d5a6bb381386e87fb3da02d737fe1ca26 | 14,248 | py | Python | timemory/profiler/profiler.py | mhaseeb123/timemory | 815bb68edf8a36f309d43a911ef7e39da08ce5ae | [
"MIT"
] | null | null | null | timemory/profiler/profiler.py | mhaseeb123/timemory | 815bb68edf8a36f309d43a911ef7e39da08ce5ae | [
"MIT"
] | null | null | null | timemory/profiler/profiler.py | mhaseeb123/timemory | 815bb68edf8a36f309d43a911ef7e39da08ce5ae | [
"MIT"
] | null | null | null | #!@PYTHON_EXECUTABLE@
#
# MIT License
#
# Copyright (c) 2018, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import copy
from functools import wraps
from collections import deque
__all__ = ["profile", "Profiler", "FakeProfiler"]
#
# Variables
#
# Module-level state: sys.setprofile installs a single global hook, so the
# bookkeeping it needs must also be global.
_records = deque()  # stack of started component_bundle entries
_counter = 0  # current profiled call depth
_skip_counts = []  # depth indices whose frames were intentionally not timed
_is_running = False  # True while any Profiler instance is active
_start_events = ["call"] # + ["c_call"]
_stop_events = ["return"] # + ["c_return"]
_always_skipped_functions = ["__exit__", "_handle_fromlist", "<module>"]
_always_skipped_files = ["__init__.py", "__main__.py", "<frozen importlib._bootstrap>"]
#
# Profiler settings
#
_include_line = True  # record the line number of each profiled frame
_include_filepath = True  # record the filename of each profiled frame
_full_filepath = False  # keep the full path (True) or just the basename (False)
def _default_functor():
return True
def _profiler_function(frame, event, arg):
    """sys.setprofile callback: start/stop a timemory component_bundle per frame.

    On a start event ("call") a component bundle is started for the frame
    unless the frame is filtered out; on the matching stop event ("return")
    the most recent bundle is stopped.  ``_counter``/``_skip_counts`` track
    call depth so skipped frames stay paired with their returns.
    """
    from ..libpytimemory import settings
    from ..libpytimemory import component_bundle

    global _records
    global _include_line
    global _include_filepath
    global _full_filepath
    global _counter
    global _skip_counts
    global _start_events
    global _stop_events
    global _always_skipped_functions

    # snapshot of the depth index this event corresponds to
    _count = copy.copy(_counter)

    if event in _start_events:
        # profiling globally disabled -> remember the depth so the matching
        # return event is also ignored
        if not settings.enabled:
            _skip_counts.append(_count)
            return

        _func = "{}".format(frame.f_code.co_name)
        _line = int(frame.f_lineno) if _include_line else -1
        _file = "" if not _include_filepath else "{}".format(
            frame.f_code.co_filename)

        # skip anything from this file
        if _file == __file__:
            _skip_counts.append(_count)
            return

        # check if skipped function
        if _func in _always_skipped_functions:
            _skip_counts.append(_count)
            return

        # check if skipped file
        if os.path.basename(_file) in _always_skipped_files:
            _skip_counts.append(_count)
            return

        if not _full_filepath and len(_file) > 0:
            _file = os.path.basename(_file)

        if "__init__.py" not in _file:
            # start timing this frame and push it on the record stack
            entry = component_bundle(_func, _file, _line)
            entry.start()
            _records.append(entry)
        else:
            _skip_counts.append(_count)
        _counter += 1
    elif event in _stop_events:
        # stop event for a skipped frame: just clear the marker
        if _count in _skip_counts:
            _skip_counts.remove(_count)
        elif len(_records) > 0:
            entry = _records.pop()
            entry.stop()
            del entry
        _counter -= 1
#----------------------------------------------------------------------------------------#
#
# Python-version flags used to select a portable exec implementation.
PY3 = sys.version_info[0] == 3
PY35 = PY3 and sys.version_info[1] >= 5

# exec (from https://bitbucket.org/gutworth/six/):
if PY3:
    # On Python 3, exec is a regular builtin function.
    import builtins
    exec_ = getattr(builtins, "exec")
    del builtins
else:
    # On Python 2, exec is a statement; wrap it in a function with the
    # same signature as the Python 3 builtin.
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # default to the caller's namespaces
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
#----------------------------------------------------------------------------------------#
#
class Profiler():
    """
    Provides decorators and context-manager for the timemory profilers.

    All public methods follow the same scheme: configure the timemory
    component bundle, install ``_profiler_function`` via ``sys.setprofile``
    while the profiled code runs, then restore the previous profile hook.
    The repeated begin/end boilerplate is factored into ``_begin``/``_end``.
    """

    global _default_functor

    # static predicate deciding whether profiling is enabled
    _conditional_functor = _default_functor

    #------------------------------------------------------------------------------------#
    #
    @staticmethod
    def condition(functor):
        """Install *functor* as the enable/disable predicate."""
        Profiler._conditional_functor = functor

    #------------------------------------------------------------------------------------#
    #
    @staticmethod
    def is_enabled():
        """Return True only if the conditional functor returns exactly True.

        A raising functor or any non-boolean return value counts as disabled.
        (fix) catches Exception instead of a bare ``except`` so that
        KeyboardInterrupt/SystemExit are not swallowed.
        """
        try:
            return Profiler._conditional_functor() is True
        except Exception:
            return False

    #------------------------------------------------------------------------------------#
    #
    def __init__(self, components=None, flat=False, timeline=False, *args, **kwargs):
        """
        Arguments:
            - components [list of strings] : list of timemory components
            - flat [bool] : enable flat profiling
            - timeline [bool] : enable timeline profiling
        """
        from ..libpytimemory import settings

        global _is_running

        # (fix) avoid a mutable default argument
        if components is None:
            components = []

        _trace = settings.trace_components
        _profl = settings.profiler_components
        _components = _profl if _trace is None else _trace

        self._original_profiler_function = sys.getprofile()
        # only profile when no other instance is active and the condition holds
        self._use = (not _is_running and Profiler.is_enabled() is True)
        self._flat_profile = (settings.flat_profile or flat)
        self._timeline_profile = (settings.timeline_profile or timeline)

        # merge user components with the configured ones: lowercase, dedupe
        # (dict.fromkeys preserves first-seen order)
        self.components = components + _components.split(",")
        self.components = [v.lower() for v in self.components]
        self.components = list(dict.fromkeys(self.components))
        # NOTE: split(",") always yields at least one (possibly empty) entry,
        # so this fallback is effectively unreachable — kept for safety
        if len(self.components) == 0:
            self.components += ["wall_clock"]
        if _trace is None:
            settings.trace_components = ','.join(self.components)
            settings.profiler_components = ','.join(self.components)

    #------------------------------------------------------------------------------------#
    #
    def _begin(self, install):
        """Save the current hook and configure the component bundle.

        When *install* is True, also install the profiler hook immediately.
        No-op when this instance decided not to profile.
        """
        global _is_running
        if not self._use:
            return
        from ..libpytimemory import component_bundle
        self._original_profiler_function = sys.getprofile()
        _is_running = True
        component_bundle.reset()
        component_bundle.configure(self.components, self._flat_profile,
                                   self._timeline_profile)
        if install:
            sys.setprofile(_profiler_function)

    #------------------------------------------------------------------------------------#
    #
    def _end(self, restore):
        """Mark profiling stopped; optionally restore the previous hook."""
        global _is_running
        if not self._use:
            return
        _is_running = False
        if restore:
            sys.setprofile(self._original_profiler_function)

    #------------------------------------------------------------------------------------#
    #
    def start(self):
        """
        Start the profiler explicitly
        """
        self._begin(install=True)

    #------------------------------------------------------------------------------------#
    #
    def stop(self):
        """
        Stop the profiler explicitly
        """
        self._end(restore=True)

    #------------------------------------------------------------------------------------#
    #
    def __call__(self, func):
        """
        Decorator: profile every call to *func*.

        The component bundle is configured once, at decoration time; the
        hook itself is installed/removed around each call.
        """
        self._begin(install=False)

        @wraps(func)
        def function_wrapper(*args, **kwargs):
            if self._use:
                sys.setprofile(_profiler_function)
            _ret = func(*args, **kwargs)
            if self._use:
                sys.setprofile(self._original_profiler_function)
            return _ret

        self._end(restore=False)
        return function_wrapper

    #------------------------------------------------------------------------------------#
    #
    def __enter__(self, *args, **kwargs):
        """
        Context manager
        """
        self._begin(install=True)

    #------------------------------------------------------------------------------------#
    #
    def __exit__(self, exec_type, exec_value, exec_tb):
        """
        Context manager
        """
        self._end(restore=True)

        import traceback
        if exec_type is not None and exec_value is not None and exec_tb is not None:
            traceback.print_exception(exec_type, exec_value, exec_tb, limit=5)

    #------------------------------------------------------------------------------------#
    #
    def run(self, cmd):
        """
        Execute and profile a command
        """
        import __main__
        # (fix) do not shadow the builtin ``dict``
        main_dict = __main__.__dict__
        if isinstance(cmd, str):
            return self.runctx(cmd, main_dict, main_dict)
        else:
            return self.runctx(" ".join(cmd), main_dict, main_dict)

    #------------------------------------------------------------------------------------#
    #
    def runctx(self, cmd, globals, locals):
        """
        Profile a context

        NOTE(review): preserved from the original — the profiler hook is
        configured but never installed here (sys.setprofile is not called),
        so nothing is actually sampled; confirm whether that is intentional.
        """
        self._begin(install=False)
        try:
            exec_(cmd, globals, locals)
        finally:
            self._end(restore=False)

        return self

    #------------------------------------------------------------------------------------#
    #
    def runcall(self, func, *args, **kw):
        """
        Profile a single function call.
        """
        self._begin(install=False)
        try:
            return func(*args, **kw)
        finally:
            self._end(restore=False)

    #------------------------------------------------------------------------------------#
    #
    def add_module(self, mod):
        """ Add all the functions in a module and its classes.

        Returns the number of functions added.

        NOTE(review): relies on ``self.add_function`` which is not defined
        in this class — preserved from the original; confirm upstream.
        """
        from inspect import isclass, isfunction

        nfuncsadded = 0
        for item in mod.__dict__.values():
            if isclass(item):
                for k, v in item.__dict__.items():
                    if isfunction(v):
                        self.add_function(v)
                        nfuncsadded += 1
            elif isfunction(item):
                self.add_function(item)
                nfuncsadded += 1

        return nfuncsadded
# Lowercase alias so the class can be used as the conventional ``@profile``.
profile = Profiler
class FakeProfiler():
    """No-op stand-in for :class:`Profiler`.

    Accepts the same decorator/context-manager usage but never profiles,
    so instrumented code can run unchanged with profiling disabled.
    """

    #------------------------------------------------------------------------------------#
    #
    @staticmethod
    def condition(functor):
        """Accepted for API compatibility; the functor is ignored."""
        pass

    #------------------------------------------------------------------------------------#
    #
    @staticmethod
    def is_enabled():
        """The fake profiler is never enabled."""
        return False

    #------------------------------------------------------------------------------------#
    #
    def __init__(self, *args, **kwargs):
        """All arguments (components, flat, timeline, ...) are ignored."""
        pass

    #------------------------------------------------------------------------------------#
    #
    def __call__(self, func):
        """Decorator: return a transparent wrapper around *func*."""
        @wraps(func)
        def function_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return function_wrapper

    #------------------------------------------------------------------------------------#
    #
    def __enter__(self, *args, **kwargs):
        """Context manager entry: nothing to do."""
        pass

    #------------------------------------------------------------------------------------#
    #
    def __exit__(self, exec_type, exec_value, exec_tb):
        """Context manager exit: report an exception, if one occurred."""
        import traceback

        if not (exec_type is None or exec_value is None or exec_tb is None):
            traceback.print_exception(exec_type, exec_value, exec_tb, limit=5)
| 31.87472 | 90 | 0.522108 |
d798939067d9834c6c6206a077b96cf5e7bbe11e | 20,034 | py | Python | VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/vtk/numpy_interface/internal_algorithms.py | jiaguobing/FastCAE | 2348ab87e83fe5c704e4c998cf391229c25ac5d5 | [
"BSD-3-Clause"
] | 4 | 2016-03-30T14:31:52.000Z | 2019-02-02T05:01:32.000Z | VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/vtk/numpy_interface/internal_algorithms.py | Sunqia/FastCAE | cbc023fe07b6e306ceefae8b8bd7c12bc1562acb | [
"BSD-3-Clause"
] | 1 | 2020-03-06T04:49:42.000Z | 2020-03-06T04:49:42.000Z | VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/vtk/numpy_interface/internal_algorithms.py | Sunqia/FastCAE | cbc023fe07b6e306ceefae8b8bd7c12bc1562acb | [
"BSD-3-Clause"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | from __future__ import absolute_import
from . import dataset_adapter as dsa
import numpy
from vtk.util import numpy_support
import vtk
def _cell_derivatives (narray, dataset, attribute_type, filter):
    """Run a vtkCellDerivatives-style *filter* over *narray* on *dataset*.

    *narray* must be point- or cell-associated scalars (1 component) or
    vectors (3 components); *attribute_type* is 'scalars' or 'vectors'.
    Returns the point or cell data of the filter output, matching the
    association of the input array.  Raises RuntimeError on bad input.
    """
    if not dataset :
        raise RuntimeError('Need a dataset to compute _cell_derivatives.')

    # Reshape n dimensional vector to n by 1 matrix
    if len(narray.shape) == 1 :
        narray = narray.reshape((narray.shape[0], 1))
    ncomp = narray.shape[1]
    if attribute_type == 'scalars' and ncomp != 1 :
        raise RuntimeError('This function expects scalars. ' +
                           'Input shape ' + str(narray.shape))
    if attribute_type == 'vectors' and ncomp != 3 :
        raise RuntimeError('This function expects vectors. ' +
                           'Input shape ' + str(narray.shape))

    # numpy_to_vtk converts only contiguous arrays
    if not narray.flags.contiguous : narray = narray.copy()
    varray = numpy_support.numpy_to_vtk(narray)

    if attribute_type == 'scalars': varray.SetName('scalars')
    else : varray.SetName('vectors')

    # create a dataset with only our array but the same geometry/topology
    ds = dataset.NewInstance()
    ds.UnRegister(None)
    ds.CopyStructure(dataset.VTKObject)

    if dsa.ArrayAssociation.FIELD == narray.Association :
        raise RuntimeError('Unknown data association. Data should be associated with points or cells.')

    if dsa.ArrayAssociation.POINT == narray.Association :
        # Work on point data
        if narray.shape[0] != dataset.GetNumberOfPoints() :
            raise RuntimeError('The number of points does not match the number of tuples in the array')
        if attribute_type == 'scalars': ds.GetPointData().SetScalars(varray)
        else : ds.GetPointData().SetVectors(varray)
    elif dsa.ArrayAssociation.CELL == narray.Association :
        # Work on cell data
        if narray.shape[0] != dataset.GetNumberOfCells() :
            # (fix) error message previously read "The number of does not
            # match ..." — the word "cells" was missing
            raise RuntimeError('The number of cells does not match the number of tuples in the array')

        # Since vtkCellDerivatives only works with point data, we need to convert
        # the cell data to point data first.
        ds2 = dataset.NewInstance()
        ds2.UnRegister(None)
        ds2.CopyStructure(dataset.VTKObject)

        if attribute_type == 'scalars' : ds2.GetCellData().SetScalars(varray)
        else : ds2.GetCellData().SetVectors(varray)

        c2p = vtk.vtkCellDataToPointData()
        c2p.SetInputData(ds2)
        c2p.Update()

        # Set the output to the ds dataset
        if attribute_type == 'scalars':
            ds.GetPointData().SetScalars(c2p.GetOutput().GetPointData().GetScalars())
        else:
            ds.GetPointData().SetVectors(c2p.GetOutput().GetPointData().GetVectors())

    filter.SetInputData(ds)

    if dsa.ArrayAssociation.POINT == narray.Association :
        # Since the data is associated with cell and the query is on points
        # we have to convert to point data before returning
        c2p = vtk.vtkCellDataToPointData()
        c2p.SetInputConnection(filter.GetOutputPort())
        c2p.Update()

        return c2p.GetOutput().GetPointData()
    elif dsa.ArrayAssociation.CELL == narray.Association :
        filter.Update()
        return filter.GetOutput().GetCellData()
    else :
        # We shall never reach here
        raise RuntimeError('Unknown data association. Data should be associated with points or cells.')
def _cell_quality (dataset, quality) :
    """Evaluate the named *quality* measure for every cell of *dataset*.

    Returns a cell-associated VTKArray of the per-cell quality values.
    """
    if not dataset :
        raise RuntimeError('Need a dataset to compute _cell_quality')

    # Build an empty dataset sharing the input's geometry/topology.
    ds = dataset.NewInstance()
    ds.UnRegister(None)
    ds.CopyStructure(dataset.VTKObject)

    filter = vtk.vtkCellQuality()
    filter.SetInputData(ds)

    # Dispatch table: quality keyword -> measure selector on the filter.
    selectors = {
        "area": filter.SetQualityMeasureToArea,
        "aspect": filter.SetQualityMeasureToAspectRatio,
        "aspect_gamma": filter.SetQualityMeasureToAspectGamma,
        "condition": filter.SetQualityMeasureToCondition,
        "diagonal": filter.SetQualityMeasureToDiagonal,
        "jacobian": filter.SetQualityMeasureToJacobian,
        "max_angle": filter.SetQualityMeasureToMaxAngle,
        "shear": filter.SetQualityMeasureToShear,
        "skew": filter.SetQualityMeasureToSkew,
        "min_angle": filter.SetQualityMeasureToMinAngle,
        "volume": filter.SetQualityMeasureToVolume,
    }
    if quality not in selectors :
        raise RuntimeError('Unknown cell quality ['+quality+'].')
    selectors[quality]()

    filter.Update()

    varray = filter.GetOutput().GetCellData().GetArray("CellQuality")
    ans = dsa.vtkDataArrayToVTKArray(varray, dataset)

    # The association information has been lost over the vtk filter
    # we must reconstruct it otherwise lower pipeline will be broken.
    ans.Association = dsa.ArrayAssociation.CELL

    return ans
def _matrix_math_filter (narray, operation) :
    """Apply a vtkMatrixMathFilter operation to an array of square matrices.

    ``operation`` must be one of Determinant, Inverse, Eigenvalue or
    Eigenvector; ``narray`` must be a 3D array of 2D square matrices.
    """
    if operation not in ['Determinant', 'Inverse', 'Eigenvalue', 'Eigenvector'] :
        raise RuntimeError('Unknown quality measure ['+operation+']'+
                           ' Supported are [Determinant, Inverse, Eigenvalue, Eigenvector]')
    if narray.ndim != 3 :
        raise RuntimeError(operation+' only works for an array of matrices(3D array).'+
                           ' Input shape ' + str(narray.shape))
    elif narray.shape[1] != narray.shape[2] :
        raise RuntimeError(operation+' requires an array of 2D square matrices.' +
                           ' Input shape ' + str(narray.shape))
    # numpy_to_vtk converts only contiguous arrays.
    if not narray.flags.contiguous : narray = narray.copy()
    # Flatten each matrix into one row: numpy_support.numpy_to_vtk handles
    # at most 2D arrays.
    nmatrices = narray.shape[0]
    ncomponents = narray.shape[1] * narray.shape[2]
    narray = narray.reshape(nmatrices, ncomponents)
    ds = vtk.vtkImageData()
    ds.SetDimensions(nmatrices, 1, 1)
    varray = numpy_support.numpy_to_vtk(narray)
    varray.SetName('tensors')
    ds.GetPointData().SetTensors(varray)
    matrix_filter = vtk.vtkMatrixMathFilter()
    # 'operation' was validated above, so the setter name is well formed.
    getattr(matrix_filter, 'SetOperationTo' + operation)()
    matrix_filter.SetInputData(ds)
    matrix_filter.Update()
    varray = matrix_filter.GetOutput().GetPointData().GetArray(operation)
    ans = dsa.vtkDataArrayToVTKArray(varray)
    # The association information has been lost over the vtk filter;
    # reconstruct it, otherwise the lower pipeline will be broken.
    ans.Association = narray.Association
    ans.DataSet = narray.DataSet
    return ans
# Python interfaces
def abs (narray) :
    """Return the absolute values of an array of scalars/vectors/tensors."""
    # numpy.absolute is the canonical name of numpy.abs.
    return numpy.absolute(narray)
def all (narray, axis=None):
    """Return whether all values of an array of scalars/vectors/tensors
    evaluate to True, optionally along the given axis.

    Returns NoneArray unchanged for a NoneArray input.
    """
    # NOTE: the original docstring incorrectly said "Returns the min value".
    if narray is dsa.NoneArray:
        return dsa.NoneArray
    ans = numpy.all(numpy.array(narray), axis)
    return ans
def area (dataset) :
    """Per-cell surface area of a mesh, computed via vtkCellQuality."""
    measure = "area"
    return _cell_quality(dataset, measure)
def aspect (dataset) :
    """Per-cell aspect ratio of a mesh, computed via vtkCellQuality."""
    measure = "aspect"
    return _cell_quality(dataset, measure)
def aspect_gamma (dataset) :
    """Per-cell aspect ratio gamma of a mesh, computed via vtkCellQuality."""
    measure = "aspect_gamma"
    return _cell_quality(dataset, measure)
def condition (dataset) :
    """Per-cell condition number of a mesh, computed via vtkCellQuality."""
    measure = "condition"
    return _cell_quality(dataset, measure)
def cross (x, y) :
    """Return the cross product for two 3D vectors, or elementwise for two
    arrays of 3D vectors."""
    if x is dsa.NoneArray or y is dsa.NoneArray:
        return dsa.NoneArray
    # Shape description shared by every error message below.
    shape_info = str(x.shape) + ' and ' + str(y.shape)
    if x.ndim != y.ndim or x.shape != y.shape:
        raise RuntimeError('Both operands must have same dimension and shape.' +
                           ' Input shapes ' + shape_info)
    if x.ndim == 1 :
        if x.shape[0] != 3 :
            raise RuntimeError('Cross only works for 3D vectors.' +
                               ' Input shapes ' + shape_info)
    elif x.ndim == 2 :
        if x.shape[1] != 3 :
            raise RuntimeError('Cross only works for an array of 3D vectors.' +
                               'Input shapes ' + shape_info)
    else :
        raise RuntimeError('Cross only works for 3D vectors or an array of 3D vectors.' +
                           ' Input shapes ' + shape_info)
    return numpy.cross(x, y)
def curl (narray, dataset=None):
    """Return the curl of an array of 3D vectors.

    ``dataset`` defaults to the dataset the array is attached to; one of
    the two must be available since the curl needs cell connectivity.
    """
    if not dataset :
        dataset = narray.DataSet
    if not dataset :
        raise RuntimeError('Need a dataset to compute curl.')
    if narray.ndim != 2 or narray.shape[1] != 3 :
        raise RuntimeError('Curl only works with an array of 3D vectors.' +
                           'Input shape ' + str(narray.shape))
    # vtkCellDerivatives in vorticity mode computes the curl of the field.
    deriv = vtk.vtkCellDerivatives()
    deriv.SetVectorModeToComputeVorticity()
    res = _cell_derivatives(narray, dataset, 'vectors', deriv)
    vorticity_arr = res.GetVectors()
    vorticity_arr.SetName("vorticity")
    ans = dsa.vtkDataArrayToVTKArray(vorticity_arr, dataset)
    # The association information has been lost over the vtk filter;
    # reconstruct it, otherwise the lower pipeline will be broken.
    ans.Association = narray.Association
    return ans
def divergence (narray, dataset=None):
    """Return the divergence of an array of 3D vectors.

    div(v) is computed as the trace of the gradient tensor of v.
    """
    if not dataset :
        dataset = narray.DataSet
    if not dataset :
        raise RuntimeError('Need a dataset to compute divergence')
    if narray.ndim != 2 or narray.shape[1] != 3 :
        raise RuntimeError('Divergence only works with an array of 3D vectors.' +
                           ' Input shape ' + str(narray.shape))
    grad = gradient(narray, dataset)
    grad = grad.reshape(grad.shape[0], 3, 3)
    traces = numpy.add.reduce(grad.diagonal(axis1=1, axis2=2), 1)
    return dsa.VTKArray(traces, dataset=grad.DataSet)
def det (narray) :
    """Determinant of each matrix in an array of 2D square matrices."""
    operation = "Determinant"
    return _matrix_math_filter(narray, operation)
def determinant (narray) :
    """Determinant of each matrix in an array of 2D square matrices.

    Long-name alias of det().
    """
    return _matrix_math_filter(narray, "Determinant")
def diagonal (dataset) :
    """Per-cell diagonal length of a dataset, computed via vtkCellQuality."""
    measure = "diagonal"
    return _cell_quality(dataset, measure)
def dot (a1, a2):
    """Row-wise dot product of two arrays of scalars/vectors.

    The DataSet link is kept only when both inputs come from the same
    dataset.
    """
    if a1 is dsa.NoneArray or a2 is dsa.NoneArray:
        return dsa.NoneArray
    if a1.shape[1] != a2.shape[1] :
        raise RuntimeError('Dot product only works with vectors of same dimension.' +
                           ' Input shapes ' + str(a1.shape) + ' and ' + str(a2.shape))
    # Multiply componentwise, then sum each row.
    products = a1*a2
    result = dsa.VTKArray(numpy.add.reduce(products, 1))
    if a1.DataSet == a2.DataSet :
        result.DataSet = a1.DataSet
    return result
def eigenvalue (narray) :
    """Eigenvalues of each matrix in an array of 2D square matrices."""
    operation = "Eigenvalue"
    return _matrix_math_filter(narray, operation)
def eigenvector (narray) :
    """Eigenvectors of each matrix in an array of 2D square matrices."""
    operation = "Eigenvector"
    return _matrix_math_filter(narray, operation)
def gradient(narray, dataset=None):
    """Return the gradient of an array of scalars (1 component) or
    vectors (3 components).

    A scalar field yields a vector gradient; a vector field yields a
    tensor gradient.
    """
    if not dataset:
        dataset = narray.DataSet
    if not dataset:
        raise RuntimeError('Need a dataset to compute gradient')
    # A 1-D array has a single component; otherwise the component count is
    # the size of the second axis.
    ncomp = narray.shape[1] if narray.ndim > 1 else 1
    if ncomp not in (1, 3):
        raise RuntimeError('Gradient only works with scalars (1 component) and vectors (3 component)' +
                           ' Input shape ' + str(narray.shape))
    cd = vtk.vtkCellDerivatives()
    attribute_type = 'scalars' if ncomp == 1 else 'vectors'
    res = _cell_derivatives(narray, dataset, attribute_type, cd)
    retVal = res.GetVectors() if ncomp == 1 else res.GetTensors()
    # Derive the output array name from the input name when one exists.
    try:
        input_name = narray.GetName()
    except AttributeError:
        input_name = None
    if input_name :
        retVal.SetName("gradient of " + input_name)
    else :
        retVal.SetName("gradient")
    ans = dsa.vtkDataArrayToVTKArray(retVal, dataset)
    # The association information has been lost over the vtk filter;
    # reconstruct it, otherwise the lower pipeline will be broken.
    ans.Association = narray.Association
    return ans
def inv (narray) :
    """Inverse of each matrix in an array of 2D square matrices."""
    operation = "Inverse"
    return _matrix_math_filter(narray, operation)
def inverse (narray) :
    """Inverse of each matrix in an array of 2D square matrices.

    Long-name alias of inv().
    """
    return _matrix_math_filter(narray, "Inverse")
def jacobian (dataset) :
    """Return the jacobian of each cell in a dataset.

    This is a per-cell mesh-quality measure (vtkCellQuality), not a matrix
    operation; the original docstring wrongly described it as acting on an
    array of 2D square matrices.
    """
    return _cell_quality(dataset, "jacobian")
def laplacian (narray, dataset=None) :
    """Return the laplacian of an array of scalars.

    Computed as div(grad(f)).  The original docstring wrongly said
    "jacobian".
    """
    if not dataset : dataset = narray.DataSet
    if not dataset : raise RuntimeError('Need a dataset to compute laplacian')
    ans = gradient(narray, dataset)
    # Pass the dataset through explicitly so the result does not depend on
    # the gradient array keeping its DataSet back-reference alive.
    return divergence(ans, dataset)
def ln (narray) :
    """Natural logarithm of an array of scalars/vectors/tensors."""
    return numpy.log(narray)
def log (narray) :
    """Natural logarithm of an array of scalars/vectors/tensors.

    Alias of ln().
    """
    return numpy.log(narray)
def log10 (narray) :
    """Base-10 logarithm of an array of scalars/vectors/tensors."""
    return numpy.log10(narray)
def max (narray, axis=None):
    """Maximum value of an array of scalars/vectors/tensors, optionally
    along the given axis.  NoneArray passes through unchanged."""
    if narray is dsa.NoneArray:
        return dsa.NoneArray
    return numpy.max(narray, axis)
def max_angle (dataset) :
    """Per-cell maximum angle of a dataset, computed via vtkCellQuality."""
    measure = "max_angle"
    return _cell_quality(dataset, measure)
def mag (a) :
    """Return the magnitude of an array of scalars/vectors.

    Fixes the "magnigude" typo in the original docstring.
    """
    return numpy.sqrt(dot(a, a))
def mean (narray, axis=None) :
    """Mean value of an array of scalars/vectors/tensors, optionally along
    the given axis.  NoneArray passes through unchanged."""
    if narray is dsa.NoneArray:
        return dsa.NoneArray
    # Convert to a plain ndarray first so VTKArray metadata does not
    # interfere with the reduction.
    return numpy.mean(numpy.array(narray), axis)
def min (narray, axis=None):
    """Minimum value of an array of scalars/vectors/tensors, optionally
    along the given axis.  NoneArray passes through unchanged."""
    if narray is dsa.NoneArray:
        return dsa.NoneArray
    return numpy.min(narray, axis)
def min_angle (dataset) :
    """Per-cell minimum angle of a dataset, computed via vtkCellQuality."""
    measure = "min_angle"
    return _cell_quality(dataset, measure)
def norm (a) :
    """Normalize an array of scalars/vectors: each row divided by its
    magnitude."""
    magnitudes = mag(a).reshape((a.shape[0], 1))
    return a / magnitudes
def shear (dataset) :
    """Per-cell shear of a dataset, computed via vtkCellQuality."""
    measure = "shear"
    return _cell_quality(dataset, measure)
def skew (dataset) :
    """Per-cell skew of a dataset, computed via vtkCellQuality."""
    measure = "skew"
    return _cell_quality(dataset, measure)
def strain (narray, dataset=None) :
    """Return the strain tensor of an array of 3D vectors.

    ``dataset`` defaults to the dataset the array is attached to.
    """
    if not dataset :
        dataset = narray.DataSet
    if not dataset :
        raise RuntimeError('Need a dataset to compute strain')
    if narray.ndim != 2 or narray.shape[1] != 3 :
        raise RuntimeError('strain only works with an array of 3D vectors' +
                           'Input shape ' + str(narray.shape))
    # vtkCellDerivatives in strain mode produces the strain tensor field.
    deriv = vtk.vtkCellDerivatives()
    deriv.SetTensorModeToComputeStrain()
    res = _cell_derivatives(narray, dataset, 'vectors', deriv)
    tensors = res.GetTensors()
    tensors.SetName("strain")
    ans = dsa.vtkDataArrayToVTKArray(tensors, dataset)
    # The association information has been lost over the vtk filter;
    # reconstruct it, otherwise the lower pipeline will be broken.
    ans.Association = narray.Association
    return ans
def sum (narray, axis=None):
    """Return the sum of an array of scalars/vectors/tensors, optionally
    along the given axis.

    NoneArray passes through unchanged.  The original docstring wrongly
    said "Returns the min value".
    """
    if narray is dsa.NoneArray:
        return dsa.NoneArray
    return numpy.sum(narray, axis)
def surface_normal (dataset) :
    """Return the surface normal of each cell in a dataset."""
    if not dataset :
        raise RuntimeError('Need a dataset to compute surface_normal')
    # Work on a copy that shares structure but carries no attribute arrays.
    shell = dataset.NewInstance()
    shell.UnRegister(None)
    shell.CopyStructure(dataset.VTKObject)
    normals = vtk.vtkPolyDataNormals()
    normals.SetInputData(shell)
    normals.ComputeCellNormalsOn()
    normals.ComputePointNormalsOff()
    # Disable every option that could alter the input topology or flip
    # normals behind the caller's back.
    normals.SetFeatureAngle(180)
    normals.SplittingOff()
    normals.ConsistencyOff()
    normals.AutoOrientNormalsOff()
    normals.FlipNormalsOff()
    normals.NonManifoldTraversalOff()
    normals.Update()
    varray = normals.GetOutput().GetCellData().GetNormals()
    ans = dsa.vtkDataArrayToVTKArray(varray, dataset)
    # The association information has been lost over the vtk filter;
    # reconstruct it, otherwise the lower pipeline will be broken.
    ans.Association = dsa.ArrayAssociation.CELL
    return ans
def trace (narray) :
    """Trace of a 2D square matrix, or of each matrix in an array of them."""
    # For a single matrix trace over axes (0, 1); for an array of matrices
    # the matrix axes are (1, 2).
    if narray.ndim > 2 :
        return numpy.trace(narray, axis1=1, axis2=2)
    return numpy.trace(narray, axis1=0, axis2=1)
def var (narray, axis=None) :
    """Return the variance of an array of scalars/vectors/tensors,
    optionally along the given axis.

    NoneArray passes through unchanged.  The original docstring wrongly
    said "Returns the mean value".
    """
    if narray is dsa.NoneArray:
        return dsa.NoneArray
    return numpy.var(narray, axis)
def volume (dataset) :
    """Return the volume of each cell in a dataset.

    The original docstring wrongly said "volume normal".
    """
    return _cell_quality(dataset, "volume")
def vorticity(narray, dataset=None):
    """Vorticity of an array of 3D vectors.

    Vorticity and curl are the same quantity; this simply delegates to
    curl().
    """
    return curl(narray, dataset)
def vertex_normal (dataset) :
    """Return the vertex normal of each point in a dataset."""
    if not dataset :
        raise RuntimeError('Need a dataset to compute vertex_normal')
    # Work on a copy that shares structure but carries no attribute arrays.
    shell = dataset.NewInstance()
    shell.UnRegister(None)
    shell.CopyStructure(dataset.VTKObject)
    normals = vtk.vtkPolyDataNormals()
    normals.SetInputData(shell)
    normals.ComputeCellNormalsOff()
    normals.ComputePointNormalsOn()
    # Disable every option that could alter the input topology or flip
    # normals behind the caller's back.
    normals.SetFeatureAngle(180)
    normals.SplittingOff()
    normals.ConsistencyOff()
    normals.AutoOrientNormalsOff()
    normals.FlipNormalsOff()
    normals.NonManifoldTraversalOff()
    normals.Update()
    varray = normals.GetOutput().GetPointData().GetNormals()
    ans = dsa.vtkDataArrayToVTKArray(varray, dataset)
    # The association information has been lost over the vtk filter;
    # reconstruct it, otherwise the lower pipeline will be broken.
    ans.Association = dsa.ArrayAssociation.POINT
    return ans
def make_vector(ax, ay, az=None):
    """Merge the 1D arrays ax, ay (and optionally az) into an array of 3D
    vectors.

    When az is omitted the z component is filled with zeros.  DataSet and
    Association metadata are copied from ax when present; the caller may
    overwrite them afterwards.
    """
    # Bug fix: the original guard tested `ay is dsa.NoneArray` twice and
    # never checked az.
    if ax is dsa.NoneArray or ay is dsa.NoneArray or az is dsa.NoneArray:
        return dsa.NoneArray
    if len(ax.shape) != 1 or len(ay.shape) != 1 or (az is not None and len(az.shape) != 1):
        raise ValueError("Can only merge 1D arrays")
    if az is None:
        az = numpy.zeros(ax.shape)
    v = numpy.vstack([ax, ay, az]).transpose().view(dsa.VTKArray)
    # Copy defaults from the first array. The user can always
    # overwrite this.
    try:
        v.DataSet = ax.DataSet
    except AttributeError: pass
    try:
        v.Association = ax.Association
    except AttributeError: pass
    return v
| 37.1 | 103 | 0.659828 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.