hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5e37960667eb3583128774bf429ae73437019954 | 1,848 | py | Python | bin/config.py | strauch-co/ahb2apb_bridge_vip | 9e9c9990dedb981f3de660aa3d1d14e66604abe0 | [
"MIT"
] | null | null | null | bin/config.py | strauch-co/ahb2apb_bridge_vip | 9e9c9990dedb981f3de660aa3d1d14e66604abe0 | [
"MIT"
] | null | null | null | bin/config.py | strauch-co/ahb2apb_bridge_vip | 9e9c9990dedb981f3de660aa3d1d14e66604abe0 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
##------------------------------------------------------------------------------
## Copyright (c) 2021 by Strauch Consulting, LLC. and Xpeerant, Inc.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##------------------------------------------------------------------------------
import sys
import os
import re
################################################################################
# Read configuration file and return items in a dictionary
################################################################################
def get_config (filename):
    """Parse a simple ``key: value`` configuration file into a dict.

    Everything after a ``#`` on a line is treated as a comment and dropped.
    A value enclosed in square brackets is parsed as a list, split on commas
    and/or whitespace; any other value is kept as the raw string following
    the colon.

    Raises the usual open() errors (e.g. FileNotFoundError) if *filename*
    cannot be read.
    """
    config = {}
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename, "r") as fhr:
        for line in fhr:
            # remove comments (raw strings avoid invalid-escape warnings)
            line = re.sub(r'#.*$', '', line)
            # get the key:value (value runs to end of line)
            mObj = re.search(r'^\s*(\w+)\s*:\s*(.*$)', line)
            if not mObj:
                continue
            key = mObj.group(1)
            value = mObj.group(2)
            # if value is enclosed in [] then parse as a list
            if re.search(r'\[.*\]', value):
                # remove brackets
                opt = re.sub(r'\[', '', value)
                opt = re.sub(r'\]\s*$', '', opt)
                # split on commas, then on whitespace within each piece
                items = []
                for piece in opt.split(','):
                    items.extend(piece.split())
                value = items
            config[key] = value
    return config
| 34.222222 | 80 | 0.510281 |
a01f8ad7e173ff4e19a8a9f96955453a93bd2227 | 182 | py | Python | robot_framework/visualization/base_visualization.py | abarcis/robot-framework | a2cef7850784ae4c12b47fc7fb297f3772c2e2fe | [
"MIT"
] | null | null | null | robot_framework/visualization/base_visualization.py | abarcis/robot-framework | a2cef7850784ae4c12b47fc7fb297f3772c2e2fe | [
"MIT"
] | null | null | null | robot_framework/visualization/base_visualization.py | abarcis/robot-framework | a2cef7850784ae4c12b47fc7fb297f3772c2e2fe | [
"MIT"
] | null | null | null | #! /usr/bin/env python
class BaseVisualization:
    """Abstract interface for visualization backends."""

    def update(self, states, states_log=None):
        """Render the given states; concrete subclasses must override."""
        raise NotImplementedError()

    def reinit(self, params=None):
        """Re-initialize with new parameters; the default is a no-op."""
        return None
| 18.2 | 46 | 0.664835 |
fe97e96e1680b2ca779cc192a08bb2a62d8308ff | 1,099 | py | Python | ros2_message_converter/json_message_converter.py | ubica-robotics/ros2_message_converter | a383a06bdbe47288d2ab13db2e6c71fbc25cae8a | [
"BSD-3-Clause"
] | 2 | 2021-09-09T12:50:25.000Z | 2021-09-17T12:19:39.000Z | ros2_message_converter/json_message_converter.py | ubica-robotics/ros2_message_converter | a383a06bdbe47288d2ab13db2e6c71fbc25cae8a | [
"BSD-3-Clause"
] | 3 | 2021-10-04T11:30:07.000Z | 2021-12-01T07:51:57.000Z | ros2_message_converter/json_message_converter.py | ubica-robotics/ros2_message_converter | a383a06bdbe47288d2ab13db2e6c71fbc25cae8a | [
"BSD-3-Clause"
] | 2 | 2021-09-21T09:54:12.000Z | 2021-11-26T21:06:58.000Z | import json
from ros2_message_converter import message_converter
def convert_json_to_ros_message(message_type, json_message, strict_mode=True):
    """Deserialize a JSON-formatted string into a ROS message instance.

    If strict_mode is set, an exception is raised when the JSON document
    contains fields unknown to the message type.

    Example:
        message_type = "std_msgs/String"
        json_message = '{"data": "Hello, Robot"}'
        ros_message = convert_json_to_ros_message(message_type, json_message)
    """
    fields = json.loads(json_message)
    return message_converter.convert_dictionary_to_ros_message(
        message_type, fields, strict_mode=strict_mode)
def convert_ros_message_to_json(message):
    """Serialize a ROS message instance into a JSON-formatted string.

    Example:
        ros_message = std_msgs.msg.String(data="Hello, Robot")
        json_message = convert_ros_message_to_json(ros_message)
    """
    as_dict = message_converter.convert_ros_message_to_dictionary(message)
    return json.dumps(as_dict)
| 36.633333 | 113 | 0.747043 |
98fe6b555f4d0e7c40db423b00e8d1fee48d9bfd | 1,302 | py | Python | pyemc/test_stress.py | numerodix/memcache.rs | fec9ccd0e762cbe0d74fa5199594c9a59e16918b | [
"MIT"
] | 63 | 2016-01-10T22:08:46.000Z | 2019-08-21T20:55:07.000Z | pyemc/test_stress.py | numerodix/memcache.rs | fec9ccd0e762cbe0d74fa5199594c9a59e16918b | [
"MIT"
] | 1 | 2016-10-06T21:24:07.000Z | 2017-01-29T16:12:56.000Z | pyemc/test_stress.py | numerodix/memcache.rs | fec9ccd0e762cbe0d74fa5199594c9a59e16918b | [
"MIT"
] | 6 | 2016-01-12T14:15:17.000Z | 2020-09-24T04:01:38.000Z | import time
from pyemc.abstractions.test_api import TestCase
class TestStress(TestCase):
    """Throughput stress tests exercising the client's basic commands."""

    def run_bench(self, func, loops, desc):
        """Call `func` `loops` times after a warmup and report requests/sec.

        :param func: zero-argument callable to benchmark.
        :param loops: number of timed iterations.
        :param desc: human-readable label for the report line.
        """
        # untimed warmup
        # BUG FIX: the original computed warmup_loops but then warmed up with
        # xrange(loops), running the full workload untimed as well.
        warmup_loops = loops / 3
        for _ in xrange(warmup_loops):
            func()

        # timed execution
        start_time = time.time()
        for _ in xrange(loops):
            func()
        end_time = time.time()

        interval = end_time - start_time
        rate = float(loops) / interval
        self.write("Made %d %s requests in %.2f seconds = %.2f requests/sec" %
                   (loops, desc, interval, rate))

    def test_set_const_key_noreply(self):
        """Bench set() with a constant key and noreply (no server ack)."""
        def func():
            self.client.set('x', 'abc', noreply=True)

        self.run_bench(func, 700000, 'constant key set+noreply')

    def test_set_const_key(self):
        """Bench set() with a constant key, waiting for the reply."""
        def func():
            self.client.set('x', 'abc')

        self.run_bench(func, 100000, 'constant key set')

    def test_get_const_key(self):
        """Bench get() of a key stored once up front."""
        self.client.set('x', 'abc')

        def func():
            self.client.get('x')

        self.run_bench(func, 100000, 'constant key get')

    def test_version(self):
        '''Does not even touch the storage layer.'''
        def func():
            self.client.version()

        self.run_bench(func, 100000, 'version')
| 25.529412 | 78 | 0.571429 |
73ff5d28c14572b2bd952435101e045f821c5760 | 1,231 | py | Python | main.py | guard-project/cb-manager | 7dc7c7b9beacc45236674642f4b64373a2f4cdb3 | [
"MIT"
] | 2 | 2020-05-14T00:32:32.000Z | 2022-02-28T21:08:16.000Z | main.py | guard-project/cb-manager | 7dc7c7b9beacc45236674642f4b64373a2f4cdb3 | [
"MIT"
] | 4 | 2020-12-09T16:22:45.000Z | 2021-04-06T09:58:01.000Z | main.py | guard-project/cb-manager | 7dc7c7b9beacc45236674642f4b64373a2f4cdb3 | [
"MIT"
] | null | null | null | import os
path = os.path.abspath(__file__)  # absolute path of this script
dir_path = os.path.dirname(path)  # directory containing it
# Run from the script's own directory so relative paths (configs, logs)
# resolve the same way regardless of the caller's working directory.
os.chdir(dir_path)
import waitress # noqa: E402
from rich import pretty, traceback # noqa: E402
from rich.console import Console # noqa: E402
from rich.panel import Panel # noqa: E402
pretty.install()
traceback.install(show_locals=False)
from about import project, title, version # noqa: E402
from api import api # noqa: E402
from lib.elasticsearch import connection as es_conn # noqa: E402
from reader.arg import ArgReader # noqa: E402
from utils.log import Log # noqa: E402
# Parse command-line / environment configuration.
data_base = ArgReader.read()

if data_base.version is not None:
    # Version-only invocation: print it and skip server startup entirely.
    print(data_base.version)
else:
    ident = f'{project} - {title} v:{version}'
    console = Console()
    console.print(Panel.fit(ident))  # startup banner
    Log.init(config=data_base.log_config)
    # Establish the Elasticsearch connection (with retries) before serving.
    es_conn(endpoint=data_base.es_endpoint, timeout=data_base.es_timeout,
            retry_period=data_base.es_retry_period)
    api_instance = api(title=title, version=version)
    Log.get('api').success(
        f'Accept requests at {data_base.host}:{data_base.port}')
    # Serve the WSGI app with waitress; tracebacks are not exposed to clients.
    waitress.serve(api_instance, host=data_base.host, port=data_base.port,
                   expose_tracebacks=False, ident=ident, _quiet=True)
| 33.27027 | 74 | 0.727864 |
4a12df7ee6f62c095aa1451c14fc7fbf4d9fafeb | 10,554 | py | Python | evology/research/MCarloLongRuns/archiv/PopulationDynamics.py | aymericvie/evology | 8f00d94dee7208be5a5bdd0375a9d6ced25097f4 | [
"Apache-2.0"
] | null | null | null | evology/research/MCarloLongRuns/archiv/PopulationDynamics.py | aymericvie/evology | 8f00d94dee7208be5a5bdd0375a9d6ced25097f4 | [
"Apache-2.0"
] | 2 | 2022-01-10T02:10:56.000Z | 2022-01-14T03:41:42.000Z | evology/research/MCarloLongRuns/archiv/PopulationDynamics.py | aymericvie/evology | 8f00d94dee7208be5a5bdd0375a9d6ced25097f4 | [
"Apache-2.0"
] | null | null | null | """
This experiment investigates how learning rates and reinvestment rates affect population dynamics.
It takes a fixed initial condition (wealth coordinates), time horizon and population size.
"""
# Imports
import numpy as np
import pandas as pd
import sys
if sys.platform == "darwin":
sys.path.append("/Users/aymericvie/Documents/GitHub/evology/evology/code")
# Need to be executed from cd to MCarloLongRuns
if sys.platform == "linux":
sys.path.append("/home/vie/Documents/GitHub/evology/evology/code")
from main import main as evology
import multiprocessing as mp
# Fixed parameters
TimeHorizon = (
    252 * 100 + 3 * 21
)  # 100 Years + 3 months to compensate early period without recording data.
PopulationSize = 100
Coordinates = [1 / 3, 1 / 3, 1 / 3]  # initial wealth split (NT, VI, TF)
seed = 8
reps = 150  # Monte-Carlo replications per configuration

# Config layout: coords, popsize, time, selection rate, mutation rate, reinvestment rate
Config1 = [Coordinates, PopulationSize, TimeHorizon, 0, 0, 1]  # Static
Config2 = [Coordinates, PopulationSize, TimeHorizon, 1 / 252, 1 / 252, 1]  # Learning 1Y
Config3 = [Coordinates, PopulationSize, TimeHorizon, 1 / 252, 0, 0]  # Imitation-only 1Y
# BUG FIX: the original Config4 had only 5 elements (the reinvestment rate was
# missing), so job4's Config[5] always raised IndexError and produced all-zero
# output. A rate of 0 was appended, matching Configs 3/5/6.
# NOTE(review): the "Mutation-only" label does not match the values
# (selection=1/252, mutation=0, same as Config3) -- TODO confirm intent.
Config4 = [Coordinates, PopulationSize, TimeHorizon, 1 / 252, 0, 0]  # Mutation-only 1Y
Config5 = [
    Coordinates,
    PopulationSize,
    TimeHorizon,
    1 / (252 * 2),
    1 / (252 * 2),
    0,
]  # Learning 2Y
Config6 = [
    Coordinates,
    PopulationSize,
    TimeHorizon,
    1 / (252 * 3),
    1 / (252 * 3),
    0,
]  # Learning 3Y
Config7 = [Coordinates, PopulationSize, TimeHorizon, 0, 0, 1.2]  # Reinvestment high
Config8 = [Coordinates, PopulationSize, TimeHorizon, 0, 0, 0.8]  # Reinvestment low

"""
def main(
    space,
    solver,
    wealth_coordinates,
    POPULATION_SIZE,
    MAX_GENERATIONS,
    PROBA_SELECTION,
    MUTATION_RATE,
    ReinvestmentRate,
    tqdm_display,
    reset_wealth
):
"""
def _run_job(config, label, iteration):
    """Run a single Monte-Carlo replication of the evology simulation.

    :param config: one of Config1..Config8 (see the layout comment above).
    :param label: integer used only in the failure message.
    :param iteration: replication index supplied by Pool.map (unused; each
        worker re-seeds from OS entropy instead).
    :return: the (NT, VI, TF) wealth-share time series; on any simulation
        failure the original best-effort behavior is kept and three all-zero
        series of the recorded length are returned instead.
    """
    # Re-seed from OS entropy so each worker process draws a different stream.
    np.random.seed()
    try:
        df, pop = evology(
            "scholl",
            "esl.true",
            config[0],
            config[1],
            config[2],
            config[3],
            config[4],
            config[5],
            True,
            False,
        )
        return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:  # deliberate best-effort, as in the original (but no
        # longer swallows KeyboardInterrupt/SystemExit via a bare except).
        print("Job %s failed and passed." % label)
        array = np.zeros((TimeHorizon - 3 * 21))
        return pd.Series(array), pd.Series(array), pd.Series(array)


# BUG FIX: the original pattern `Config = ConfigN` followed by `def jobN()`
# reading `Config[...]` resolved the *global* Config at call time. By the time
# any job actually ran (from the __main__ block), Config was Config8, so every
# job silently simulated configuration 8. Each job now binds its own config.
def job1(iteration):
    return _run_job(Config1, 1, iteration)


def job2(iteration):
    return _run_job(Config2, 2, iteration)


def job3(iteration):
    return _run_job(Config3, 3, iteration)


def job4(iteration):
    return _run_job(Config4, 4, iteration)


def job5(iteration):
    return _run_job(Config5, 5, iteration)


def job6(iteration):
    return _run_job(Config6, 6, iteration)


def job7(iteration):
    return _run_job(Config7, 7, iteration)


def job8(iteration):
    return _run_job(Config8, 8, iteration)
def _collect(job):
    """Fan out `reps` replications of *job* over a process pool.

    Returns a numpy array stacking one result tuple per replication
    (presumably shape (reps, 3, T): NT/VI/TF series per rep -- see _run_job).
    """
    pool = mp.Pool()
    try:
        data = pool.map(job, range(reps))
    finally:
        # Always release the worker processes, even if a job raises.
        pool.close()
    return np.array(list(data))


def main1():
    """Monte-Carlo batch for configuration 1."""
    return _collect(job1)


def main2():
    """Monte-Carlo batch for configuration 2."""
    return _collect(job2)


def main3():
    """Monte-Carlo batch for configuration 3."""
    return _collect(job3)


def main4():
    """Monte-Carlo batch for configuration 4."""
    return _collect(job4)


def main5():
    """Monte-Carlo batch for configuration 5."""
    return _collect(job5)


def main6():
    """Monte-Carlo batch for configuration 6."""
    return _collect(job6)


def main7():
    """Monte-Carlo batch for configuration 7."""
    return _collect(job7)


def main8():
    """Monte-Carlo batch for configuration 8."""
    return _collect(job8)
if __name__ == "__main__":
    # Run every configuration batch and write per-strategy wealth-share
    # trajectories (one CSV column per replication) into its own directory.
    # This replaces eight copy-pasted, identical save stanzas.
    batches = [
        (main1, "data_config1"),
        (main2, "data_config2"),
        (main3, "data_config3"),
        (main4, "data_config4"),
        (main5, "data_config5"),
        (main6, "data_config6"),
        (main7, "data_config7"),
        (main8, "data_config8"),
    ]
    for runner, outdir in batches:
        data = runner()
        dfNT = pd.DataFrame()
        dfVI = pd.DataFrame()
        dfTF = pd.DataFrame()
        for i in range(reps):
            name = "Rep%s" % i
            dfNT[name] = data[i, 0]
            dfVI[name] = data[i, 1]
            dfTF[name] = data[i, 2]
        dfNT.to_csv(outdir + "/MC_NT.csv")
        dfVI.to_csv(outdir + "/MC_VI.csv")
        dfTF.to_csv(outdir + "/MC_TF.csv")
| 23.40133 | 99 | 0.538185 |
5555e48d548861a9a649249c2431e3d418af9c57 | 3,792 | py | Python | appengine/findit/findit_v2/model/test/luci_build_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/findit_v2/model/test/luci_build_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | 7 | 2022-02-15T01:11:37.000Z | 2022-03-02T12:46:13.000Z | appengine/findit/findit_v2/model/test/luci_build_test.py | NDevTK/chromium-infra | d38e088e158d81f7f2065a38aa1ea1894f735ec4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
import mock
from go.chromium.org.luci.buildbucket.proto import common_pb2
from go.chromium.org.luci.buildbucket.proto.build_pb2 import Build
from go.chromium.org.luci.buildbucket.proto.builder_pb2 import BuilderID
from google.appengine.ext import ndb
from findit_v2.model import luci_build
from findit_v2.model.luci_build import ParseBuilderId
from findit_v2.model.luci_build import LuciFailedBuild
from findit_v2.services.context import Context
from findit_v2.services.failure_type import StepTypeEnum
from services import git
from waterfall.test import wf_testcase
class LuciFailedBuildTest(wf_testcase.WaterfallTestCase):
  """Tests for the LuciFailedBuild ndb model and luci_build helpers."""

  def testCreateLuciFailedBuildForCompileFailure(self):
    """Create() stores an entity retrievable by id, build number and commit."""
    build_id = 87654321
    commit_position = 65432
    legacy_build_number = 12345
    build = LuciFailedBuild.Create(
        luci_project='chromium',
        luci_bucket='ci',
        luci_builder='Linux Builder',
        build_id=build_id,
        legacy_build_number=legacy_build_number,
        gitiles_host='chromium.googlesource.com',
        gitiles_project='chromium/src',
        gitiles_ref='refs/heads/master',
        gitiles_id='git_hash',
        commit_position=commit_position,
        status=20,
        create_time=datetime(2019, 3, 28),
        start_time=datetime(2019, 3, 28, 0, 1),
        end_time=datetime(2019, 3, 28, 1),
        build_failure_type=StepTypeEnum.COMPILE)
    build.put()

    # Get entity by build_id.
    build = LuciFailedBuild.get_by_id(build_id)
    self.assertIsNotNone(build)
    self.assertEqual(StepTypeEnum.COMPILE, build.build_failure_type)
    self.assertEqual(commit_position, build.gitiles_commit.commit_position)
    # Derived ids are composed as project/bucket[/builder].
    self.assertEqual('chromium/ci', build.bucket_id)
    self.assertEqual('chromium/ci/Linux Builder', build.builder_id)

    # Get entity by build number.
    res1 = LuciFailedBuild.GetBuildByNumber('chromium', 'ci', 'Linux Builder',
                                            legacy_build_number)
    self.assertEqual(build_id, res1.build_id)

    # Get entity by commit_position.
    res2 = LuciFailedBuild.query(
        ndb.AND(
            LuciFailedBuild.builder_id == 'chromium/ci/Linux Builder',
            LuciFailedBuild.gitiles_commit.commit_position ==
            commit_position)).fetch()
    self.assertEqual(1, len(res2))
    self.assertEqual(build_id, res2[0].build_id)

  def testParseBuilderId(self):
    """ParseBuilderId splits 'project/bucket/builder' into its components."""
    builder = ParseBuilderId('chromium/ci/Linux Builder')
    self.assertEqual('chromium', builder.project)
    self.assertEqual('ci', builder.bucket)
    self.assertEqual('Linux Builder', builder.builder)

  # Commit-position lookup is stubbed out so no gitiles access happens.
  @mock.patch.object(git, 'GetCommitPositionFromRevision', return_value=67890)
  def testSaveFailedBuild(self, _):
    """SaveFailedBuild persists an entity for a failed buildbucket Build."""
    builder = BuilderID(project='chromium', bucket='try', builder='linux-rel')
    build = Build(
        id=87654321, builder=builder, number=123, status=common_pb2.FAILURE)
    build.create_time.FromDatetime(datetime(2019, 4, 9))
    build.start_time.FromDatetime(datetime(2019, 4, 9, 0, 1))
    build.end_time.FromDatetime(datetime(2019, 4, 9, 1))
    context = Context(
        luci_project_name='project',
        gitiles_host='gitiles.host.com',
        gitiles_project='project/name',
        gitiles_ref='ref/heads/master',
        gitiles_id='git_sha')
    build_entity = luci_build.SaveFailedBuild(context, build,
                                              StepTypeEnum.COMPILE)
    self.assertIsNotNone(build_entity)

  def testGetBuilderIdString(self):
    """GetBuilderIdString joins project/bucket/builder with slashes."""
    self.assertEqual(
        'chromium/try/linux-rel',
        luci_build.GetBuilderIdString('chromium', 'try', 'linux-rel'))
| 38.30303 | 78 | 0.714926 |
547225ed5520a6ab6247a523be1c4eeb45c97807 | 2,695 | py | Python | shaystack/__init__.py | pprados/shaystack | 5bd73173d33099213515c1cf3311aa76aba4d6c9 | [
"BSD-2-Clause"
] | 1 | 2021-03-04T10:36:33.000Z | 2021-03-04T10:36:33.000Z | shaystack/__init__.py | pprados/shaystack | 5bd73173d33099213515c1cf3311aa76aba4d6c9 | [
"BSD-2-Clause"
] | 7 | 2021-03-19T07:31:22.000Z | 2021-03-26T12:31:45.000Z | shaystack/__init__.py | pprados/shaystack | 5bd73173d33099213515c1cf3311aa76aba4d6c9 | [
"BSD-2-Clause"
] | 1 | 2021-04-19T07:48:21.000Z | 2021-04-19T07:48:21.000Z | # -*- coding: utf-8 -*-
# Haystack module
# See the accompanying LICENSE file.
# (C) 2016 VRT Systems
# (C) 2021 Engie Digital
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
"""
Implementation of Haystack project https://www.project-haystack.org/
Propose API :
- to read or write Haystack file (Zinc, JSon, CSV)
- to manipulate ontology in memory (Grid class)
- to implement REST API (https://www.project-haystack.org/doc/Rest)
- to implement GraphQL API
With some sample provider:
- Import ontology on S3 bucket
- Import ontology on SQLite or Postgres
- and expose the data via Flask or AWS Lambda
"""
from .datatypes import Quantity, Coordinate, Uri, Bin, MARKER, NA, \
REMOVE, Ref, XStr
from .dumper import dump, dump_scalar
from .grid import Grid
from .grid_filter import parse_filter, parse_hs_datetime_format
from .metadata import MetadataObject
from .ops import *
from .parser import parse, parse_scalar, MODE, MODE_JSON, MODE_TRIO, MODE_ZINC, MODE_CSV, \
suffix_to_mode, mode_to_suffix
from .pintutil import unit_reg
from .providers import HaystackInterface
from .type import HaystackType, Entity
from .version import Version, VER_2_0, VER_3_0, LATEST_VER
# Public API: the names exported by `from shaystack import *` and the
# reference list for what this package considers stable.
__all__ = ['Grid', 'dump', 'parse', 'dump_scalar', 'parse_scalar', 'parse_filter',
           'MetadataObject', 'unit_reg', 'zoneinfo',
           'HaystackType', 'Entity',
           'Coordinate', 'Uri', 'Bin', 'XStr', 'Quantity', 'MARKER', 'NA', 'REMOVE', 'Ref',
           'MODE', 'MODE_JSON', 'MODE_ZINC', 'MODE_TRIO', 'MODE_CSV', 'suffix_to_mode', 'mode_to_suffix',
           'parse_hs_datetime_format',
           'VER_2_0', 'VER_3_0', 'LATEST_VER', 'Version',
           "HaystackInterface",
           "about",
           "ops",
           "formats",
           "read",
           "nav",
           "watch_sub",
           "watch_unsub",
           "watch_poll",
           "point_write",
           "his_read",
           "his_write",
           "invoke_action",
           ]

# pdoc documentation generator configuration: hide these internal modules
# from the generated API docs.
__pdoc__ = {
    "csvdumper": False,
    "csvparser": False,
    "datatypes": False,
    "dumper": False,
    "filter_ast": False,
    "grid": False,
    "grid_diff": False,
    "grid_filter": False,
    "jsondumper": False,
    "jsonparser": False,
    "metadata": False,
    "ops": False,
    "parser": False,
    "pintutil": False,
    "sortabledict": False,
    "triodumper": False,
    "trioparser": False,
    "version": False,
    "zincdumper": False,
    "zincparser": False,
    "zoneinfo": False,
}

# Package authorship / licensing metadata.
__author__ = 'Engie Digital, VRT Systems'
__copyright__ = 'Copyright 2016-2020, Engie Digital & VRT System'
__credits__ = ['See AUTHORS']
__license__ = 'BSD'
__maintainer__ = 'Philippe PRADOS'
__email__ = 'shaystack@prados.fr'
| 30.280899 | 105 | 0.643785 |
aa1c36674710b6750c6c0294c4c82e790f013c44 | 16,096 | py | Python | nebulousAD/modimpacket/examples/ntlmrelayx/servers/httprelayserver.py | BraveLittleRoaster/nebulousAD | 37a44f131d13f1668a73b61f2444ad93b9e657cc | [
"Apache-2.0",
"MIT"
] | 130 | 2019-08-06T22:23:28.000Z | 2022-02-07T02:47:52.000Z | nebulousAD/modimpacket/examples/ntlmrelayx/servers/httprelayserver.py | BraveLittleRoaster/nebulousAD | 37a44f131d13f1668a73b61f2444ad93b9e657cc | [
"Apache-2.0",
"MIT"
] | 2 | 2019-09-16T09:08:36.000Z | 2020-02-07T10:18:16.000Z | nebulousAD/modimpacket/examples/ntlmrelayx/servers/httprelayserver.py | BraveLittleRoaster/nebulousAD | 37a44f131d13f1668a73b61f2444ad93b9e657cc | [
"Apache-2.0",
"MIT"
] | 21 | 2019-08-07T01:54:33.000Z | 2021-05-07T06:37:26.000Z | # SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# HTTP Relay Server
#
# Authors:
# Alberto Solino (@agsolino)
# Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
# Description:
# This is the HTTP server which relays the NTLMSSP messages to other protocols
import SimpleHTTPServer
import SocketServer
import socket
import base64
import random
import struct
import string
import traceback
from threading import Thread
from nebulousAD.modimpacket import ntlm, LOG
from nebulousAD.modimpacket.smbserver import outputToJohnFormat, writeJohnOutputToFile
from nebulousAD.modimpacket.nt_errors import STATUS_ACCESS_DENIED, STATUS_SUCCESS
from nebulousAD.modimpacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor
from nebulousAD.modimpacket.examples.ntlmrelayx.servers.socksserver import activeConnections
class HTTPRelayServer(Thread):
    class HTTPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
        """Threaded TCP server hosting the NTLM-relay HTTP handler."""

        def __init__(self, server_address, RequestHandlerClass, config):
            self.config = config
            # Worker threads must not keep the process alive on shutdown.
            self.daemon_threads = True
            if self.config.ipv6:
                self.address_family = socket.AF_INET6
            # Tracks the number of times authentication was prompted for WPAD per client
            self.wpad_counters = {}
            SocketServer.TCPServer.__init__(self,server_address, RequestHandlerClass)
class HTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def __init__(self,request, client_address, server):
            """Set up per-connection relay state and pick an attack target.

            Target selection happens here (before the base handler runs the
            request) unless the server is in REDIRECT mode.
            """
            self.server = server
            self.protocol_version = 'HTTP/1.1'
            self.challengeMessage = None
            self.target = None
            self.client = None
            self.machineAccount = None
            self.machineHashes = None
            self.domainIp = None
            self.authUser = None
            # PAC-file template; the two %s placeholders are filled with the
            # configured WPAD host when the file is served.
            self.wpad = 'function FindProxyForURL(url, host){if ((host == "localhost") || shExpMatch(host, "localhost.*") ||' \
                        '(host == "127.0.0.1")) return "DIRECT"; if (dnsDomainIs(host, "%s")) return "DIRECT"; ' \
                        'return "PROXY %s:80; DIRECT";} '
            if self.server.config.mode != 'REDIRECT':
                if self.server.config.target is None:
                    # Reflection mode, defaults to SMB at the target, for now
                    self.server.config.target = TargetsProcessor(singleTarget='SMB://%s:445/' % client_address[0])
                self.target = self.server.config.target.getTarget(self.server.config.randomtargets)
                LOG.info("HTTPD: Received connection from %s, attacking target %s://%s" % (client_address[0] ,self.target.scheme, self.target.netloc))
            try:
                SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self,request, client_address, server)
            except Exception, e:
                # Keep the relay server alive on per-connection failures.
                LOG.error(str(e))
                LOG.debug(traceback.format_exc())
        def handle_one_request(self):
            """Process one HTTP request, logging (not dying on) handler errors."""
            try:
                SimpleHTTPServer.SimpleHTTPRequestHandler.handle_one_request(self)
            except KeyboardInterrupt:
                # Let Ctrl-C propagate so the whole relay server can stop.
                raise
            except Exception, e:
                LOG.error('Exception in HTTP request handler: %s' % e)
                LOG.debug(traceback.format_exc())
def log_message(self, format, *args):
return
def send_error(self, code, message=None):
if message.find('RPC_OUT') >=0 or message.find('RPC_IN'):
return self.do_GET()
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_error(self,code,message)
def serve_wpad(self):
wpadResponse = self.wpad % (self.server.config.wpad_host, self.server.config.wpad_host)
self.send_response(200)
self.send_header('Content-type', 'application/x-ns-proxy-autoconfig')
self.send_header('Content-Length',len(wpadResponse))
self.end_headers()
self.wfile.write(wpadResponse)
return
def should_serve_wpad(self, client):
# If the client was already prompted for authentication, see how many times this happened
try:
num = self.server.wpad_counters[client]
except KeyError:
num = 0
self.server.wpad_counters[client] = num + 1
# Serve WPAD if we passed the authentication offer threshold
if num >= self.server.config.wpad_auth_num:
return True
else:
return False
        def do_HEAD(self):
            # NOTE(review): dead code -- this method is redefined later in the
            # same class (delegating to do_GET), and that later definition
            # wins, so this plain 200 response is never used.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
def do_AUTHHEAD(self, message = '', proxy=False):
if proxy:
self.send_response(407)
self.send_header('Proxy-Authenticate', message)
else:
self.send_response(401)
self.send_header('WWW-Authenticate', message)
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
#Trickery to get the victim to sign more challenges
def do_REDIRECT(self, proxy=False):
rstr = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
self.send_response(302)
self.send_header('WWW-Authenticate', 'NTLM')
self.send_header('Content-type', 'text/html')
self.send_header('Connection','close')
self.send_header('Location','/%s' % rstr)
self.send_header('Content-Length','0')
self.end_headers()
def do_SMBREDIRECT(self):
self.send_response(302)
self.send_header('Content-type', 'text/html')
self.send_header('Location','file://%s' % self.server.config.redirecthost)
self.send_header('Content-Length','0')
self.send_header('Connection','close')
self.end_headers()
        def do_POST(self):
            # POST requests get the same NTLM handling as GET.
            return self.do_GET()
        def do_CONNECT(self):
            # Proxy CONNECT requests also go through the common NTLM handling.
            return self.do_GET()
        def do_HEAD(self):
            # NOTE(review): this redefinition shadows the earlier do_HEAD in
            # this class (making that 200-responder dead code); HEAD requests
            # get the full NTLM handling in do_GET.
            return self.do_GET()
    def do_GET(self):
        """Main relay entry point: drive the client's NTLM handshake.

        All verbs (POST/CONNECT/HEAD) funnel in here.  Flow: REDIRECT mode
        answers with an SMB redirect; a /wpad.dat request may be served a PAC
        file; otherwise the NTLM type-1/type-3 tokens in the (Proxy-)
        Authorization header are relayed to the current target.
        NOTE(review): uses self.headers.getheader(), the Python 2
        mimetools API — this handler targets Python 2.
        """
        messageType = 0
        if self.server.config.mode == 'REDIRECT':
            self.do_SMBREDIRECT()
            return
        LOG.info('HTTPD: Client requested path: %s' % self.path.lower())
        # Serve WPAD if:
        # - The client requests it
        # - A WPAD host was provided in the command line options
        # - The client has not exceeded the wpad_auth_num threshold yet
        if self.path.lower() == '/wpad.dat' and self.server.config.serve_wpad and self.should_serve_wpad(self.client_address[0]):
            LOG.info('HTTPD: Serving PAC file to client %s' % self.client_address[0])
            self.serve_wpad()
            return
        # Determine if the user is connecting to our server directly or attempts to use it as a proxy
        if self.command == 'CONNECT' or (len(self.path) > 4 and self.path[:4].lower() == 'http'):
            proxy = True
        else:
            proxy = False
        # No auth header yet: challenge the client with plain 'NTLM'.
        if (proxy and self.headers.getheader('Proxy-Authorization') is None) or (not proxy and self.headers.getheader('Authorization') is None):
            self.do_AUTHHEAD(message = 'NTLM',proxy=proxy)
            pass
        else:
            if proxy:
                typeX = self.headers.getheader('Proxy-Authorization')
            else:
                typeX = self.headers.getheader('Authorization')
            try:
                _, blob = typeX.split('NTLM')
                token = base64.b64decode(blob.strip())
            except:
                # Malformed header: restart the handshake.
                self.do_AUTHHEAD(message = 'NTLM', proxy=proxy)
            else:
                # Message type lives right after the 'NTLMSSP\x00' signature.
                messageType = struct.unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
                if messageType == 1:
                    # Type 1 (negotiate): open the connection to the target.
                    if not self.do_ntlm_negotiate(token, proxy=proxy):
                        #Connection failed
                        LOG.error('Negotiating NTLM with %s://%s failed. Skipping to next target',
                                  self.target.scheme, self.target.netloc)
                        self.server.config.target.logTarget(self.target)
                        self.do_REDIRECT()
                elif messageType == 3:
                    # Type 3 (authenticate): relay the response to the target.
                    authenticateMessage = ntlm.NTLMAuthChallengeResponse()
                    authenticateMessage.fromString(token)
                    if not self.do_ntlm_auth(token,authenticateMessage):
                        if authenticateMessage['flags'] & ntlm.NTLMSSP_NEGOTIATE_UNICODE:
                            LOG.error("Authenticating against %s://%s as %s\%s FAILED" % (
                                self.target.scheme, self.target.netloc,
                                authenticateMessage['domain_name'].decode('utf-16le'),
                                authenticateMessage['user_name'].decode('utf-16le')))
                        else:
                            LOG.error("Authenticating against %s://%s as %s\%s FAILED" % (
                                self.target.scheme, self.target.netloc,
                                authenticateMessage['domain_name'].decode('ascii'),
                                authenticateMessage['user_name'].decode('ascii')))
                        # Only skip to next if the login actually failed, not if it was just anonymous login or a system account
                        # which we don't want
                        if authenticateMessage['user_name'] != '': # and authenticateMessage['user_name'][-1] != '$':
                            self.server.config.target.logTarget(self.target)
                            # No anonymous login, go to next host and avoid triggering a popup
                            self.do_REDIRECT()
                        else:
                            #If it was an anonymous login, send 401
                            self.do_AUTHHEAD('NTLM', proxy=proxy)
                    else:
                        # Relay worked, do whatever we want here...
                        if authenticateMessage['flags'] & ntlm.NTLMSSP_NEGOTIATE_UNICODE:
                            LOG.info("Authenticating against %s://%s as %s\%s SUCCEED" % (
                                self.target.scheme, self.target.netloc, authenticateMessage['domain_name'].decode('utf-16le'),
                                authenticateMessage['user_name'].decode('utf-16le')))
                        else:
                            LOG.info("Authenticating against %s://%s as %s\%s SUCCEED" % (
                                self.target.scheme, self.target.netloc, authenticateMessage['domain_name'].decode('ascii'),
                                authenticateMessage['user_name'].decode('ascii')))
                        # Record the captured hash in John-the-Ripper format.
                        ntlm_hash_data = outputToJohnFormat(self.challengeMessage['challenge'],
                                                            authenticateMessage['user_name'],
                                                            authenticateMessage['domain_name'],
                                                            authenticateMessage['lanman'], authenticateMessage['ntlm'])
                        self.client.sessionData['JOHN_OUTPUT'] = ntlm_hash_data
                        if self.server.config.outputFile is not None:
                            writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'], self.server.config.outputFile)
                        self.server.config.target.logTarget(self.target, True, self.authUser)
                        self.do_attack()
                        # And answer 404 not found
                        self.send_response(404)
                        self.send_header('WWW-Authenticate', 'NTLM')
                        self.send_header('Content-type', 'text/html')
                        self.send_header('Content-Length','0')
                        self.send_header('Connection','close')
                        self.end_headers()
        return
def do_ntlm_negotiate(self, token, proxy):
if self.server.config.protocolClients.has_key(self.target.scheme.upper()):
self.client = self.server.config.protocolClients[self.target.scheme.upper()](self.server.config, self.target)
# If connection failed, return
if not self.client.initConnection():
return False
self.challengeMessage = self.client.sendNegotiate(token)
# Check for errors
if self.challengeMessage is False:
return False
else:
LOG.error('Protocol Client for %s not found!' % self.target.scheme.upper())
return False
#Calculate auth
self.do_AUTHHEAD(message = 'NTLM '+base64.b64encode(self.challengeMessage.getData()), proxy=proxy)
return True
def do_ntlm_auth(self,token,authenticateMessage):
#For some attacks it is important to know the authenticated username, so we store it
if authenticateMessage['flags'] & ntlm.NTLMSSP_NEGOTIATE_UNICODE:
self.authUser = ('%s/%s' % (authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper()
else:
self.authUser = ('%s/%s' % (authenticateMessage['domain_name'].decode('ascii'),
authenticateMessage['user_name'].decode('ascii'))).upper()
if authenticateMessage['user_name'] != '' or self.target.hostname == '127.0.0.1':
clientResponse, errorCode = self.client.sendAuth(token)
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials, except
# when coming from localhost
errorCode = STATUS_ACCESS_DENIED
if errorCode == STATUS_SUCCESS:
return True
return False
def do_attack(self):
# Check if SOCKS is enabled and if we support the target scheme
if self.server.config.runSocks and self.target.scheme.upper() in self.server.config.socksServer.supportedSchemes:
# Pass all the data to the socksplugins proxy
activeConnections.put((self.target.hostname, self.client.targetPort, self.target.scheme.upper(),
self.authUser, self.client, self.client.sessionData))
return
# If SOCKS is not enabled, or not supported for this scheme, fall back to "classic" attacks
if self.target.scheme.upper() in self.server.config.attacks:
# We have an attack.. go for it
clientThread = self.server.config.attacks[self.target.scheme.upper()](self.server.config, self.client.session,
self.authUser)
clientThread.start()
else:
LOG.error('No attack configured for %s' % self.target.scheme.upper())
    def __init__(self, config):
        # The HTTP relay server runs on its own daemon thread.
        Thread.__init__(self)
        self.daemon = True
        self.config = config
        # Created lazily in run(); holds the HTTPServer instance.
        self.server = None
def run(self):
LOG.info("Setting up HTTP Server")
if self.config.listeningPort:
httpport = self.config.listeningPort
else:
httpport = 80
# changed to read from the interfaceIP set in the configuration
self.server = self.HTTPServer((self.config.interfaceIp, httpport), self.HTTPHandler, self.config)
try:
self.server.serve_forever()
except KeyboardInterrupt:
pass
LOG.info('Shutting down HTTP Server')
self.server.server_close()
| 47.904762 | 150 | 0.576106 |
53cd8532155ba48eaa77f1c1d3992b5e4b6d876a | 423 | py | Python | sweets/migrations/0011_sweetshop_email.py | adityapandadev/FoodMaster | ac3de44964bc935d5fa669e3554f693e90813148 | [
"MIT"
] | null | null | null | sweets/migrations/0011_sweetshop_email.py | adityapandadev/FoodMaster | ac3de44964bc935d5fa669e3554f693e90813148 | [
"MIT"
] | null | null | null | sweets/migrations/0011_sweetshop_email.py | adityapandadev/FoodMaster | ac3de44964bc935d5fa669e3554f693e90813148 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-04-19 09:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional, unique `email` field to the SweetShop model.
    dependencies = [
        ('sweets', '0010_auto_20200418_1307'),
    ]
    operations = [
        migrations.AddField(
            model_name='sweetshop',
            name='email',
            # blank/null so existing rows need no value; unique when set.
            field=models.EmailField(blank=True, max_length=254, null=True, unique=True),
        ),
    ]
| 22.263158 | 88 | 0.612293 |
23f0e2fdd5745ff4feb8c882a904eaf93d38eced | 1,730 | py | Python | HW4/getIrisData.py | serkanbodur/Deep-Learning-Basics | 10ec82d51523ec5f1d4a422b750f23331bccb4b5 | [
"MIT"
] | null | null | null | HW4/getIrisData.py | serkanbodur/Deep-Learning-Basics | 10ec82d51523ec5f1d4a422b750f23331bccb4b5 | [
"MIT"
] | null | null | null | HW4/getIrisData.py | serkanbodur/Deep-Learning-Basics | 10ec82d51523ec5f1d4a422b750f23331bccb4b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 19:25:59 2019
@author: Mysia
"""
import scipy.io
import numpy as np
# Module-level raw .mat loads; NOTE(review): several of these names
# (trainingSet, testSet, y1, y2) are rebound locally inside getIrisData().
irisData = scipy.io.loadmat('irisData.mat')
stack = scipy.io.loadmat('stack.mat')
testSet = scipy.io.loadmat('testSet.mat')
trainingSet = scipy.io.loadmat('trainingSet.mat')
y1 = scipy.io.loadmat('y1.mat')
y2 = scipy.io.loadmat('y2.mat')
#Arrive all datas in irisData dictionary
#for key,val in irisData.items():
# print (key, "=>", val)
#irisDatas=irisData.keys()
#print (irisDatas)
def getIrisData():
    """Split the iris matrix into train/test sets plus one-hot labels.

    Uses the module-level `irisData` dict (key 'X' holds the sample matrix,
    one 4-feature sample per row, 50 samples per class).  Per class the
    first 40 samples become training columns and the last 10 test columns;
    row 4 of both sets is a constant bias row of ones.  As a side effect the
    arrays are also saved as .npy files.

    Returns:
        (trainingSet, testSet, y1, y2): 5x120 and 5x30 float64 matrices and
        one-hot label matrices of shape (120, 3) and (30, 3).
    """
    # The original iterated `for X in irisData` only to repeat this lookup.
    irisDatas = irisData['X']
    trainingSet = np.zeros((5, 120), 'float64')
    testSet = np.zeros((5, 30), 'float64')
    y1 = np.zeros((120, 3), 'float64')
    y2 = np.zeros((30, 3), 'float64')
    x = np.transpose(irisDatas)
    # FIX: the original used off-by-one slices (41:80 <- 51:90, 81:120 <-
    # 101:140, and similarly in the test set), which skipped one sample per
    # class and left columns 40/80 (and 10/20) permanently zero.
    trainingSet[0:4, 0:40] = x[:, 0:40]
    trainingSet[0:4, 40:80] = x[:, 50:90]
    trainingSet[0:4, 80:120] = x[:, 100:140]
    trainingSet[4, :] = 1
    testSet[0:4, 0:10] = x[:, 40:50]
    testSet[0:4, 10:20] = x[:, 90:100]
    testSet[0:4, 20:30] = x[:, 140:150]
    testSet[4, :] = 1
    # One-hot labels: 40 training / 10 test samples per class, in class order.
    for i in range(40):
        y1[i, :] = [1, 0, 0]
        y1[i + 40, :] = [0, 1, 0]
        y1[i + 80, :] = [0, 0, 1]
    for i in range(10):
        y2[i, :] = [1, 0, 0]
        y2[i + 10, :] = [0, 1, 0]
        y2[i + 20, :] = [0, 0, 1]
    np.save('trainingSet', trainingSet)
    np.save('testSet', testSet)
    np.save('y1', y1)
    np.save('y2', y2)
    return trainingSet, testSet, y1, y2
1e372bd234a2fcfa94819f51892175747a3c8475 | 1,731 | py | Python | pyts/stressModels/main.py | JensGM/pyTurbSim | 89357e94607e99e5a27e069975684fe8e6a89a9d | [
"Apache-2.0"
] | 8 | 2017-07-24T23:01:28.000Z | 2021-12-06T04:31:23.000Z | pyts/stressModels/main.py | camilledub/pyTurbSim | 69db0d7670afe5b2b386087aa0229a7511ef070d | [
"Apache-2.0"
] | 9 | 2015-04-06T16:01:06.000Z | 2020-06-16T16:51:49.000Z | pyts/stressModels/main.py | camilledub/pyTurbSim | 69db0d7670afe5b2b386087aa0229a7511ef070d | [
"Apache-2.0"
] | 17 | 2015-04-22T15:27:28.000Z | 2021-11-20T08:10:16.000Z | from .base import stressModelBase, stressObj
class uniform(stressModelBase):
    """Uniform Reynold's stress model.
    In this model each component of the Reynold's stress can be
    specified explicitly, but the values are uniform in space.
    Parameters
    ----------
    upvp_ : float
            The u'v' component of Reynold's stress.
    upwp_ : float
            The u'w' component of Reynold's stress.
    vpwp_ : float
            The v'w' component of Reynold's stress.
    """
    def __init__(self, upvp_=0.0, upwp_=0.0, vpwp_=0.0):
        """
        Set the Reynold's stresses to be uniform over the rotor disk.
        """
        # Stored in the order u'v', u'w', v'w'; read back in __call__ and
        # in the summary-file template below.
        self.vals = [upvp_, upwp_, vpwp_]
    def _sumfile_string(self, tsrun, ):
        # Template for the TurbSim summary file; `dat` is this instance.
        sumstring_format = """
        Stress model used                                =  {dat.model_desc}
        u'v'                                             =  {dat.vals[0]:0.4g} [m^2/s^2]
        u'w'                                             =  {dat.vals[1]:0.4g} [m^2/s^2]
        v'w'                                             =  {dat.vals[2]:0.4g} [m^2/s^2]
        """
        return sumstring_format.format(dat=self, )
    def __call__(self, tsrun):
        """
        Create and calculate the stress object for a `tsrun`
        instance.
        Parameters
        ----------
        tsrun :         :class:`.tsrun`
                        A TurbSim run object.
        Returns
        -------
        out :           :class:`.stressObj`
                        A stress object for the grid in `tsrun`.
        """
        out = stressObj(tsrun)
        # Broadcast the three uniform components across the whole grid.
        out.upvp_[:] = self.vals[0]
        out.upwp_[:] = self.vals[1]
        out.vpwp_[:] = self.vals[2]
        return out
| 29.844828 | 88 | 0.476603 |
193d2033e69b69708089eae4d287a202b7f389a6 | 405 | py | Python | Stereo/TestStereo.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | Stereo/TestStereo.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | Stereo/TestStereo.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | import numpy as np
import cv2
from matplotlib import pyplot as plt
# Load the left/right image pair as grayscale (imread flag 0).
imgL = cv2.imread('imL.png',0)
imgR = cv2.imread('imR.png',0)
# NOTE(review): Python 2 print statement — this script predates Python 3.
print imgL.shape, imgR.shape
# Print class
print(type(imgL).__name__)
# Show image DOESNT WORK
plt.imshow(imgL,'gray')
plt.show()
# Block-matching stereo: compute a disparity map from the image pair.
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL,imgR)
plt.imshow(disparity,'gray')
plt.show()
| 19.285714 | 61 | 0.745679 |
f3f3d248a3d133d2d1714cd41d79f24320d3a487 | 78,549 | py | Python | oldversions/V14.py | outkine/Ascension | 1b942a853558cec2c94a312936bf2bbae3b1ffda | [
"MIT"
] | 1 | 2017-10-27T14:35:36.000Z | 2017-10-27T14:35:36.000Z | oldversions/V14.py | Jetmate/Ascension | 1b942a853558cec2c94a312936bf2bbae3b1ffda | [
"MIT"
] | null | null | null | oldversions/V14.py | Jetmate/Ascension | 1b942a853558cec2c94a312936bf2bbae3b1ffda | [
"MIT"
] | null | null | null | from math import sqrt
import pygame as pygame
from pygame.locals import *
pygame.init()
def get_list(l, indexes):
return [l[i] for i in indexes]
def polarity(n):
if n == 0:
return n
return n / abs(n)
def combine_lists(l1, l2, sign):
l1 = list(l1)
for i in range(2):
if sign == '+':
l1[i] += l2[i]
elif sign == '-':
l1[i] -= l2[i]
elif sign == '*':
l1[i] *= l2[i]
else:
l1[i] /= l2[i]
return l1
def opposite(n):
return abs(n - 1)
def make_tuple(thing):
if type(thing) not in (list, tuple):
# noinspection PyRedundantParentheses
return (thing,)
return thing
def find_center(d1, d2, c1=(0, 0)):
return [(c1[i] + (d1[i] / 2 - d2[i] / 2)) for i in range(2)]
def collision(c1, d1, c2, d2, inside_only=False):
d1 = list(d1)
d2 = list(d2)
collisions = [False, False]
for i in range(2):
d1[i] -= 1
d2[i] -= 1
if (c1[i] <= c2[i] and c1[i] + d1[i] >= c2[i] + d2[i]) or \
(c1[i] >= c2[i] and c1[i] + d1[i] <= c2[i] + d2[i]):
collisions[i] = True
if not inside_only:
if (c2[i] <= c1[i] <= c2[i] + d2[i]) or \
(c2[i] <= c1[i] + d1[i] <= c2[i] + d2[i]):
collisions[i] = True
if False not in collisions:
return True
elif True in collisions:
return collisions.index(False)
class SpriteSheet:
def __init__(self, filename, division_index=1):
self.sheet = pygame.image.load(filename).convert_alpha()
self.division_index = division_index
self.farthest_y_coordinate = 0
def get_image(self, coordinates, dimensions):
# noinspection PyArgumentList
image = pygame.Surface(dimensions, SRCALPHA).convert_alpha()
image.blit(self.sheet, (0, 0), (coordinates, dimensions))
return image
def get_sprites(self, starting_x_coordinate=0, farthest_y_coordinate=None, all_dimensions=None, y_constant=None,
x_constant=None, update=True, scale=3, block_number=None, dimensions=None):
sprites = []
if block_number:
y_constant = game.block_size
x_constant = (game.block_size, block_number)
elif dimensions:
y_constant = dimensions[1]
x_constant = (dimensions[0], 1)
if not farthest_y_coordinate:
farthest_y_coordinate = self.farthest_y_coordinate
if x_constant:
thing = x_constant[1]
else:
thing = len(all_dimensions)
farthest_x_coordinate = starting_x_coordinate
for i in range(thing):
coordinates = [0, 0]
coordinates[opposite(self.division_index)] = farthest_x_coordinate
coordinates[self.division_index] = farthest_y_coordinate
dimensions = [0, 0]
if x_constant or y_constant:
if x_constant:
dimensions[opposite(self.division_index)] = x_constant[0]
else:
dimensions[opposite(self.division_index)] = all_dimensions[i]
if y_constant:
dimensions[self.division_index] = y_constant
else:
dimensions[self.division_index] = all_dimensions[i]
else:
dimensions = all_dimensions[i]
farthest_x_coordinate += dimensions[opposite(self.division_index)]
sprite = self.get_image(coordinates, dimensions)
if scale:
sprite = pygame.transform.scale(sprite, combine_lists(sprite.get_size(),
(scale, scale), '*'))
sprites.append(sprite)
if update:
if y_constant:
self.farthest_y_coordinate += y_constant
elif x_constant:
self.farthest_y_coordinate += max(all_dimensions)
else:
self.farthest_y_coordinate += max(
[dimensions[self.division_index] for dimensions in all_dimensions])
return sprites
class Quadratic:
def __init__(self, sign, y_range, speed):
self.a = sign
self.y_range = y_range
if (sign == 1 and self.y_range[0] > self.y_range[1]) or (sign == -1 and self.y_range[1] > self.y_range[0]):
x_solution_index = 0
else:
x_solution_index = 1
self.c = self.y_range[opposite(x_solution_index)]
self.x_range = [self.get_x(self.y_range[i])[x_solution_index] for i in range(2)]
self.x_change = (self.x_range[1] - self.x_range[0]) / speed
self.reset()
def execute(self):
self.current_x += self.x_change
if self.current_x - .01 > self.x_range[1]:
# noinspection PyRedundantParentheses
return (self.y_change,)
current_y = int(self.get_y(self.current_x))
self.y_change = current_y - self.old_y
self.old_y = current_y
return self.y_change
def get_y(self, x):
return self.a * x ** 2 + self.c
def get_x(self, y):
x = sqrt((-self.c + y) / self.a)
return sorted((-x, x))
def reset(self):
self.current_x = self.x_range[0]
self.old_y = self.y_range[0]
class Game:
def __init__(self, speed, dimensions, scale_factor, block_size, version):
self.speed = speed
self.real_speed = self.speed
self.dimensions = dimensions
self.scale_factor = scale_factor
self.block_size = block_size
self.version = version
self.clock = pygame.time.Clock()
self.display = pygame.display.set_mode(self.dimensions)
self.movement_keys = (K_RIGHT, K_LEFT, K_d, K_a)
self.font = pygame.font.SysFont("Calibri", 100)
self.count = 0
def blit(self, thing):
self.display.blit(thing.current_sprite(),
player.generate_display_coordinates(thing.coordinates))
class Background:
def __init__(self, sprites, level_maps,
block_color_values, block_offsets,
default_block_info, block_info,
block_types,
level_transition_height, level_transition_speed,
menu_options, menu_heights, menu_selectable_options,
selecter_gap, title_transition_time,
switch_pairs, difficulty,
level_number_margins, level_number_gap):
self.sprites = sprites
self.level_maps = level_maps
self.block_color_values = block_color_values
self.block_offsets = block_offsets
for offset_name in self.block_offsets:
if offset_name not in ('entrance_background', 'exit_background'):
self.block_offsets[offset_name] = combine_lists(block_offsets[offset_name],
(game.scale_factor, game.scale_factor), '*')
self.default_block_info = default_block_info
self.block_info = block_info
self.block_size = game.scale_factor * game.block_size
self.block_types = block_types
self.level_transition_coordinates = [
(player.default_fake_coordinates[1], player.default_fake_coordinates[1] + level_transition_height),
(player.default_fake_coordinates[1] - level_transition_height, player.default_fake_coordinates[1])
]
self.level_transition_quadratics = [
Quadratic(sign, self.level_transition_coordinates[i], level_transition_speed)
for sign, i in zip((1, -1), (0, 1))
]
self.level_transition_phase = None
self.menu_coordinates = {}
previous_height = 0
for name in menu_options:
height = previous_height + menu_heights[name]
previous_height = height + self.sprites[name][0].get_height()
self.menu_coordinates[name] = (find_center(game.dimensions, self.sprites[name][0].get_size())[0], height)
self.menu_selectable_options = menu_selectable_options
self.selecter_position_index = 0
self.selecter_gap = selecter_gap * game.scale_factor
self.update_selecter_coordinates()
self.title_coordinates = (find_center(game.dimensions, self.sprites['title'][0].get_size()))
self.title_transition_speed = (self.title_coordinates[1] - self.menu_coordinates['title'][
1]) / title_transition_time
self.switch_pairs = switch_pairs
self.difficulty = difficulty
self.level_number_margins = level_number_margins
self.level_number_gap = level_number_gap
self.number_width = self.sprites['number'][0].get_width()
self.condition = 'title'
self.phase = 'menu'
self.level = 0
self.block_color = self.sprites['block'][0].get_at((0, 0))
self.background_color = self.sprites['background'][0].get_at((0, 0))
self.directions = ((-1, 0), (0, -1), (1, 0), (0, 1))
    def update_selecter_coordinates(self):
        # Place the selecter arrow just left of (and vertically centred on)
        # the currently highlighted menu option.
        self.selecter_coordinates = (
            self.menu_coordinates[self.menu_selectable_options[self.selecter_position_index]][0] -
            self.sprites['selecter'][0].get_width() - self.selecter_gap,
            find_center(
                self.sprites[self.menu_selectable_options[self.selecter_position_index]][0].get_size(),
                self.sprites['selecter'][0].get_size(),
                self.menu_coordinates[self.menu_selectable_options[self.selecter_position_index]]
            )[1]
        )
def initiate_level_transition(self):
self.level_transition_phase = 0
for quadratic in self.level_transition_quadratics:
quadratic.reset()
def block_type(self, grid_coordinates):
if grid_coordinates in self.blocks:
return self.blocks[grid_coordinates].kind
def convert_from_grid(self, grid_coordinates):
return combine_lists(grid_coordinates, (self.block_size, self.block_size), '*')
def convert_to_grid(self, coordinates):
return tuple([int((coordinates[i] - (coordinates[i] % self.block_size)) / self.block_size) for i in range(2)])
def find_all_grid_coordinates(self, coordinates, dimensions):
start = self.convert_to_grid(coordinates)
end = self.convert_to_grid(combine_lists(combine_lists(coordinates, dimensions, '+'), (1, 1), '-'))
all_coordinates = []
for x in range(start[0], end[0] + 1):
for y in range(start[1], end[1] + 1):
all_coordinates.append((x, y))
return all_coordinates
def get_color(self, coordinates):
try:
return tuple(self.level_maps[self.difficulty][self.level - 1].get_at(coordinates))
except:
return 0, 0, 0, 0
    def update_level(self):
        # Build the next level from its pixel map, then spawn the player at
        # the bottom-centre of the entrance door.
        self.analyze_map()
        self.player_default_coordinates = [
            find_center(self.doors[0].dimensions, player.dimensions,
                        self.doors[0].coordinates)[0],
            self.doors[0].coordinates[1] +
            self.doors[0].dimensions[1] -
            player.dimensions[1]
        ]
        player.default_coordinates = list(self.player_default_coordinates)
        player.total_reset()
        player.fake_coordinates = player.default_fake_coordinates
        player.update_grid_coordinates()
        # The block directly under the player's feet.
        player.block = self.blocks[
            self.convert_to_grid((player.coordinates[0], player.coordinates[1] + player.dimensions[1]))]
def surrounding_blocks(self, initial_coordinates, block_types, blocks, return_coordinates=False):
result = []
for direction in self.directions:
grid_coordinates = tuple(combine_lists(initial_coordinates, direction, '+'))
if grid_coordinates in blocks and blocks[grid_coordinates] in block_types:
if return_coordinates:
result.append(grid_coordinates)
else:
result.append(direction)
return result
def rotate_sprites(self, sprite_type, direction):
rotation = None
if direction == (-1, 0):
rotation = 90
elif direction == (0, 1):
rotation = 180
elif direction == (1, 0):
rotation = 270
if rotation:
sprites = []
for sprite in self.sprites[sprite_type]:
sprites.append(pygame.transform.rotate(sprite, rotation))
else:
sprites = self.sprites[sprite_type]
return sprites
def add_offset(self, coordinates, block_type, direction):
offset = self.block_offsets[block_type]
if direction[0]:
offset = offset[::-1]
return tuple(combine_lists(coordinates, offset, '+'))
def exception(self, exception, coordinates):
raise Exception(exception + ' at {0}'.format((coordinates[0] + (self.level - 1) * 25, coordinates[1])))
def get_block_info(self, block_type, info):
if self.level in self.block_info[self.difficulty] and block_type in self.block_info[self.difficulty][
self.level] and info in \
self.block_info[self.difficulty][self.level][block_type]:
return self.block_info[self.difficulty][self.level][block_type][info]
return self.default_block_info[block_type][info]
    def analyze_map(self):
        """Parse the current level's pixel map into live game objects.

        Block types are encoded in each opaque pixel's (R, G) channels; the
        blue channel carries extras (facing hints, delays, rail markers).
        Builds: the reachable background (flood fill from the entrance),
        Block objects per tile, laser/cannon/gate-head emitters with their
        beam/shot entities, and the rail paths for platforms and platform
        tunnels.
        """
        blocks = {}
        self.level += 1
        entrance = None
        self.count = 0
        # Pass 1: map every fully-opaque pixel to a block type by (R, G).
        for x in range(self.level_maps[self.difficulty][self.level - 1].get_width()):
            for y in range(self.level_maps[self.difficulty][self.level - 1].get_height()):
                color = self.get_color((x, y))
                if color[3] == 255:
                    formatted_color = (color[0], color[1])
                    if formatted_color in self.block_color_values:
                        block_type = self.block_color_values[formatted_color]
                        if block_type == 'entrance':
                            entrance = (x, y)
                        blocks[(x, y)] = block_type
                    elif formatted_color != (0, 0):
                        raise Exception(
                            "Unidentified block_color {0} at {1}".format(color, (x + (self.level - 1) * 25, y)))
        if not entrance:
            raise Exception("Missing door")
        # Pass 2: flood-fill background tiles reachable from the entrance.
        self.backgrounds = {}
        active_coordinates = (entrance,)
        while active_coordinates:
            all_new_coordinates = []
            for coordinates in active_coordinates:
                for direction in self.directions:
                    new_coordinates = tuple(combine_lists(coordinates, direction, '+'))
                    if new_coordinates not in self.backgrounds and (
                                    new_coordinates not in blocks or blocks[new_coordinates] != 'block'):
                        self.backgrounds[new_coordinates] = Block(self.sprites['background'],
                                                                  self.convert_from_grid(
                                                                      new_coordinates), 'background')
                        if new_coordinates in blocks and blocks[new_coordinates] in self.block_types['solid']:
                            continue
                        all_new_coordinates.append(new_coordinates)
            active_coordinates = tuple(all_new_coordinates)
        # Pass 3: instantiate game objects for every mapped tile.
        self.blocks = {}
        self.cannons = []
        self.lasers = []
        platforms = []
        self.rails = []
        self.gate_heads = []
        self.alternating_blocks = []
        self.platform_tunnel_entrances = []
        self.doors = [None, None]
        gate_switch = None
        gate_head = None
        for initial_coordinates in blocks:
            block_type = blocks[initial_coordinates]
            # Multi-tile blocks are built once, from their top-left tile only.
            if block_type in self.block_types['multi_block']:
                top_corner = True
                for direction in self.surrounding_blocks(initial_coordinates, (block_type,), blocks):
                    if direction in ((-1, 0), (0, -1)):
                        top_corner = False
                        break
                if not top_corner:
                    continue
            sprites = None
            coordinates = self.convert_from_grid(initial_coordinates)
            if block_type in self.block_types['directional']:
                # Facing: away from an adjacent platform, or as flagged by a
                # neighbouring pixel's blue channel (205); default is down.
                direction = self.surrounding_blocks(initial_coordinates, ('platform',), blocks)
                if not direction:
                    for direction_2 in self.directions:
                        color = self.get_color(tuple(combine_lists(initial_coordinates, direction_2, '+')))
                        if color[2] == 205 and color[3] == 255:
                            direction = (direction_2,)
                            break
                if not direction:
                    direction = [(0, 1)]
                direction = list(direction[0])
                direction_index = opposite(direction.index(0))
                direction[direction_index] *= -1
                direction = tuple(direction)
                if block_type in ('platform_tunnel_entrance', 'platform_tunnel_exit'):
                    sprite_type = 'platform_tunnel'
                else:
                    sprite_type = block_type
                sprites = self.rotate_sprites(sprite_type, direction)
                if block_type in self.block_offsets:
                    if block_type != 'spikes' or direction in ((-1, 0), (0, -1)):
                        coordinates = self.add_offset(coordinates, block_type, direction)
                if block_type in ('laser', 'cannon', 'gate_head'):
                    # Emitter blocks: build the block, then march along its
                    # facing until a solid tile to lay out beam/gate entities
                    # (cannons only record the end tile).
                    if block_type in ('laser', 'gate_head'):
                        entity_sprite = self.rotate_sprites(block_type + '_entity', direction)[0]
                        if block_type == 'laser':
                            block = EntityBlock(sprites, coordinates, 'laser', entity_sprite, direction,
                                                direction_index)
                            self.lasers.append(block)
                            block.end = None
                            block.active = 0
                            block.active_duration = self.get_block_info('laser', 'active_duration')
                            block.inactive_duration = self.get_block_info('laser', 'inactive_duration')
                            block.frequency = block.active_duration + block.inactive_duration
                            block.sprite_speed = int(block.inactive_duration / len(block.current_sprites()))
                        else:
                            block = GateHead(sprites, coordinates, 'gate_head', entity_sprite, direction,
                                             direction_index,
                                             self.get_block_info('gate_head', 'speed'))
                            self.gate_heads.append(block)
                    else:
                        block = Cannon(sprites, coordinates, self.sprites['cannon_entity'][0], direction,
                                       direction_index,
                                       self.get_block_info('cannon', 'entity_frequency'),
                                       self.get_block_info('cannon', 'entity_speed'))
                        self.cannons.append(block)
                    active_coordinates = list(initial_coordinates)
                    while True:
                        if tuple(active_coordinates) in blocks and blocks[
                            tuple(active_coordinates)
                        ] in self.block_types['solid'] and tuple(active_coordinates) != initial_coordinates:
                            if block_type == 'cannon':
                                block.last_coordinates = tuple(active_coordinates)
                            break
                        if block_type in ('laser', 'gate_head'):
                            # noinspection PyTypeChecker
                            entity = Thing(block.entity_sprite, self.add_offset(
                                self.convert_from_grid(tuple(active_coordinates)), block_type + '_entity',
                                block.direction))
                            if block_type == 'laser':
                                entity.all_grid_coordinates = tuple(active_coordinates)
                            else:
                                entity.all_grid_coordinates = (tuple(active_coordinates),)
                                entity.gate_head = block
                            block.entities.append(entity)
                        active_coordinates[block.direction_index] += block.direction[block.direction_index]
                else:
                    block = DirectionBlock(sprites, coordinates, block_type, direction, direction_index)
                    if block_type in ('fire', 'lava'):
                        block.transforming = True
                    elif block_type == 'platform_tunnel_entrance':
                        self.platform_tunnel_entrances.append(block)
                if block_type == 'gate_switch':
                    gate_switch = block
                elif block_type == 'gate_head':
                    gate_head = block
                elif block_type == 'platform':
                    platforms.append(Block(self.sprites['platform'], coordinates, 'platform'))
                    continue
            else:
                if block_type in ('block', 'alternating_block'):
                    # Plain blocks get per-edge borders drawn in, and their
                    # corners knocked out where two open edges meet.
                    sprites = [sprite.copy() for sprite in self.sprites['block']]
                    other_sprite = None
                    if block_type == 'alternating_block':
                        other_sprite = self.sprites['alternating_block'][0].copy()
                        sprites.append(other_sprite)
                    far = self.block_size - game.scale_factor
                    directions = []
                    corner_directions = []
                    for direction in self.directions:
                        grid_coordinates = tuple(combine_lists(initial_coordinates, direction, '+'))
                        if (grid_coordinates in blocks and (blocks[
                                                                grid_coordinates] not in self.block_types['solid'] or
                                                                blocks[
                                                                    grid_coordinates] in self.block_types[
                                                                'partial_solid'])) or (
                                    grid_coordinates not in blocks and grid_coordinates in self.backgrounds):
                            directions.append(direction)
                            if grid_coordinates not in blocks or blocks[grid_coordinates] not in self.block_types[
                                'no_corner']:
                                corner_directions.append(direction)
                            rect_coordinates = [0, 0]
                            rect_size = [0, 0]
                            direction_index = opposite(direction.index(0))
                            if direction[direction_index] == 1:
                                rect_coordinates[direction_index] = far
                            rect_size[direction_index] = game.scale_factor
                            rect_size[opposite(direction_index)] = self.block_size
                            for sprite in sprites:
                                if sprite == other_sprite:
                                    color = self.get_block_info('alternating_block', 'border_color')
                                else:
                                    color = (0, 0, 0, 255)
                                pygame.draw.rect(sprite, color, (rect_coordinates, rect_size))
                    corner_coordinates = []
                    if (-1, 0) in corner_directions:
                        if (0, -1) in corner_directions:
                            corner_coordinates.append((0, 0))
                        if (0, 1) in corner_directions:
                            corner_coordinates.append((0, far))
                    if (1, 0) in corner_directions:
                        if (0, -1) in corner_directions:
                            corner_coordinates.append((far, 0))
                        if (0, 1) in corner_directions:
                            corner_coordinates.append((far, far))
                    for corner in corner_coordinates:
                        for sprite in sprites:
                            pygame.draw.rect(sprite, self.background_color,
                                             (corner, (game.scale_factor, game.scale_factor)))
                if block_type in self.block_offsets:
                    coordinates = self.add_offset(coordinates, block_type, (0, 1))
            if not sprites:
                sprites = self.sprites[block_type]
            block = Block(sprites, coordinates, block_type)
            if block_type == 'alternating_block':
                block.active = 1
                block.frequency = self.get_block_info('alternating_block', 'frequency')
                self.alternating_blocks.append(block)
            elif block_type == 'entrance':
                self.doors[0] = block
            elif block_type == 'exit':
                self.doors[1] = block
            for grid_coordinates in block.all_grid_coordinates:
                self.blocks[grid_coordinates] = block
            if block_type in self.block_types['delay']:
                # A small blue value (1..99) on the tile encodes a start delay.
                color = self.get_color(initial_coordinates)
                if color[2] != 0 and color[2] < 100:
                    block.delay = color[2]
                    if block_type == 'alternating_block' and block.delay >= self.get_block_info('alternating_block',
                                                                                               'frequency'):
                        block.active = opposite(block.active)
                        block.delay -= self.get_block_info('alternating_block', 'frequency')
                else:
                    block.delay = 0
        if gate_switch:
            gate_switch.gate_head = gate_head
        # Pass 4: trace rail paths (blue == 255 pixels) for platforms and
        # platform tunnels, choosing rail sprites by junction shape.
        self.platforms = []
        for l in (platforms, self.platform_tunnel_entrances):
            for block in l:
                repeating = True
                other_coordinates = []
                if block.kind == 'platform':
                    start = block.all_grid_coordinates[0]
                else:
                    start = list(block.all_grid_coordinates[0])
                    start[opposite(block.direction_index)] += 1
                    other_coordinates.append(tuple(start))
                    start[block.direction_index] -= 1 * block.direction[block.direction_index]
                    start = tuple(start)
                rails = [start]
                current_coordinates = start
                while True:
                    new_coordinates = []
                    directions = []
                    for direction in self.directions:
                        grid_coordinates = tuple(combine_lists(current_coordinates, direction, '+'))
                        if block.kind == 'platform_tunnel_entrance' and len(other_coordinates) == 1 and \
                                self.block_type(grid_coordinates) == 'platform_tunnel_exit':
                            end_block = self.blocks[grid_coordinates]
                            end = list(end_block.all_grid_coordinates[0])
                            end[opposite(end_block.direction_index)] += 1
                            other_coordinates.append(tuple(end))
                            end[end_block.direction_index] -= 1 * end_block.direction[end_block.direction_index]
                            other_coordinates.append(tuple(end))
                        color = self.get_color(grid_coordinates)
                        if (color[2] == 255 and color[
                            3] == 255) or grid_coordinates == start or grid_coordinates in other_coordinates or (
                                    grid_coordinates in blocks and blocks[grid_coordinates] == 'platform'):
                            directions.append(direction)
                            if grid_coordinates not in rails:
                                new_coordinates = grid_coordinates
                    if len(directions) == 1:
                        sprites = self.rotate_sprites('end_rail', directions[0])
                        if not new_coordinates:
                            repeating = False
                    elif len(directions) == 2:
                        if ((1, 0) in directions and (-1, 0) in directions) or (
                                    (0, 1) in directions and (0, -1) in directions):
                            if (0, 1) in directions:
                                sprites = []
                                for sprite in self.sprites['straight_rail']:
                                    sprites.append(pygame.transform.rotate(sprite, 90))
                            else:
                                sprites = self.sprites['straight_rail']
                        else:
                            rotation = None
                            if (0, 1) in directions:
                                if (-1, 0) in directions:
                                    rotation = 90
                                else:
                                    rotation = 180
                            elif (1, 0) in directions and (0, -1) in directions:
                                rotation = 270
                            if rotation:
                                sprites = []
                                for sprite in self.sprites['turn_rail']:
                                    sprites.append(pygame.transform.rotate(sprite, rotation))
                            else:
                                sprites = self.sprites['turn_rail']
                    elif len(directions) == 0:
                        self.exception('Single rail', current_coordinates)
                    else:
                        self.exception('Surrounded rail', current_coordinates)
                    self.rails.append(Thing(sprites, self.convert_from_grid(current_coordinates)))
                    current_coordinates = new_coordinates
                    if not new_coordinates:
                        break
                    rails.append(new_coordinates)
                if block.kind == 'platform':
                    if repeating:
                        direction = self.directions[self.get_color(block.all_grid_coordinates[0])[2]]
                    else:
                        direction = None
                    platform = Platform(self.sprites['platform'], rails, self.get_block_info('platform', 'speed'),
                                        repeating, direction)
                    self.platforms.append(platform)
                    # Adopt adjacent directional fittings so they ride along.
                    for grid_coordinates in self.surrounding_blocks(platform.all_grid_coordinates[0],
                                                                    self.block_types['directional'], blocks, True):
                        platform.blocks.append(self.blocks[grid_coordinates])
                        del self.blocks[grid_coordinates]
                else:
                    block.rails = rails
                    block.frequency = self.get_block_info('platform_tunnel', 'frequency')
                    block.platforms = []
class Thing:
    """Base drawable: a sprite collection plus a world position and animation state."""

    def __init__(self, sprites, coordinates=(0, 0)):
        self.sprites = sprites
        self.coordinates = list(coordinates)
        self.reset()
        # Size is taken from whichever sprite is shown first.
        self.dimensions = self.current_sprite().get_size()

    def update_sprites(self, speed=4, reset=True):
        """Advance the animation one tick every `speed` calls.

        Returns 'completed' when the last frame is reached (rewinding to the
        first frame if `reset` is true), otherwise None.
        """
        self.sprite_count += 1
        if self.sprite_count != speed:
            return None
        self.sprite_count = 0
        last_frame = len(self.current_sprites()) - 1
        if self.sprite_index == last_frame:
            if reset:
                self.sprite_index = 0
            return 'completed'
        self.sprite_index += 1
        return None

    def current_sprites(self):
        """Frames for the current state, normalized to a tuple."""
        return make_tuple(self.sprites)

    def current_sprite(self):
        """The single frame to draw right now."""
        return self.current_sprites()[self.sprite_index]

    def reset(self):
        """Rewind the animation to its first frame."""
        self.sprite_count = 0
        self.sprite_index = 0
class Block(Thing):
    """A Thing pinned to the background grid and tagged with a block kind."""

    def __init__(self, sprites, coordinates, kind):
        super().__init__(sprites, coordinates=coordinates)
        self.kind = kind
        self.transforming = False
        # Every grid cell the block covers; collision code looks these up.
        self.all_grid_coordinates = background.find_all_grid_coordinates(
            self.coordinates, self.dimensions)
class DirectionBlock(Block):
    """A grid block that also carries a facing direction."""

    def __init__(self, sprites, coordinates, kind, direction, direction_index):
        super().__init__(sprites, coordinates, kind)
        # Facing vector plus the axis index the block acts along.
        self.direction_index = direction_index
        self.direction = direction
class EntityBlock(DirectionBlock):
    """A directional block that spawns moving entities (gate segments, projectiles).

    ``direction_index`` is accepted for interface compatibility but is
    recomputed below from ``direction`` itself, so the passed value is
    never used.
    """

    def __init__(self, sprites, coordinates, kind, entity_sprite, direction, direction_index):
        super().__init__(sprites, coordinates, kind, direction, direction_index)
        # DirectionBlock.__init__ already stored ``direction``; the redundant
        # re-assignment has been removed.  The travel axis is derived from the
        # direction vector: the axis holding 0 is the cross axis, and
        # ``opposite`` (defined elsewhere in this file) flips to the other one.
        self.direction_index = opposite(self.direction.index(0))
        self.entity_sprite = entity_sprite
        self.entity_dimensions = self.entity_sprite.get_size()
        self.entities = []  # live entities spawned by this block
class GateHead(EntityBlock):
    """The emitter block of a gate: pulls its column of gate entities back in."""

    def __init__(self, sprites, coordinates, kind, entity_sprite, direction, direction_index, speed):
        super().__init__(sprites, coordinates, kind, entity_sprite, direction, direction_index)
        # Retraction moves opposite to the facing direction, hence the -1.
        self.speed = speed * direction[self.direction_index] * game.scale_factor * -1
        self.retracting = False
        # Coordinate at which a retracting entity counts as absorbed by the head.
        self.final_coordinate = (self.coordinates[self.direction_index]
                                 - background.block_size * self.direction[self.direction_index])

    def retract(self):
        """Pull every gate entity one step toward the head, absorbing arrivals."""
        # Iterate over a snapshot: entities are removed mid-loop, and removing
        # from the list being iterated skips the element after the removed one,
        # leaving that entity un-moved for the frame.
        for entity in list(self.entities):
            entity.coordinates[self.direction_index] += self.speed
            entity.all_grid_coordinates = background.find_all_grid_coordinates(entity.coordinates, entity.dimensions)
            if entity.coordinates[self.direction_index] == self.final_coordinate:
                self.entities.remove(entity)
        if len(self.entities) == 0:
            self.retracting = False
class Cannon(EntityBlock):
    """A block that periodically fires projectile entities along its facing axis."""

    def __init__(self, sprites, coordinates, entity_sprite, direction, direction_index, entity_frequency,
                 entity_speed):
        super().__init__(sprites, coordinates, 'cannon', entity_sprite, direction, direction_index)
        axis = self.direction_index
        # Signed, scale-adjusted projectile speed along the firing axis.
        self.entity_speed = entity_speed * self.direction[axis] * game.scale_factor
        self.entity_frequency = entity_frequency
        self.update_initial_coordinates()

    def update_initial_coordinates(self):
        """Recompute where a freshly fired projectile spawns."""
        axis = self.direction_index
        if self.direction[axis] == 1:
            # Firing toward the positive axis: spawn at the low-coordinate edge.
            muzzle = self.coordinates[axis]
        else:
            # Firing toward the negative axis: spawn flush with the far edge.
            muzzle = (self.coordinates[axis] + self.dimensions[axis]
                      - self.entity_dimensions[axis])
        # Center the projectile on the cross axis, then pin the firing axis.
        spawn = find_center(self.dimensions, self.entity_dimensions, self.coordinates)
        spawn[axis] = muzzle
        self.entity_initial_coordinates = spawn
class Platform(Block):
    """A block that rides along a rail path, towing any blocks in ``self.blocks``.

    ``rails`` is an ordered list of grid coordinates tracing the path.  When
    ``repeating`` is true the platform always travels one way and respawns at
    the far end; otherwise it ping-pongs between the two ends.
    """
    def __init__(self, sprites, rails, speed, repeating, direction):
        self.speed = speed * game.scale_factor
        self.repeating = repeating
        # +1 walks the rail list forward, -1 backward.
        self.rail_direction = -1
        if self.repeating:
            # Compare the first rail segment against the requested travel
            # direction to decide which way to walk the list, and remember
            # which end (``end_index``) terminates a lap.
            if tuple(combine_lists(rails[1], rails[0], '-')) == direction:
                self.rail_direction = 1
                self.end_index = 1
            else:
                self.end_index = 0
        self.rail_coordinates = [background.convert_from_grid(rail) for rail in rails]
        self.current_rail_number = 0
        # Blocks carried with the platform; always includes the platform itself.
        self.blocks = [self]
        self.ends = (0, len(self.rail_coordinates) - 1)
        super().__init__(sprites, self.rail_coordinates[self.current_rail_number], 'moving_platform')
    def move(self):
        """Advance one step along the rails, retargeting at each rail point."""
        # Only pick a new target while sitting exactly on a rail point.
        if self.coordinates in self.rail_coordinates:
            if (not self.repeating and self.current_rail_number in self.ends) or (
                    self.repeating and self.current_rail_number == self.ends[self.end_index]):
                if self.repeating:
                    # Wrap: jump back past the opposite end to loop the path.
                    self.current_rail_number = self.ends[opposite(self.end_index)] - self.rail_direction
                else:
                    # Ping-pong: reverse the walk direction.
                    self.rail_direction *= -1
            self.current_rail_number += self.rail_direction
            # Unit vector toward the next rail point (one axis is always 0).
            self.direction = [int(polarity(self.rail_coordinates[self.current_rail_number][i] - self.coordinates[i]))
                              for i
                              in range(2)]
            self.direction_index = opposite(self.direction.index(0))
        for block in self.blocks:
            block.coordinates[self.direction_index] += self.speed * self.direction[self.direction_index]
            block.all_grid_coordinates = background.find_all_grid_coordinates(block.coordinates, block.dimensions)
class Mob(Thing):
    """A Thing with velocity, gravity, and named movement conditions.

    ``conditions_info`` maps condition names ('moving', 'jumping', ...) to
    dicts holding at least an 'active' default plus tuning values.
    """

    def __init__(self, sprites, conditions_info, coordinates=(0, 0),
                 default_sprite_type='stagnant', visible_direction=True, gravity=(0, 1)):
        # Fixed: the default used to be the mutable ``list((0, 0))``, which is
        # created once and shared by every instance constructed without
        # explicit coordinates.  An immutable tuple default plus a defensive
        # copy removes the aliasing (callers' lists are never shared either).
        self.conditions_info = conditions_info
        self.default_coordinates = list(coordinates)
        self.default_sprite_type = default_sprite_type
        self.visible_direction = visible_direction
        self.default_gravity = gravity
        # Must be set before Thing.__init__, which calls current_sprite().
        self.current_sprite_type = self.default_sprite_type
        super().__init__(sprites, coordinates)
        self.total_reset()
        self.velocity = [0, 0]
        self.direction = 1

    def current_sprites(self):
        """Frames for the current state, honoring facing when drawn directionally."""
        if self.visible_direction:
            return make_tuple(self.sprites[self.direction][self.current_sprite_type])
        else:
            return make_tuple(self.sprites[self.current_sprite_type])

    def reset(self, conditions=None):
        """Rewind the animation; optionally clear the named movement conditions."""
        super().reset()
        if conditions:
            for condition in conditions:
                self.conditions[condition] = False

    def total_reset(self):
        """Return the mob to its spawn state: position, gravity, conditions."""
        self.reset()
        self.current_sprite_type = self.default_sprite_type
        self.velocity = [0, 0]
        self.conditions = {name: self.conditions_info[name]['active'] for name in self.conditions_info}
        self.coordinates = list(self.default_coordinates)
        self.gravity = list(self.default_gravity)
        self.gravity_index = opposite(self.gravity.index(0))

    def update_grid_coordinates(self, velocity=True):
        """Refresh the occupied grid cells, optionally probing one step ahead."""
        if velocity:
            coordinates = combine_lists(self.coordinates, self.velocity, '+')
        else:
            coordinates = self.coordinates
        self.all_grid_coordinates = background.find_all_grid_coordinates(coordinates, self.dimensions)
class Player(Mob):
    """The controllable mob; adds camera offsets, gravity switching, and death.

    Fixed: ``process_collision`` and ``align_velocity`` used to call through
    the module-level ``player`` global instead of ``self``.  With a single
    Player instance the behavior was identical, but the methods now correctly
    operate on ``self``.
    """

    def __init__(self, sprites, conditions_info):
        super().__init__(sprites, conditions_info, visible_direction=False)
        # Fixed on-screen position around which the world scrolls.
        self.default_fake_coordinates = find_center(game.dimensions, self.dimensions)
        self.fake_coordinates = [0, 0]
        self.gravity_switch = None

    def generate_display_coordinates(self, coordinates):
        """Translate world coordinates into screen coordinates relative to the player."""
        return combine_lists(
            combine_lists(coordinates, self.fake_coordinates, '+'),
            self.coordinates, '-')

    def die(self):
        """Start the death animation and schedule a level reset."""
        self.current_sprite_type = 'dying'
        background.condition = 'reset'
        if self.gravity_switch:
            # Pop the active gravity switch back to its idle frame.
            self.gravity_switch.sprite_index = 0
            self.gravity_switch = None
        self.block = None

    def process_collision(self, thing):
        """Resolve a collision with ``thing``, one axis at a time."""
        for i in range(2):
            velocity = [0, 0]
            velocity[i] = self.velocity[i]
            if collision(combine_lists(self.coordinates, velocity, '+'), self.dimensions,
                         thing.coordinates, thing.dimensions) is True:
                self.align_velocity(thing, i)

    def align_velocity(self, thing, i):
        """Clamp velocity on axis ``i`` so the player ends flush against ``thing``."""
        if i == self.gravity_index:
            self.reset(('jumping', 'falling'))
            if (self.gravity[self.gravity_index] == 1 and self.velocity[i] > 0) or (
                    self.gravity[self.gravity_index] == -1 and self.velocity[i] < 0):
                # Moving with gravity into the surface: stand on it.
                self.block = thing
        if self.velocity[i] > 0:
            self.velocity[i] = thing.coordinates[i] - (
                self.coordinates[i] + self.dimensions[i])
        elif self.velocity[i] < 0:
            self.velocity[i] = (thing.coordinates[i] + thing.dimensions[i]) - \
                               self.coordinates[i]
        else:
            # Colliding while stationary on this axis means being crushed.
            self.die()
            self.velocity = [0, 0]

    def set_gravity(self, thing):
        """Re-point gravity after touching gravity switch ``thing``."""
        if self.gravity_switch:
            self.gravity_switch.sprite_index = 0
        self.gravity_switch = thing
        thing.sprite_index = 1
        self.gravity = list(thing.direction)
        self.gravity_index = thing.direction_index
        # Flip the sign along the switch's axis.
        self.gravity[self.gravity_index] *= -1
# sheet input
# Core game object plus the three sprite sheets (file names versioned by game.version).
game = Game(30, (1200, 900), 3, 10, 14)
block_sheet = SpriteSheet('block_sheet_V{0}.png'.format(game.version))
misc_sheet = SpriteSheet('misc_sheet_V{0}.png'.format(game.version))
map_sheet = SpriteSheet('map_sheet_V{0}.png'.format(game.version))
# background_map_sheet = SpriteSheet('Ivan\'s_levels.png', 1)
background_maps = map_sheet.get_sprites(y_constant=25, x_constant=(25, 30), scale=False)
# NOTE(review): the 'hard' maps are read with the exact same arguments as the
# easy ones, so both sets are identical — confirm whether a different sheet or
# offset was intended.
background_maps_hard = map_sheet.get_sprites(y_constant=25, x_constant=(25, 30), scale=False)
# sprite input
# background_sprites maps a block name to its list of animation frames.
single_block_names = (
    'block', 'background', 'cannon', 'platform', 'straight_rail', 'turn_rail', 'end_rail', 'alternating_block')
single_blocks = block_sheet.get_sprites(block_number=len(single_block_names))
background_sprites = {name: [single_blocks[single_block_names.index(name)]]
                      for name in single_block_names}
misc_names = ('gate_head_entity', 'laser_entity', 'cannon_entity', 'spikes')
misc = block_sheet.get_sprites(all_dimensions=((6, 10), (4, 10), (4, 4), (10, 3)))
misc_sprites = {name: [misc[misc_names.index(name)]] for name in misc_names}
background_sprites.update(misc_sprites)
# The last door frame doubles as the static door background; entrance plays
# the remaining frames forward, exit plays them in reverse.
background_doors = block_sheet.get_sprites(y_constant=12, x_constant=(16, 10))
background_sprites['door_background'] = [background_doors[-1]]
del background_doors[-1]
background_sprites['entrance'] = background_doors
background_sprites['exit'] = background_doors[::-1]
background_sprites['checkpoint'] = block_sheet.get_sprites(y_constant=10, x_constant=(8, 3))
background_sprites['gate_head'] = block_sheet.get_sprites(block_number=2)
background_sprites['gravity_switch'] = block_sheet.get_sprites(block_number=2)
background_sprites['laser'] = block_sheet.get_sprites(block_number=4) # y_constant=10, x_constant=(8, 4))
player_sprites = block_sheet.get_sprites(block_number=8)
background_sprites['gate_switch'] = block_sheet.get_sprites(y_constant=10, x_constant=(9, 4))
background_sprites['lava'] = block_sheet.get_sprites(block_number=9)
background_sprites['fire'] = block_sheet.get_sprites(block_number=4)
background_sprites['platform_tunnel'] = block_sheet.get_sprites(all_dimensions=((30, 10),))
# background_sprites['number'] = misc_sheet.get_sprites(y_constant=40, x_constant=(20, 10))
background_sprites['number'] = misc_sheet.get_sprites(y_constant=7, x_constant=(7, 10))
background_sprites['title'] = misc_sheet.get_sprites(dimensions=(81, 31), scale=4)
player_option_sprites = misc_sheet.get_sprites(y_constant=7, x_constant=(61, 2))
background_sprites['1 player'] = [player_option_sprites[0]]
background_sprites['2 player'] = [player_option_sprites[1]]
background_sprites['selecter'] = misc_sheet.get_sprites(dimensions=(4, 5), scale=4)
# Sanity check: the drawing code indexes frame lists, so every entry must be one.
for sprites in background_sprites:
    if type(background_sprites[sprites]) != list:
        raise Exception('Non-list sprite')
# sprite processing
# Player animation states: frame 0 is idle, frames 1-7 the death animation.
player_sprites = {
    'stagnant': player_sprites[0],
    'dying': get_list(player_sprites, range(1, 8))
}
# Movement tuning per condition; the 'quadratic' slots are filled in when a
# jump or fall actually starts (see the main loop).
player = Player(player_sprites, {
    'moving': {'active': False, 'velocity': int(1.7 * game.scale_factor)},
    'jumping': {'active': False, 'velocity': 30 * game.scale_factor, 'quadratic': None, 'speed': .5 * game.speed},
    'falling': {'active': False, 'velocity': 60 * game.scale_factor, 'quadratic': None, 'speed': .7 * game.speed},
})
def convert_to_color(number, base):
    """Split ``number`` into two base-``base`` digits, each scaled to 0-255.

    Returns ``(low_digit * scale, high_digit * scale)`` as ints, where
    ``scale`` stretches the digit range ``0..base-1`` across ``0..255``.
    """
    high_digit, low_digit = divmod(number, base)
    scale = 255 / (base - 1)
    return int(low_digit * scale), int(high_digit * scale)
# The block kinds, in the order their level-map colors encode them (1..16).
block_names = (
    'block', 'lava', 'fire', 'checkpoint', 'cannon', 'entrance', 'exit', 'laser', 'gate_head', 'gate_switch',
    'gravity_switch', 'platform', 'spikes', 'platform_tunnel_entrance',
    'platform_tunnel_exit', 'alternating_block'
)
# Map each two-channel color (as produced by convert_to_color) to its kind.
background_color_values = {color: name for color, name in zip(
    [convert_to_color(number, 16) for number in
     range(1, len(block_names) + 1)
     ], block_names)}
# Pixel offsets applied when placing certain sprites inside their grid cell.
background_offsets = {
    'checkpoint': (1, 0),
    'laser_entity': (3, 0),
    'entrance': (2, 8),
    'exit': (2, 8),
    'gate_head_entity': (2, 0),
    'spikes': (0, 7)
}
# Baseline tuning for timed/moving blocks; durations scale with game.speed.
default_background_block_info = {
    'platform': {'speed': 1},
    'cannon': {'entity_speed': 2, 'entity_frequency': 1.6 * game.speed},
    'laser': {'inactive_duration': 2 * game.speed, 'active_duration': 1 * game.speed},
    'gate_head': {'speed': 1},
    'alternating_block': {'frequency': 2 * game.speed, 'border_color': (77, 64, 64, 255)},
    'platform_tunnel': {'frequency': 1.5 * game.speed, 'platform_offset': 3}
}
# Per-difficulty overrides of the defaults above (currently none).
background_block_info = {
    'easy': {
    },
    'hard': {
    }
}
# Category sets consulted for collisions, drawing order, and map parsing.
background_block_types = {
    'solid': ('block', 'cannon', 'gate_head', 'gravity_switch', 'laser',
              'alternating_block', 'platform_tunnel_entrance', 'platform_tunnel_exit', 'moving_platform'),
    'partial_solid': (),
    'foreground': ('fire', 'lava', 'spikes'),
    'directional': (
        'lava', 'fire', 'checkpoint', 'cannon', 'laser', 'gate_head', 'gate_switch', 'gravity_switch', 'spikes',
        'platform_tunnel_entrance', 'platform_tunnel_exit'),
    'dangerous': ('lava', 'fire', 'spikes'),
    'multi_block': ('exit', 'entrance', 'platform_tunnel_entrance', 'platform_tunnel_exit'),
    'no_corner': ('spikes', 'fire', 'lava'),
    'delay': ('alternating_block', 'laser', 'cannon', 'platform_tunnel_entrance')
}
background_maps = {'easy': background_maps, 'hard': background_maps_hard}
background_switch_pairs = None
background = Background(background_sprites, background_maps,
                        background_color_values, background_offsets,
                        default_background_block_info, background_block_info,
                        background_block_types,
                        750 + player.fake_coordinates[1], 1.5 * game.speed,
                        ('title', '1 player', '2 player'), {'title': 300, '1 player': 50, '2 player': 25},
                        ('1 player', '2 player'), 3, 1.5 * game.speed,
                        background_switch_pairs, 'easy',
                        [10, 10], game.scale_factor,
                        )
# developer tools
#
# Jump straight to a chosen level for testing; update_level() advances by one,
# hence the decrement first.
background.level = 30
background.level -= 1
background.condition = None
background.update_level()
# Show the entrance door fully open (last frame) and skip the menu phase.
background.doors[0].sprite_index = len(background.doors[0].current_sprites()) - 1
background.phase = 'levels'
# for block in background.blocks:
#     if background.blocks[block].kind == 'checkpoint':
#         player.coordinates = background.convert_from_grid(background.blocks[block].all_grid_coordinates[0])
#         break
# player.coordinates = background.convert_from_grid(background.doors[1].all_grid_coordinates[0])
# Main game loop: one iteration per frame.  Handles level transitions, the
# title/menu screens, physics and collisions for the level phase, and drawing.
#
# Fixes in this revision:
#   * the keyboard test used the bare list literal ``[K_a]`` (always truthy)
#     instead of ``keys[K_a]``;
#   * cannon entities were removed from the list being iterated, which
#     skipped the following entity for a frame.
while True:
    # NOTE(review): leftover debug print — spams stdout every frame.
    print(player.coordinates)
    events = pygame.event.get()
    # print(events)
    for event in events:
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_SPACE):
            pygame.quit()
            quit()
        elif event.type == KEYDOWN:
            # Developer hotkeys: K_z toggles slow motion, K_x fast-forward.
            if event.key == K_z:
                if game.real_speed == 30:
                    game.real_speed = 1
                else:
                    game.real_speed = 30
            elif event.key == K_x:
                # player.coordinates = background.convert_from_grid((5, 14))
                if game.real_speed == 30:
                    game.real_speed = 100
                else:
                    game.real_speed = 30
    # Scroll the camera through the two-phase level-transition animation.
    if background.level_transition_phase is not None:
        result = background.level_transition_quadratics[background.level_transition_phase].execute()
        if type(result) == tuple:
            if background.level_transition_phase == 0:
                # First phase done: swap in the next level, then scroll back.
                background.update_level()
                background.phase = 'levels'
                player.fake_coordinates[1] = background.level_transition_coordinates[1][0]
                background.level_transition_phase = 1
            else:
                background.condition = 'doors_opening'
                background.level_transition_phase = None
                continue
        else:
            player.fake_coordinates[1] += result
    if background.phase == 'menu':
        if background.condition == 'title':
            # Show the title card for a second before sliding it up.
            game.display.fill(background.block_color)
            game.display.blit(background.sprites['title'][0], background.title_coordinates)
            pygame.display.update()
            pygame.time.wait(1000)
            background.condition = 'title_transition'
            continue
        elif background.condition == 'title_transition':
            background.title_coordinates[1] -= background.title_transition_speed
            if round(background.title_coordinates[1]) <= background.menu_coordinates['title'][1]:
                background.condition = 'menu'
            continue
        elif background.condition == 'menu':
            # Menu navigation: arrows move the selector, return confirms.
            for event in events:
                if event.type == KEYDOWN:
                    if event.key == K_DOWN:
                        if background.selecter_position_index != len(background.menu_selectable_options) - 1:
                            background.selecter_position_index += 1
                            background.update_selecter_coordinates()
                    elif event.key == K_UP:
                        if background.selecter_position_index != 0:
                            background.selecter_position_index -= 1
                            background.update_selecter_coordinates()
                    elif event.key == K_RETURN:
                        if background.menu_selectable_options[background.selecter_position_index] == '1 player':
                            background.condition = None
                            background.initiate_level_transition()
                            break
        if background.level_transition_phase:
            continue
        game.display.fill(background.block_color)
        if background.condition != 'title_transition':
            for option in background.menu_coordinates:
                game.display.blit(background.sprites[option][0], (
                    player.generate_display_coordinates(background.menu_coordinates[option])))
            game.display.blit(background.sprites['selecter'][0],
                              player.generate_display_coordinates(background.selecter_coordinates))
        else:
            game.display.blit(background.sprites['title'][0], background.title_coordinates)
    else:
        # ---------- level phase: animations that suspend normal play ----------
        if background.condition:
            if background.condition == 'reset':
                # Death animation, then respawn at the last checkpoint.
                if player.update_sprites(3) == 'completed':
                    player.total_reset()
                    background.condition = None
                continue
            elif background.condition == 'doors_closing':
                if background.doors[1].update_sprites(4, False) == 'completed':
                    background.initiate_level_transition()
                    background.condition = 'transitioning'
                continue
            elif background.condition == 'doors_opening':
                if background.doors[0].update_sprites(4, False) == 'completed':
                    background.condition = None
                continue
        # ---------- world updates that run every level frame ----------
        # Platform tunnels: retire platforms that reached the end of their
        # rails and periodically emit fresh ones.
        for platform_tunnel_entrance in background.platform_tunnel_entrances:
            for platform in platform_tunnel_entrance.platforms:
                if platform.coordinates in platform.rail_coordinates and platform.current_rail_number in platform.ends:
                    platform_tunnel_entrance.platforms.remove(platform)
                    background.platforms.remove(platform)
                    break
            if (game.count - platform_tunnel_entrance.delay) % platform_tunnel_entrance.frequency == 0:
                platform = Platform(background.sprites['platform'], platform_tunnel_entrance.rails,
                                    background.get_block_info('platform', 'speed'), False, None)
                platform_tunnel_entrance.platforms.append(platform)
                background.platforms.append(platform)
        for platform in background.platforms:
            platform.move()
        for gate_head in background.gate_heads:
            if gate_head.retracting:
                gate_head.retract()
        # Alternating blocks phase in and out of the collision map.
        for alternating_block in background.alternating_blocks:
            if (game.count - alternating_block.delay) % alternating_block.frequency == 0:
                alternating_block.sprite_index = alternating_block.active
                alternating_block.active = opposite(alternating_block.active)
                if alternating_block.active == 0:
                    del background.blocks[alternating_block.all_grid_coordinates[0]]
                    if player.block == alternating_block:
                        player.block = None
                else:
                    background.blocks[alternating_block.all_grid_coordinates[0]] = alternating_block
        # ---------- player physics (skipped while an animation owns the frame) ----------
        if not background.condition:
            background.count += 1
            player.velocity = [0, 0]
            if player.block:
                # Probe a 1px-thick slab just past the player's feet (along
                # gravity) to check the supporting block is still underneath.
                dimensions = list(player.dimensions)
                dimensions[player.gravity_index] = 1
                coordinates_1 = list(player.coordinates)
                if player.gravity[player.gravity_index] == -1:
                    coordinates_1[player.gravity_index] -= 1
                else:
                    coordinates_1[player.gravity_index] += player.dimensions[player.gravity_index]
                coordinates_2 = list(coordinates_1)
                coordinates_2[player.gravity_index] -= player.gravity[player.gravity_index]
                coordinates = (coordinates_1, coordinates_2)
                # Carry the player along with a moving platform or gate.
                velocity = [0, 0]
                for platform in background.platforms:
                    if player.block in platform.blocks:
                        velocity[platform.direction_index] = platform.speed * platform.direction[
                            platform.direction_index]
                        break
                # Gate entities are plain Things carrying a gate_head attribute
                # (assumed attached where gates are built — outside this chunk).
                if type(player.block) == Thing and player.block.gate_head.retracting:
                    head = player.block.gate_head
                    velocity[head.direction_index] = head.speed * head.direction[head.direction_index] * -1
                coordinates = [combine_lists(coordinates[i], velocity, '+') for i in range(2)]
                if (
                        collision(coordinates[0], player.dimensions, player.block.coordinates,
                                  player.block.dimensions) is True and
                        collision(coordinates[1], dimensions, player.block.coordinates,
                                  player.block.dimensions) is not True
                ):
                    player.reset(('jumping', 'falling'))
                    if velocity != [0, 0]:
                        player.velocity = velocity
                else:
                    player.block = None
            # ---------- keyboard input ----------
            keys = pygame.key.get_pressed()
            # Fixed: the last operand used to be the bare list literal [K_a],
            # which is always truthy, so this branch fired every frame.
            if keys[K_RIGHT] or keys[K_LEFT] or keys[K_d] or keys[K_a]:
                if not player.conditions['moving']:
                    player.conditions['moving'] = True
            if (keys[K_UP] or keys[K_w]) and player.block and not (
                    player.conditions['jumping'] or player.conditions['falling']):
                player.conditions['jumping'] = True
                player.block = None
                # noinspection PyTypeChecker
                player.conditions_info['jumping']['quadratic'] = Quadratic(
                    -1, (0, player.conditions_info['jumping']['velocity']),
                    player.conditions_info['jumping']['speed'])
            if not (keys[K_RIGHT] and keys[K_d]) and player.direction == 1 or not (
                    keys[K_LEFT] and keys[K_a]) and player.direction == -1:
                player.direction = 0
            if keys[K_RIGHT] or keys[K_d]:
                player.direction = 1
            elif keys[K_LEFT] or keys[K_a]:
                player.direction = -1
            # Sideways gravity mirrors the horizontal controls.
            if player.gravity == [1, 0]:
                player.direction *= -1
            if player.direction == 0:
                player.reset(('moving',))
                player.direction = 1
            if not (player.block or player.conditions['falling'] or player.conditions['jumping']):
                player.conditions['falling'] = True
                # noinspection PyTypeChecker
                player.conditions_info['falling']['quadratic'] = Quadratic(
                    1, (0, player.conditions_info['falling']['velocity']),
                    player.conditions_info['falling']['speed'])
            # Convert the active conditions into this frame's velocity.
            for condition in player.conditions:
                if player.conditions[condition]:
                    if condition == 'moving':
                        # noinspection PyTypeChecker
                        player.velocity[opposite(player.gravity_index)] += player.conditions_info['moving'][
                            'velocity'] * player.direction
                    if condition == 'jumping':
                        # noinspection PyUnresolvedReferences
                        result = player.conditions_info['jumping']['quadratic'].execute()
                        if type(result) == tuple:
                            player.reset(('jumping',))
                        else:
                            player.velocity[player.gravity_index] -= result * player.gravity[player.gravity_index]
                    elif condition == 'falling':
                        result = player.conditions_info['falling']['quadratic'].execute()
                        player.velocity[player.gravity_index] += make_tuple(result)[0] * player.gravity[
                            player.gravity_index]
            # ---------- collisions, resolved per category ----------
            for collision_type in ('platform', 'block', 'gate'):
                player.update_grid_coordinates()
                collided_object = None
                if collision_type == 'platform':
                    for platform in background.platforms:
                        for block in platform.blocks:
                            for grid_coordinates in block.all_grid_coordinates:
                                if grid_coordinates in player.all_grid_coordinates:
                                    if collision(combine_lists(player.coordinates, player.velocity, '+'),
                                                 player.dimensions,
                                                 block.coordinates, block.dimensions) is True:
                                        if block.kind in background.block_types['dangerous']:
                                            player.die()
                                            break
                                        elif block.kind in background.block_types['solid']:
                                            if collision(player.coordinates, player.dimensions, block.coordinates,
                                                         block.dimensions) is True:
                                                # Already overlapping: the platform moved into
                                                # the player, so shove the player out of it.
                                                player.velocity[platform.direction_index] = 0
                                                player.block = block
                                                if platform.direction[platform.direction_index] == 1:
                                                    player.coordinates[platform.direction_index] = (
                                                        block.coordinates[platform.direction_index]
                                                        + block.dimensions[platform.direction_index])
                                                else:
                                                    player.coordinates[platform.direction_index] = (
                                                        block.coordinates[platform.direction_index]
                                                        - player.dimensions[platform.direction_index])
                                            else:
                                                collided_object = block
                                                player.process_collision(collided_object)
                elif collision_type == 'block':
                    for grid_coordinates in player.all_grid_coordinates:
                        if background.block_type(grid_coordinates) in background.block_types['solid']:
                            collided_object = background.blocks[grid_coordinates]
                            if collision(player.coordinates, player.dimensions, collided_object.coordinates,
                                         collided_object.dimensions) is True:
                                # Already inside a solid block: crushed.
                                player.die()
                                break
                            player.process_collision(collided_object)
                            if collided_object.kind == 'gravity_switch' and player.gravity_switch != collided_object:
                                player.set_gravity(collided_object)
                                player.reset(('jumping', 'falling'))
                else:
                    for gate_head in background.gate_heads:
                        for entity in gate_head.entities:
                            for grid_coordinates in entity.all_grid_coordinates:
                                if grid_coordinates in player.all_grid_coordinates:
                                    if collision(combine_lists(player.coordinates, player.velocity, '+'),
                                                 player.dimensions,
                                                 entity.coordinates,
                                                 entity.dimensions) is True:
                                        collided_object = entity
                                        player.process_collision(collided_object)
                # Catch anything still intersecting along the non-gravity axis.
                if collided_object and player.velocity != [0, 0] and player.current_sprite_type != 'dying':
                    if collision(combine_lists(player.coordinates, player.velocity, '+'), player.dimensions,
                                 collided_object.coordinates, collided_object.dimensions) is True:
                        player.align_velocity(collided_object, opposite(player.gravity_index))
            player.update_grid_coordinates()
        # ---------- special blocks the player is touching ----------
        if not background.condition:
            for grid_coordinates in player.all_grid_coordinates:
                block_type = background.block_type(grid_coordinates)
                if block_type:
                    block = background.blocks[grid_coordinates]
                    if collision(combine_lists(player.coordinates, player.velocity, '+'), player.dimensions,
                                 block.coordinates, block.dimensions) is True:
                        if block_type == 'exit' and player.gravity == [0, 1]:
                            if collision(
                                    combine_lists(player.coordinates, player.velocity, '+'),
                                    player.dimensions,
                                    block.coordinates,
                                    block.dimensions,
                                    True) is True:
                                # Snap the player into the doorway, then close up.
                                player.coordinates = (
                                    find_center(
                                        block.dimensions,
                                        player.dimensions,
                                        c1=block.coordinates)[0],
                                    block.coordinates[1] +
                                    block.dimensions[1] -
                                    player.dimensions[1]
                                )
                                background.condition = 'doors_closing'
                                break
                        elif block_type in background.block_types['dangerous']:
                            player.die()
                            break
                        elif block_type == 'checkpoint' and block.sprite_index == 0:
                            # First touch: record the respawn point and light up.
                            player.default_coordinates = background.convert_from_grid(grid_coordinates)
                            player.default_gravity = player.gravity
                            block.transforming = True
                        elif block_type == 'gate_switch' and block.gate_head.sprite_index == 0:
                            player.default_coordinates = background.convert_from_grid(grid_coordinates)
                            player.default_gravity = player.gravity
                            block.gate_head.retracting = True
                            block.gate_head.sprite_index += 1
                            block.transforming = True
        if background.condition != 'doors_closing':
            player.coordinates = combine_lists(player.velocity, player.coordinates, '+')
        # ---------- cannon projectiles ----------
        for cannon in background.cannons:
            # Iterate over a snapshot: entities may be removed mid-loop, and
            # removing from the list being iterated would skip the next one.
            for entity in list(cannon.entities):
                entity.coordinates[cannon.direction_index] += cannon.entity_speed
                all_grid_coordinates = background.find_all_grid_coordinates(entity.coordinates, entity.dimensions)
                collided = False
                if cannon.last_coordinates in all_grid_coordinates:
                    cannon.entities.remove(entity)
                    break
                for grid_coordinates in all_grid_coordinates:
                    if not background.condition:
                        if grid_coordinates in player.all_grid_coordinates:
                            if collision(entity.coordinates, entity.dimensions, player.coordinates,
                                         player.dimensions) is True:
                                player.die()
                                collided = True
                    if not collided:
                        for gate_head in background.gate_heads:
                            for gate in gate_head.entities:
                                if grid_coordinates in gate.all_grid_coordinates:
                                    if collision(entity.coordinates, entity.dimensions,
                                                 gate.coordinates,
                                                 gate.dimensions) is True:
                                        collided = True
                                        break
                    if not collided:
                        for platform in background.platforms:
                            for block in platform.blocks:
                                if block.kind in background.block_types['solid'] and \
                                        grid_coordinates in block.all_grid_coordinates:
                                    if collision(entity.coordinates, entity.dimensions, block.coordinates,
                                                 block.dimensions) is True:
                                        collided = True
                                        break
                    # Ignore collisions while still overlapping the cannon itself.
                    if collided and grid_coordinates not in cannon.all_grid_coordinates:
                        break
                if collided:
                    cannon.entities.remove(entity)
            if (game.count - cannon.delay) % cannon.entity_frequency == 0:
                cannon.entities.append(Thing(cannon.entity_sprite, cannon.entity_initial_coordinates))
        # ---------- lasers ----------
        for laser in background.lasers:
            cycle = (game.count - laser.delay) % laser.frequency
            if cycle == 0:
                laser.active = 0
                laser.reset()
            if cycle == laser.inactive_duration:
                laser.active = 1
                laser.sprite_index = len(laser.current_sprites()) - 1
            if laser.active:
                laser.end = None
                for entity in laser.entities:
                    # NOTE(review): these membership tests compare a whole
                    # coordinate *list* against another list's elements; if
                    # find_all_grid_coordinates returns a list of tuples this
                    # can never match — confirm intent.
                    if not background.condition:
                        if entity.all_grid_coordinates in player.all_grid_coordinates:
                            if collision(entity.coordinates, entity.dimensions,
                                         player.coordinates,
                                         player.dimensions) is True:
                                player.die()
                    for gate_head in background.gate_heads:
                        for gate in gate_head.entities:
                            if entity.all_grid_coordinates in gate.all_grid_coordinates:
                                if collision(entity.coordinates, entity.dimensions,
                                             gate.coordinates,
                                             gate.dimensions) is True:
                                    laser.end = (entity, gate)
                                    break
                    for platform in background.platforms:
                        for block in platform.blocks:
                            if block.kind in background.block_types['solid'] and \
                                    entity.all_grid_coordinates in block.all_grid_coordinates:
                                if collision(entity.coordinates, entity.dimensions,
                                             block.coordinates,
                                             block.dimensions) is True:
                                    laser.end = (entity, block)
                                    break
                    if laser.end:
                        break
            else:
                laser.update_sprites(laser.sprite_speed, False)
        # ---------- drawing ----------
        game.display.fill(background.block_color)
        for grid_coordinates in background.backgrounds:
            game.blit(background.backgrounds[grid_coordinates])
        for door in background.doors:
            game.display.blit(background.sprites['door_background'][0],
                              player.generate_display_coordinates(door.coordinates))
        for rail in background.rails:
            game.blit(rail)
        for gate_head in background.gate_heads:
            for entity in gate_head.entities:
                game.blit(entity)
        for platform in background.platforms:
            for block in platform.blocks:
                game.blit(block)
        for grid_coordinates in background.blocks:
            block = background.blocks[grid_coordinates]
            if block.kind not in ('cannon', 'entrance', 'exit', 'laser', 'alternating_block') and block.kind not in \
                    background.block_types['foreground']:
                if block.transforming:
                    if block.kind in ('checkpoint', 'gate_switch'):
                        if block.update_sprites(5, reset=False):
                            block.transforming = False
                    else:
                        block.update_sprites()
                game.blit(block)
        for block in background.alternating_blocks:
            game.blit(block)
        # During door/level transitions the player is drawn under the doors.
        if background.condition in ('doors_closing', 'doors_opening') or background.level_transition_phase is not None:
            game.display.blit(player.current_sprite(), player.fake_coordinates)
            player.display_after = False
        else:
            player.display_after = True
        for door in background.doors:
            # noinspection PyTypeChecker
            game.blit(door)
        for cannon in background.cannons:
            for entity in cannon.entities:
                game.blit(entity)
            game.blit(cannon)
        for laser in background.lasers:
            if laser.active:
                for entity in laser.entities:
                    if laser.end and entity == laser.end[0]:
                        # Clip the beam sprite so it stops at the obstacle.
                        area_dimensions = [0, 0]
                        area_dimensions[opposite(laser.direction_index)] = laser.end[0].dimensions[
                            opposite(laser.direction_index)]
                        blit_coordinates = list(laser.end[0].coordinates)
                        if laser.direction[laser.direction_index] == 1:
                            area_dimensions[laser.direction_index] = (
                                laser.end[1].coordinates[laser.direction_index]
                                - laser.end[0].coordinates[laser.direction_index])
                        else:
                            blit_coordinates[laser.direction_index] = (
                                laser.end[1].coordinates[laser.direction_index]
                                + laser.end[1].dimensions[laser.direction_index])
                            area_dimensions[laser.direction_index] = (
                                laser.end[0].coordinates[laser.direction_index]
                                - (laser.end[1].coordinates[laser.direction_index]
                                   + laser.end[1].dimensions[laser.direction_index]))
                        game.display.blit(laser.entity_sprite,
                                          player.generate_display_coordinates(blit_coordinates),
                                          ((0, 0), area_dimensions))
                        break
                    else:
                        game.blit(entity)
            game.blit(laser)
        if player.display_after:
            game.display.blit(player.current_sprite(), player.fake_coordinates)
        for grid_coordinates in background.blocks:
            block = background.blocks[grid_coordinates]
            if block.kind in background.block_types['foreground']:
                if block.transforming:
                    block.update_sprites()
                game.blit(block)
        # Level number in the top-right corner, one digit sprite at a time.
        level = str(background.level)
        for number, i in zip(level, reversed(range(len(level)))):
            game.display.blit(background.sprites['number'][int(number)],
                              (game.dimensions[0] - (i + 1) * (background.number_width + background.level_number_gap) -
                               background.level_number_margins[0], background.level_number_margins[1]))
    game.count += 1
    pygame.display.update()
    game.clock.tick(game.real_speed)
| 47.779197 | 133 | 0.52655 |
7f4bea60e6882845275b4d1773fa13b51b383b99 | 13,019 | py | Python | dymos/transcriptions/common/control_group.py | naylor-b/dymos | 56ee72041056ae20c3332d060e291c4da93844b1 | [
"Apache-2.0"
] | null | null | null | dymos/transcriptions/common/control_group.py | naylor-b/dymos | 56ee72041056ae20c3332d060e291c4da93844b1 | [
"Apache-2.0"
] | null | null | null | dymos/transcriptions/common/control_group.py | naylor-b/dymos | 56ee72041056ae20c3332d060e291c4da93844b1 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function, division
from six import string_types, iteritems
import numpy as np
from openmdao.api import ExplicitComponent, Group, IndepVarComp
from ..grid_data import GridData
from ...utils.misc import get_rate_units, CoerceDesvar
from ...utils.constants import INF_BOUND
class ControlInterpComp(ExplicitComponent):
    """
    Compute the approximated control values and rates given the values of a control at all nodes,
    given values at the control discretization nodes.

    Notes
    -----
    .. math::

        u = \\left[ L \\right] u_d

        \\dot{u} = \\frac{d\\tau_s}{dt} \\left[ D \\right] u_d

        \\ddot{u} = \\left( \\frac{d\\tau_s}{dt} \\right)^2 \\left[ D_2 \\right] u_d

    where
    :math:`u_d` are the values of the control at the control discretization nodes,
    :math:`u` are the values of the control at all nodes,
    :math:`\\dot{u}` are the time-derivatives of the control at all nodes,
    :math:`\\ddot{u}` are the second time-derivatives of the control at all nodes,
    :math:`L` is the Lagrange interpolation matrix,
    :math:`D` is the Lagrange differentiation matrix,
    and :math:`\\frac{d\\tau_s}{dt}` is the ratio of segment duration in segment tau space
    [-1 1] to segment duration in time.
    """

    def initialize(self):
        # Declare the component options: control metadata, phase time units
        # and the grid (segment/node) description.
        self.options.declare(
            'control_options', types=dict,
            desc='Dictionary of options for the dynamic controls')
        self.options.declare(
            'time_units', default=None, allow_none=True, types=string_types,
            desc='Units of time')
        self.options.declare(
            'grid_data', types=GridData,
            desc='Container object for grid info')

        # Save the names of the dynamic controls/parameters
        self._dynamic_names = []
        self._input_names = {}
        self._output_val_names = {}
        self._output_rate_names = {}
        self._output_rate2_names = {}

    def _setup_controls(self):
        """Add one input (values at control-input nodes) and three outputs
        (interpolated values, rates, and second rates at all nodes) per
        control, and declare the sparse partials for each."""
        control_options = self.options['control_options']
        num_nodes = self.num_nodes
        num_control_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']
        time_units = self.options['time_units']

        for name, options in iteritems(control_options):
            self._input_names[name] = 'controls:{0}'.format(name)
            self._output_val_names[name] = 'control_values:{0}'.format(name)
            self._output_rate_names[name] = 'control_rates:{0}_rate'.format(name)
            self._output_rate2_names[name] = 'control_rates:{0}_rate2'.format(name)
            shape = options['shape']
            input_shape = (num_control_input_nodes,) + shape
            output_shape = (num_nodes,) + shape

            units = options['units']
            rate_units = get_rate_units(units, time_units)
            rate2_units = get_rate_units(units, time_units, deriv=2)

            self._dynamic_names.append(name)

            self.add_input(self._input_names[name], val=np.ones(input_shape), units=units)

            self.add_output(self._output_val_names[name], shape=output_shape, units=units)
            self.add_output(self._output_rate_names[name], shape=output_shape, units=rate_units)
            self.add_output(self._output_rate2_names[name], shape=output_shape,
                            units=rate2_units)

            # Build the dense 4D Jacobian templates, then flatten each to 2D
            # (outputs x inputs) so the nonzero pattern can be extracted.
            size = np.prod(shape)
            self.val_jacs[name] = np.zeros((num_nodes, size, num_control_input_nodes, size))
            self.rate_jacs[name] = np.zeros((num_nodes, size, num_control_input_nodes, size))
            self.rate2_jacs[name] = np.zeros((num_nodes, size, num_control_input_nodes, size))

            for i in range(size):
                self.val_jacs[name][:, i, :, i] = self.L
                self.rate_jacs[name][:, i, :, i] = self.D
                self.rate2_jacs[name][:, i, :, i] = self.D2

            self.val_jacs[name] = self.val_jacs[name].reshape((num_nodes * size,
                                                               num_control_input_nodes * size),
                                                              order='C')
            self.rate_jacs[name] = self.rate_jacs[name].reshape((num_nodes * size,
                                                                 num_control_input_nodes * size),
                                                                order='C')
            self.rate2_jacs[name] = self.rate2_jacs[name].reshape((num_nodes * size,
                                                                   num_control_input_nodes * size),
                                                                  order='C')
            self.val_jac_rows[name], self.val_jac_cols[name] = \
                np.where(self.val_jacs[name] != 0)
            self.rate_jac_rows[name], self.rate_jac_cols[name] = \
                np.where(self.rate_jacs[name] != 0)
            self.rate2_jac_rows[name], self.rate2_jac_cols[name] = \
                np.where(self.rate2_jacs[name] != 0)

            self.sizes[name] = size

            # Partials of the interpolated values wrt input-node values are
            # constant (the interpolation matrix), so declare them with val=.
            rs, cs = self.val_jac_rows[name], self.val_jac_cols[name]
            self.declare_partials(of=self._output_val_names[name],
                                  wrt=self._input_names[name],
                                  rows=rs, cols=cs, val=self.val_jacs[name][rs, cs])

            cs = np.tile(np.arange(num_nodes, dtype=int), reps=size)
            rs = np.concatenate([np.arange(0, num_nodes * size, size, dtype=int) + i
                                 for i in range(size)])

            self.declare_partials(of=self._output_rate_names[name],
                                  wrt='dt_dstau',
                                  rows=rs, cols=cs)

            self.declare_partials(of=self._output_rate_names[name],
                                  wrt=self._input_names[name],
                                  rows=self.rate_jac_rows[name], cols=self.rate_jac_cols[name])

            self.declare_partials(of=self._output_rate2_names[name],
                                  wrt='dt_dstau',
                                  rows=rs, cols=cs)

            self.declare_partials(of=self._output_rate2_names[name],
                                  wrt=self._input_names[name],
                                  rows=self.rate2_jac_rows[name], cols=self.rate2_jac_cols[name])

    def setup(self):
        num_nodes = self.options['grid_data'].num_nodes
        time_units = self.options['time_units']
        gd = self.options['grid_data']

        self.add_input('dt_dstau', shape=num_nodes, units=time_units)

        self.val_jacs = {}
        self.rate_jacs = {}
        self.rate2_jacs = {}
        self.val_jac_rows = {}
        self.val_jac_cols = {}
        self.rate_jac_rows = {}
        self.rate_jac_cols = {}
        self.rate2_jac_rows = {}
        self.rate2_jac_cols = {}
        self.sizes = {}
        self.num_nodes = num_nodes

        num_disc_nodes = gd.subset_num_nodes['control_disc']
        num_input_nodes = gd.subset_num_nodes['control_input']

        # Find the indexing matrix that, multiplied by the values at the input nodes,
        # gives the values at the discretization nodes
        L_id = np.zeros((num_disc_nodes, num_input_nodes), dtype=float)
        L_id[np.arange(num_disc_nodes, dtype=int),
             gd.input_maps['dynamic_control_input_to_disc']] = 1.0

        # Matrices L_da and D_da interpolate values and rates (respectively) at all nodes from
        # values specified at control discretization nodes.
        L_da, D_da = gd.phase_lagrange_matrices('control_disc', 'all')

        self.L = np.dot(L_da, L_id)
        self.D = np.dot(D_da, L_id)

        # Matrix D_dd interpolates rates at discretization nodes from values given at control
        # discretization nodes.
        _, D_dd = gd.phase_lagrange_matrices('control_disc', 'control_disc')

        # Matrix D2 provides second derivatives at all nodes given values at input nodes.
        self.D2 = np.dot(D_da, np.dot(D_dd, L_id))

        self._setup_controls()

        self.set_check_partial_options('*', method='cs')

    def compute(self, inputs, outputs):
        control_options = self.options['control_options']

        for name, options in iteritems(control_options):
            u = inputs[self._input_names[name]]

            a = np.tensordot(self.D, u, axes=(1, 0)).T
            b = np.tensordot(self.D2, u, axes=(1, 0)).T

            # divide each "row" by dt_dstau or dt_dstau**2
            outputs[self._output_val_names[name]] = np.tensordot(self.L, u, axes=(1, 0))
            outputs[self._output_rate_names[name]] = (a / inputs['dt_dstau']).T
            outputs[self._output_rate2_names[name]] = (b / inputs['dt_dstau'] ** 2).T

    def compute_partials(self, inputs, partials):
        control_options = self.options['control_options']
        num_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']

        for name, options in iteritems(control_options):
            control_name = self._input_names[name]

            size = self.sizes[name]
            rate_name = self._output_rate_names[name]
            rate2_name = self._output_rate2_names[name]

            # Unroll matrix-shaped controls into an array at each node
            u_d = np.reshape(inputs[control_name], (num_input_nodes, size))

            dt_dstau = inputs['dt_dstau']
            dt_dstau_tile = np.tile(dt_dstau, size)

            # d(rate)/d(dt_dstau): rates scale as 1/dt_dstau, rate2 as
            # 1/dt_dstau**2, hence the -1/t**2 and -2/t**3 factors.
            partials[rate_name, 'dt_dstau'] = \
                (-np.dot(self.D, u_d).ravel(order='F') / dt_dstau_tile ** 2)

            partials[rate2_name, 'dt_dstau'] = \
                -2.0 * (np.dot(self.D2, u_d).ravel(order='F') / dt_dstau_tile ** 3)

            dt_dstau_x_size = np.repeat(dt_dstau, size)[:, np.newaxis]

            r_nz, c_nz = self.rate_jac_rows[name], self.rate_jac_cols[name]
            partials[rate_name, control_name] = \
                (self.rate_jacs[name] / dt_dstau_x_size)[r_nz, c_nz]

            r_nz, c_nz = self.rate2_jac_rows[name], self.rate2_jac_cols[name]
            partials[rate2_name, control_name] = \
                (self.rate2_jacs[name] / dt_dstau_x_size ** 2)[r_nz, c_nz]
class ControlGroup(Group):
    """Group providing interpolated control values/rates for a phase and,
    for optimized controls, the independent variables and design variables
    at the control input nodes."""

    def initialize(self):
        self.options.declare('control_options', types=dict,
                             desc='Dictionary of options for the dynamic controls')
        self.options.declare('time_units', default=None, allow_none=True, types=string_types,
                             desc='Units of time')
        self.options.declare('grid_data', types=GridData, desc='Container object for grid info')

    def setup(self):
        # NOTE(review): this IndepVarComp is rebound below when there are
        # optimized controls and never added to the model otherwise --
        # presumably dead code; confirm before removing.
        ivc = IndepVarComp()
        # opts = self.options
        gd = self.options['grid_data']
        control_options = self.options['control_options']
        time_units = self.options['time_units']

        # Nothing to do when the phase has no dynamic controls
        if len(control_options) < 1:
            return

        opt_controls = [name for (name, opts) in iteritems(control_options) if opts['opt']]

        if len(opt_controls) > 0:
            ivc = self.add_subsystem('indep_controls', subsys=IndepVarComp(),
                                     promotes_outputs=['*'])

        self.add_subsystem(
            'control_interp_comp',
            subsys=ControlInterpComp(time_units=time_units, grid_data=gd,
                                     control_options=control_options),
            promotes_inputs=['*'],
            promotes_outputs=['*'])

        for name, options in iteritems(control_options):
            if options['opt']:
                num_input_nodes = gd.subset_num_nodes['control_input']

                desvar_indices = list(range(gd.subset_num_nodes['control_input']))
                # Remove fixed endpoints from the design variable indices
                if options['fix_initial']:
                    desvar_indices.pop(0)
                if options['fix_final']:
                    desvar_indices.pop()

                if len(desvar_indices) > 0:
                    coerce_desvar = CoerceDesvar(gd.subset_num_nodes['control_disc'],
                                                 desvar_indices, options)

                    # Substitute infinite bounds when none were given
                    lb = -INF_BOUND if coerce_desvar('lower') is None else coerce_desvar('lower')
                    ub = INF_BOUND if coerce_desvar('upper') is None else coerce_desvar('upper')

                    self.add_design_var(name='controls:{0}'.format(name),
                                        lower=lb,
                                        upper=ub,
                                        scaler=coerce_desvar('scaler'),
                                        adder=coerce_desvar('adder'),
                                        ref0=coerce_desvar('ref0'),
                                        ref=coerce_desvar('ref'),
                                        indices=desvar_indices)

                ivc.add_output(name='controls:{0}'.format(name),
                               val=options['val'],
                               shape=(num_input_nodes, np.prod(options['shape'])),
                               units=options['units'])
| 43.687919 | 98 | 0.571549 |
cf069335756ba5c958e0b1f2d3d09bc66cfb0165 | 5,195 | py | Python | Scrapers/commons.py | Eric-Canas/bet-arbitrage-analysis | ac1b6939a7688bcefeb3e53fbd584adf3a2cb16c | [
"MIT"
] | 3 | 2021-07-21T16:20:01.000Z | 2022-03-27T18:25:09.000Z | Scrapers/commons.py | Eric-Canas/bet-arbitrage-analysis | ac1b6939a7688bcefeb3e53fbd584adf3a2cb16c | [
"MIT"
] | 1 | 2021-06-28T18:08:19.000Z | 2021-06-28T18:35:26.000Z | Scrapers/commons.py | Eric-Canas/bet-arbitrage-analysis | ac1b6939a7688bcefeb3e53fbd584adf3a2cb16c | [
"MIT"
] | null | null | null | from Constants import *
import numpy as np
# Sports whose odds information is easily computable; entries are URL path
# fragments on the scraped site
sports = ['tenis-mesa/tenis-de-mesa', 'e-sports', 'baloncesto',
          'rugby', 'tenis', 'futbol-americano', '/boxeo-mma/boxeo',
          'boxeo-mma/ufc-mma', 'futbol']
# Sports where a draw outcome is not possible (two-outcome bets); these are
# matched against the `sport` argument passed to parse()
sports_with_no_draw = ['tenis-de-mesa', 'baloncesto', 'tenis', 'e-sports']
def _all_numeric(bets):
    """Return True if every odd in ``bets`` can be interpreted as a float."""
    try:
        # NOTE: ``np.float`` was removed in NumPy 1.24; plain ``float`` is
        # the correct dtype alias here.
        np.array(bets, dtype=float)
        return True
    except (ValueError, TypeError):
        return False


def parse(response, sport='Some sport'):
    """
    Parse the content of the HTML and transform it to a structured dictionary

    :param response: HTML response. Web-page HTML downloaded
    :param sport: str. Sport identifier; used to decide whether a draw
        outcome is expected and for log messages
    :return: Dictionary. Structured dictionary with the response, keyed by
        day, each day holding a list of bet dictionaries
    """
    # Take the different boxes containing each day matches
    each_day_matches = response.xpath(MATCHES_XPATH)
    # If it was an error in the web-page return an empty dict
    if len(each_day_matches) == 0:
        return {}
    # Extract the date of each of those matches
    dates = [str(valid_match.xpath(DAY_OF_MATCH_XPATH + '/text()')[0])
             for valid_match in each_day_matches]
    # Transform to an organized dict with one list of match selectors per day
    table_of_days = {}
    for day, day_matches in zip(dates, each_day_matches):
        if day in table_of_days:
            table_of_days[day].extend(day_matches.xpath(TABLE_OF_DAY_XPATH))
        else:
            table_of_days[day] = day_matches.xpath(TABLE_OF_DAY_XPATH)
    # Count of how many bets had incomplete information
    discarded = 0
    # For each day check all its matches
    for day, matches in table_of_days.items():
        for i, match in enumerate(matches):
            # Extract all information from the bet
            hour = str(match.xpath(HOUR_OF_MATCH_XPATH + '/text()')[0])
            teams = [str(team) for team in match.xpath(TEAMS_XPATH + '/text()')]
            bets = [str(bet) for bet in match.xpath(BETS_XPATH + '/text()')]
            houses = [get_house_name(str(house)) for house in match.xpath(HOUSE_XPATH)]
            link = match.xpath(LINK_XPATH)[0]
            # Validate that all the odds are numeric
            if not _all_numeric(bets):
                if sport in sports_with_no_draw:
                    # Draw is impossible in this sport: keep only the two
                    # outer odds and re-validate
                    bets = [bets[0], bets[-1]]
                if not _all_numeric(bets):
                    # Still incomplete: mark for removal
                    table_of_days[day][i] = INCOMPLETE_INFO
                    discarded += 1
                    continue
            # Compose and save the bet
            bet = {HOUR: hour, TEAMS: teams, BETS: bets, HOUSES: houses, LINK: link}
            table_of_days[day][i] = bet
    # Clean the incomplete information of the dictionary
    table_of_days = clean_dictionary(table_of_days)
    # If any bet was discarded print how many were discarded
    if discarded > 0:
        print("Discarded " + str(discarded) + " bets in " + sport +
              " by incomplete information.")
    return table_of_days
def clean_dictionary(dictionary):
    """
    Clean the dictionary of the incomplete information that it could contain

    :param dictionary: Dictionary. Dictionary containing all the bets of a sport
    :return: Dictionary. The input dictionary cleared
    """
    cleaned = {}
    for date, matches in dictionary.items():
        # Keep (shallow copies of) the matches that are not placeholders
        valid_matches = [dict(match) for match in matches
                         if match != INCOMPLETE_INFO]
        # Drop dates that end up with no valid matches at all
        if valid_matches:
            cleaned[date] = valid_matches
    return cleaned
def get_house_name(house_class):
    """
    Translate the betting house class code to the human readable name of the house

    :param house_class: Complete class name of the div where the best house was encoded
    :return: str. The human readable name of the house or houses offering the best price.
    """
    # Extract from the complete class string the concrete house code(s);
    # the slice strips the fixed prefix and the '-ES' locale suffix
    house_codes = house_class[len(HOUSE_CLASS_1 + ' bg'):-len('-ES')].replace(',', ' ').split()
    try:
        # Translate it to human readable code
        house_names = ' or '.join([HOUSE_CODE[code] for code in house_codes])
        return house_names
    # NOTE(review): bare except; a KeyError on an unknown code is the
    # expected failure here -- consider narrowing
    except:
        # If the code was not known alert about it and return the str with that information
        house_alert = 'Unknown-House-Code(' + str(house_codes) + ')'
        print(house_alert)
        return house_alert
ea60e3a983cb60a3613d48e77127d243f50e3466 | 46,973 | py | Python | pgdata.py | jpmorgen/precisionguide | d90936f3cfbaa45102ddc57a32696f3fbb7df33f | [
"MIT"
] | null | null | null | pgdata.py | jpmorgen/precisionguide | d90936f3cfbaa45102ddc57a32696f3fbb7df33f | [
"MIT"
] | null | null | null | pgdata.py | jpmorgen/precisionguide | d90936f3cfbaa45102ddc57a32696f3fbb7df33f | [
"MIT"
] | null | null | null | """Define the base data containing classes for precisionguide system
These are designed to be portable across any platform, since they do
not depend on the specifics of the telescsope control system. Thus,
data can be recorded using this code on, e.g., a Windows system and
uploaded to any other platform for detailed analysis using this same
code.
"""
from copy import deepcopy
import numpy as np
from scipy.signal import convolve2d
from scipy.ndimage.measurements import center_of_mass
from astropy import log
from astropy.io import fits
from astropy import units as u
from astropy.time import Time
from ccdmultipipe.utils.ccddata import FbuCCDData
_NotFound = object()
###########################
# Decorators #
###########################
class pgproperty(property):
    """Caching property decorator with auto setting and None reset

    . myprop = None resets system and forces the getter to run
    . when getter runs and returns a non-None value, that value is
      cached and returned instead of running the getter
    . myprop = 'some_value' overrides the getter -- 'some_value' is
      cached and returned on subsequent queries of myprop
    . No setter is required to set the property, but if one is
      provided, its return value is used as the cached value that
      overrides the getter (i.e., no knowledge of the internal workings
      of the system is required to make it work)
    . Deleters can be specified to do something before the cache
      reference is deleted.  Deletion is not permanent -- the next time
      myprop is queried, the getter is run

    . shape_check class variable: None, 0, or tuple
        This class variable affects the treatment of the value
        returned by the setter or, if no setter is present, the value
        provided by the user.
        . None: the value is cached without modification
        . 0: value is converted to a numpy array with np.asarray()
        . tuple: like 0 but shape of value must match shape_check or
          a ValueError is raised

    Inspired by `astropy.utils.lazyproperty` and `property_manager
    <https://github.com/xolox/python-property-manager>`

    .NOTE: This cache operates on the top-level property.  In a
    complicated multi-inheritance system with inter-connected property
    like `pgdata`, some time savings in calculation of quantities
    could be achieved by caching property at each inheritance level
    individually (e.g. keying on property name, class name and module).
    The side-effect of caching all intermediate results would be more
    memory use, since all intermediate property would have long-lived
    references -- not generally appropriate.
    """
    shape_check = None

    def __init__(self, fget,
                 fset=None,
                 fdel=None,
                 doc=None):
        super().__init__(fget, fset, fdel, doc)
        # Cache key in the instance __dict__ is the getter's name
        self._key = self.fget.__name__
        # Sentinel that distinguishes "getter returned None" (cached) from
        # "never computed"
        self._none_proxy = 'None proxy'

    def npval(self, val):
        """Turn val into np.array if shape_check is non-None.  Checks
        shape of resulting array if shape_check is a tuple"""
        if self.shape_check is None or val is None:
            return val
        val = np.asarray(val)
        if (self.shape_check != 0
            and val.shape != self.shape_check):
            raise ValueError(f'value "{val}" is not a sensible {self.shape_check}-dimensional coordinate')
        return val

    def __get__(self, obj, owner=None):
        try:
            obj_dict = obj.__dict__
            val = obj_dict.get(self._key, _NotFound)
            if val is self._none_proxy:
                # Getter previously ran and legitimately returned None
                return None
            if val is _NotFound or val is None:
                # Either never computed or explicitly reset with None:
                # run the getter and cache its result
                val = self.fget(obj)
                if val is None:
                    obj_dict[self._key] = self._none_proxy
                    return None
                val = self.npval(val)
                obj_dict[self._key] = val
            return val
        except AttributeError:
            # Class-level access (obj is None) returns the descriptor itself
            if obj is None:
                return self
            raise

    def __set__(self, obj, val):
        obj_dict = obj.__dict__
        if self.fset:
            # The setter's return value becomes the cached value
            val = self.fset(obj, val)
        val = self.npval(val)
        obj_dict[self._key] = val

    def __delete__(self, obj):
        # Run user deleter (if any) before dropping the cache reference
        if self.fdel:
            self.fdel(obj)
        obj.__dict__.pop(self._key, None)  # Delete if present
class pgcoordproperty(pgproperty):
    """`pgproperty` specialized for 2-element (y, x) coordinates: cached
    values are coerced to numpy arrays and must have shape (2,)."""
    shape_check=(2,)
#######################
# Primary Objects #
#######################
def center_quality_checker(value):
    """Validate a center-quality value.

    ``None`` passes through unchanged (returned as None); otherwise the
    value must be an int in the inclusive range 0--10 and is returned
    as-is.  Anything else raises ValueError.
    """
    if value is None:
        return None
    is_valid = isinstance(value, int) and 0 <= value <= 10
    if not is_valid:
        raise ValueError('quality must be an integer value from 0 to 10')
    return value
class PGCenter():
    """Base class for containing object center and desired center

    Parameters
    ----------
    obj_center : tuple or array
        Center of object, (y, x) in *unbinned* coordinates referenced to the
        *origin* of the CCD.  Note Python C array indexing into 2D array

    desired_center : tuple-like
        Desired center of object, (y, x) in *unbinned* coordinates
        referenced to the *origin* of the CCD.  Note Python C array
        indexing into 2D array

    center_quality : int
        0 -- 10 ranking of quality of ``obj_center``
        determination, with 1 = very bad, 10 = very good, 0 = invalid

    tavg : `~astropy.time.Time`
        Average time of observation.  For simple exposures, this is
        the midpoint.  For exposures integrated from several segments
        one might imagine this would be the average of the exposure
        efficiency as a function of time.  However Rots et al (A&A 574,
        A36 2015) note no standard is defined.  This turns into the
        DATE-AVG FITS keyword
    """
    def __init__(self,
                 obj_center=None,
                 desired_center=None,
                 center_quality=None,
                 tavg=None):
        self.obj_center = obj_center
        self.desired_center = desired_center
        self.center_quality = center_quality
        self.tavg = tavg

    # Our decorator does everything we need :-) -- the `pass` getters make
    # each property purely user-settable with pgproperty's caching/validation
    @pgcoordproperty
    def obj_center(self):
        pass

    @pgcoordproperty
    def desired_center(self):
        pass

    @pgproperty
    def center_quality(self):
        pass

    @center_quality.setter
    def center_quality(self, value):
        """Validate and cache the center quality (0--10 int or None)"""
        return center_quality_checker(value)

    @pgproperty
    def tavg(self):
        pass
# We want to use astropy's CCDData to store our
# Pretty much all CCD-like detectors present their raw data in adu.
# In the event this needed to change for a particular system, insert a
# subclass redefining raw_unit anywhere on the PGData inheritance
# chain. That raw_unit will override this one. Note, doing this as a
# class variable is necessary because of the CCDData read classmethod
class ADU():
    """Simple class to define the class variable `fallback_unit` =
    `~astropy.units.adu` for classes using the
    :class:`ccdmultipipe.FbuCCDData` system
    """
    # Nearly all CCD-like detectors report raw data in adu
    fallback_unit = u.adu
class PGData(ADU, FbuCCDData):
"""Base class for image data in the `precisionguide` system
This class stores CCD data and defines methods and property to
calculate/store four primary quantities: :prop:`obj_center`,
:prop:`desired_center`, :prop:`center_quality`, and :prop:`tavg`, as
described in the documentation.
CCD data are stored using `astropy.nddata.CCDData` as its base
class with the addition of a class,
:class:`ccdmultipipe.FbuCCDData`, that ensures the
`~astropy.nddata.CCDData` will always be defined with a
:class:`astropy.units.Unit`. Since all or nearly all CCD-like
detectors present their raw data in `~astropy.units.adu`, this is
the default :class:`~astropy.units.Unit` assumed by
:class:`PGData`. In the event this needs to change for a
particular system, a class similar to :class:`ADU` (e.g. "Count")
could be defined and inserted into the inheritance chain
superclass and adds properties and methods that calculate/store
four quantities: :prop:`obj_center`, :prop:`desired_center`,
:prop:`center_quality`, and :prop:`tavg`. These four quantities are
intended to be returned in a :class:`PGCenter` object for
subsequent lightweight storage and use in the precisionguide
system. Because precisionguide controls the absolute position of
an object (or FOV center) on the CCD, :prop:`desired_center` and
:prop:`obj_center` always read in *unbinned* pixel values
referenced to the origin of the CCD itself. Thus, the image input
to :class:`PGData` must include both the image FITS header and
image array(s)
Parameters
----------
"""
    def __init__(self,
                 data,
                 obj_center=None,
                 desired_center=None,
                 center_quality=None,
                 recalculate=False,
                 subframe_origin=None,
                 binning=None,
                 date_obs_key='DATE-OBS',
                 exptime_key='EXPTIME',
                 darktime_key='DARKTIME',
                 copy=False,
                 **kwargs):
        """Initialize from raw data or another PGData instance.

        When ``data`` is itself a PGData, its cached centers, quality,
        subframe/binning metadata and FITS keyword names are carried over
        (deep-copied when ``copy`` is True)."""
        # Pattern after NDData init but skip all the tests
        if isinstance(data, PGData):
            # Sigh.  We have to undo the convenience of our pgproperty
            # lest we trigger the calculation of property, which leads
            # to recursion problems.  Reading __dict__ directly avoids
            # running the pgproperty getters.
            obj_dict = data.__dict__
            obj_center = obj_dict.get('obj_center')
            desired_center = obj_dict.get('desired_center')
            center_quality = data._center_quality
            subframe_origin = data.subframe_origin
            binning = data.binning
            date_obs_key = data.date_obs_key
            exptime_key = data.exptime_key
            darktime_key = data.darktime_key
            if copy:
                obj_center = deepcopy(obj_center)
                desired_center = deepcopy(desired_center)
                center_quality = deepcopy(center_quality)
                subframe_origin = deepcopy(subframe_origin)
                binning = deepcopy(binning)
                date_obs_key = deepcopy(date_obs_key)
                exptime_key = deepcopy(exptime_key)
                darktime_key = deepcopy(darktime_key)
        super().__init__(data, copy=copy, **kwargs)
        self.recalculate = recalculate  # May be not appropriate at this level
        self.obj_center = obj_center
        self.desired_center = desired_center
        self._center_quality = center_quality
        self.subframe_origin = subframe_origin
        self.binning = binning
        self.date_obs_key = date_obs_key
        self.exptime_key = exptime_key
        self.darktime_key = darktime_key
        # Sentinel values used when a center cannot be determined
        self._invalid_obj_center = (-99, -99)
        self._invalid_desired_center = (-99, -99)
        self._invalid_center_quality = 0
    def _init_args_copy(self, kwargs):
        """The NDData _slice and _arithmetic methods create new class
        instances by running the __init__ method with the appropriate
        kwargs to copy over relevant property.  Thus we need to copy all
        our property into a keyword dictionary too."""
        # Grab calculated property directly from our pgproperty
        # decorator system so it doesn't trigger calculations!
        obj_dict = self.__dict__
        obj_center = obj_dict.get('obj_center')
        desired_center = obj_dict.get('desired_center')
        kwargs['obj_center'] = obj_center
        kwargs['desired_center'] = desired_center
        kwargs['center_quality'] = self._center_quality
        kwargs['subframe_origin'] = self.subframe_origin
        kwargs['binning'] = self.binning
        kwargs['date_obs_key'] = self.date_obs_key
        kwargs['exptime_key'] = self.exptime_key
        kwargs['darktime_key'] = self.darktime_key
        return kwargs
    def _arithmetic(self, *args, **kwds):
        """Insert our property when new class instances are created using
        arithmetic"""
        result, kwargs = super()._arithmetic(*args, **kwds)
        kwargs = self._init_args_copy(kwargs)
        return result, kwargs
    def _slice(self, item):
        """Override NDSlicingMixin definition to move subframe origin"""
        kwargs = super()._slice(item)
        kwargs = self._init_args_copy(kwargs)
        # assumes item is a (yslice, xslice) pair -- TODO confirm other
        # slicing forms are not passed through here
        yslice = item[0]
        xslice = item[1]
        # slice.start is None for open-ended slices; treat as origin 0
        yorg = yslice.start or 0
        xorg = xslice.start or 0
        kwargs['subframe_origin'] = (yorg, xorg)
        return kwargs
    @pgcoordproperty
    def obj_center(self):
        """Center of object, (Y, X) in pixel

        Coordinates are referenced to the image stored in the
        :prop:`data` property.  This image may be a binned subframe of
        the full detector.  Use :meth:`unbinned(self.obj_center)` to obtain
        the coordinates in raw detector pixels.  Quality of center
        determination must be set as well.  Base class uses out-of-bounds
        value (-99, -99) for center and 0 for center_quality

        Results are stored using the :class:`pgcoordproperty`
        decorator system.  See documentation of that class for
        explanation of features.
        """
        # Base class cannot find an object: return sentinel and mark invalid
        obj_center = self._invalid_obj_center
        self.center_quality = self._invalid_center_quality
        return obj_center
    @pgcoordproperty
    def desired_center(self):
        """Desired center of object (Y, X).  Base class uses center of
        data array.  As with :prop:`obj_center`, :prop:`desired_center` is
        referenced to the data stored in :prop:`data`

        Results are stored using the :class:`pgcoordproperty`
        decorator system.  See documentation of that class for
        explanation of features.
        """
        # Here is where the complicated desired_center calculation is
        # done:
        npshape = np.asarray(self.data.shape)
        desired_center = npshape/2
        return desired_center
    @property
    def center_quality(self):
        """Quality of center determination on a 0 to 10 integer scale.

        Quality should always be set in the :prop:`obj_center` setter
        """
        if self._center_quality is None:
            # Trigger the obj_center getter, which sets _center_quality
            # as a side effect
            self.obj_center
        return self._center_quality

    @center_quality.setter
    def center_quality(self, value):
        # Validate (int 0--10 or None) before storing
        self._center_quality = center_quality_checker(value)
@pgproperty
def tavg(self):
"""`~astropy.time.Time` Average time of observation, See
:param:`tavg` of :class:`PGCenter.`"""
tavg_str = self.meta.get('date-avg')
if tavg_str is not None:
return Time(tavg_str, format='fits')
try:
exptime = self.meta[self.exptime_key.lower()]
exptime *= u.s
dateobs_str = self.meta[self.date_obs_key.lower()]
return Time(dateobs_str, format='fits') + exptime/2
except:
log.error(f'Cannot read sufficient combination of '
'{self.date_obs_key}, {self.darktime_key} '
'and/or {self.exptime_key} keywords from FITS '
'header to establish tavg')
return None
@tavg.setter
def tavg(self, val):
if not isinstance(val, Time):
raise ValueError('tavg must be of type `~astropy.time.Time`')
    @pgcoordproperty
    def binning(self):
        """Image binning in Y,X order.

        NOTE: this needs to be overridden in a subclass with the
        actual FITS binning keywords used (which are unfortunately
        not in any FITS standard).  E.g.:

            >>> binning = (self.meta['YBINNING'],
            >>>            self.meta['XBINNING'])
            >>> return binning
        """
        # Base class deliberately returns None so the pgproperty cache
        # stays unset until a subclass supplies real keywords
        pass
    @pgcoordproperty
    def subframe_origin(self):
        """Subframe origin in *unbinned* pixels with full CCD origin =
        (0,0).  Y,X order

        NOTE: this needs to be overridden in a subclass with the
        actual FITS keywords used (which are unfortunately
        not in any FITS standard).  E.g.:

            >>> subframe_origin = (self.meta['YORGSUBF'],
            >>>                    self.meta['XORGSUBF'])
            >>> subframe_origin = np.asarray(subframe_origin)
            >>> subframe_origin *= self.binning
            >>> return subframe_origin
        """
        # Base class deliberately returns None so the pgproperty cache
        # stays unset until a subclass supplies real keywords
        pass
def coord_unbinned(self, coords):
"""Returns pixel coords referenced to full CCD given internally stored binning/subim info"""
coords = np.asarray(coords)
unbinned = self.binning * coords + self.subframe_origin
return unbinned.astype(int)
def x_unbinned(self, coords):
"""Returns x coord referenced to full CCD given internally stored binning/subim info"""
coords = np.asarray(coords)
unbinned = self.binning[1] * coords + self.subframe_origin[1]
return unbinned.astype(int)
def y_unbinned(self, coords):
"""Returns y coord referenced to full CCD given internally stored binning/subim info"""
coords = np.asarray(coords)
unbinned = self.binning[0] * coords + self.subframe_origin[0]
return unbinned.astype(int)
    def coord_binned(self, coords, limit_edges=False):
        """Assuming coords are referenced to full CCD, return location in
        binned coordinates relative to the subframe origin.
        limit_edges=True sets output value(s) that would otherwise fall
        outside the binned image to the appropriate boundary value.

        NOTE(review): with limit_edges=False the result is returned as
        float; with limit_edges=True it is int.  The clamp also uses the
        shape value itself (not shape-1) and does not clamp negative
        coordinates -- confirm this is intended.
        """
        coords = np.asarray(coords)
        binned_coords = (coords - self.subframe_origin) / self.binning
        if not limit_edges:
            return binned_coords
        s = np.asarray(self.shape)
        if len(coords.shape) == 1:
            # Single (y, x) coordinate: clamp each axis to the image shape
            binned_coords[0] = np.min((s[0], binned_coords[0]))
            binned_coords[1] = np.min((s[1], binned_coords[1]))
            return binned_coords.astype(int)
        # Multiple coordinates: clamp all out-of-range values per axis
        lidx = np.flatnonzero(binned_coords[0, :] > s[0])
        binned_coords[0, lidx] = s[0]
        lidx = np.flatnonzero(binned_coords[1, :] > s[1])
        binned_coords[1, lidx] = s[1]
        return binned_coords.astype(int)
    def x_binned(self, xs, **kwargs):
        """Assuming x coords are referenced to full CCD, return location in
        binned coordinates relative to the subframe origin.
        limit_edges=True sets output value(s) that would otherwise fall
        outside the binned image to the appropriate boundary value.

        NOTE(review): the array branch passes an (n, 2) list of pairs to
        coord_binned, whose limit_edges path indexes [0, :]/[1, :] as if
        the layout were (2, n) -- verify limit_edges with array input.
        """
        if np.isscalar(xs):
            # Pair the x with a dummy y so coord_binned can be reused
            unbinned_coords = np.asarray((-99, xs))
            binned_coords = self.coord_binned(unbinned_coords, **kwargs)
            xs = binned_coords[1]
        else:
            xs = np.asarray(xs)
            ys = len(xs)*(-99,)
            unbinned_coords = list(zip(ys, xs))
            binned_coords = self.coord_binned(unbinned_coords, **kwargs)
            xs = binned_coords[:, 1]
        return xs
    def y_binned(self, ys, **kwargs):
        """Assuming y coords are referenced to full CCD, return location in
        binned coordinates relative to the subframe origin.
        limit_edges=True sets output value(s) that would otherwise fall
        outside the binned image to the appropriate boundary value.

        NOTE(review): same (n, 2) vs (2, n) layout concern as x_binned
        when limit_edges is used with array input -- verify.
        """
        if np.isscalar(ys):
            # Pair the y with a dummy x so coord_binned can be reused
            unbinned_coords = np.asarray((ys, -99))
            binned_coords = self.coord_binned(unbinned_coords, **kwargs)
            ys = binned_coords[0]
        else:
            ys = np.asarray(ys)
            xs = len(ys)*(-99,)
            unbinned_coords = list(zip(ys, xs))
            binned_coords = self.coord_binned(unbinned_coords, **kwargs)
            ys = binned_coords[:, 0]
        return ys
def im_unbinned(self, a):
    """Returns an unbinned version of a. a must be same shape as self.data
    NOTE: to preserve flux, divide by (np.prod(self.binning)
    """
    if a is None:
        return None
    assert a.shape == self.data.shape
    # Don't bother if we are already unbinned
    if np.sum(self.binning) == 2:
        return a
    newshape = self.binning * a.shape
    # Nearest-neighbor expansion via fractional-step slicing into mgrid.
    # From http://scipy-cookbook.readthedocs.io/items/Rebinning.html
    assert len(a.shape) == len(newshape)
    slices = [ slice(0,old, float(old)/new)
               for old,new in zip(a.shape,newshape) ]
    coordinates = np.mgrid[slices]
    indices = coordinates.astype('i') #choose the biggest smaller integer index
    unbinned = a[tuple(indices)]
    # Check to see if we need to make a larger array into which to
    # plop unbinned array
    if np.sum(self.subframe_origin) > 0:
        # Note subframe origin reads in binned pixels
        # NOTE(review): this contradicts the subframe_origin docstring,
        # which says it is stored in *unbinned* pixels -- confirm which
        # convention self.unbinned() expects here.
        origin = self.unbinned(self.subframe_origin)
        full_unbinned = np.zeros(origin + np.asarray(unbinned.shape))
        full_unbinned[origin[0]:, origin[1]:] = unbinned
        unbinned = full_unbinned
    return unbinned
@pgproperty
def self_unbinned(self):
    """Returns an unbinned version of this object. NOTE: just as with the
    primary object, if this object needs to be modified, it should be
    copied first, particularly since, in the case the image is not
    binned, it might just point to self
    """
    if (self.binning.sum() == 2
        and self.subframe_origin.sum() == 0):
        # We are already unbinned, don't bother to copy
        return self
    # If we made it here, we need to unbin: copy the whole object and
    # expand data, mask and uncertainty with im_unbinned
    self_unbinned = deepcopy(self)
    self_unbinned.data = self.im_unbinned(self.data)
    self_unbinned.mask = self.im_unbinned(self.mask)
    self_unbinned.uncertainty = self.im_unbinned(self.uncertainty)
    self_unbinned.binning = np.asarray((1,1))
    self_unbinned.subframe_origin = np.asarray((0,0))
    # --> NOTE we are not messing with any other metadata
    return self_unbinned
@property
def pgcenter(self):
    """Returns a :class:`PGCenter` object with the *unbinned* obj and
    desired center coordinates. Working in *unbinned* coordinates is
    essential for the internal workings of the precisionguide
    telescope control system, but rather confusing and abstract for
    the user in any other context.
    """
    # I may want to also export the binning information for
    # display purposes in precisionguide
    # coord_unbinned is presumably the inverse of coord_binned -- it is
    # defined outside this chunk
    return PGCenter(self.coord_unbinned(self.obj_center),
                    self.coord_unbinned(self.desired_center),
                    self.center_quality,
                    self.tavg)
def _card_write(self):
    """Writes FITS header cards for obj and desired center coordinates in
    *binned* coordinates (i.e., as they would likely present in
    image analysis software showing the image)
    """
    # Sanitize non-finite centers before committing them to the header
    if not np.all(np.isfinite(self.obj_center)):
        self.obj_center = self._invalid_obj_center
        log.warning(f'non-finite obj_center found, converting to '
                    f'{self.obj_center}')
    # Note pythonic y, x coordinate ordering
    self.meta['OBJ_CR0'] = (self.obj_center[1], 'Calculated object '
                            'center X')
    self.meta['OBJ_CR1'] = (self.obj_center[0], 'Calculated object '
                            'center Y')
    self.meta['DES_CR0'] = (self.desired_center[1], 'Desired center X')
    self.meta['DES_CR1'] = (self.desired_center[0], 'Desired center Y')
    # NOTE(review): `or` treats a legitimate quality of 0 as falsy and
    # replaces it with the invalid sentinel -- confirm that is intended.
    self.meta['HIERARCH CENTER_QUALITY'] = (
        self.center_quality or self._invalid_center_quality,
        'Quality on 0-10 scale '
        'of center determination')
    # As per https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf
    if not self.meta.get('DATE-AVG'):
        self.meta.insert('DATE-OBS',
                         ('DATE-AVG', self.tavg.value,
                          'midpoint of exposure, UT'),
                         after=True)
def write(self, *args, **kwargs):
    """Write the image, first recording center info in the FITS header."""
    self._card_write()
    super().write(*args, **kwargs)
class NoCenterPGD(PGData):
    """Sets :prop:`obj_center` to an invalid value and :prop:`center_quality` to 0."""
    pass
class FITSReaderPGD(PGData):
    """Read FITS keys to set defaults.

    Each center property first tries to read a previously recorded value
    from the FITS header (as written by :meth:`PGData._card_write`); when
    the key is missing or ``self.recalculate`` is set, the property
    returns None so subclasses can fall back to a real calculation.
    """
    # (A commented-out __init__-based implementation that re-read the
    # FITS header on every instantiation was removed here; the lazy
    # pgproperty pattern below supersedes it.)
    @pgproperty
    def obj_center(self):
        """Center of object, (Y, X), primed from OBJ_CR0/OBJ_CR1.

        Returns None (and clears center_quality) when the header has no
        usable value or recalculation was requested.

        Results are stored using the :class:`pgcoordproperty`
        decorator system. See documentation of that class for
        explanation of features.
        """
        try:
            assert not self.recalculate, ('Recalculation requested')
            log.debug('Trying to read center info from FITS header,')
            cx = self.meta['OBJ_CR0']
            cy = self.meta['OBJ_CR1']
            obj_center = np.asarray((cy, cx))
        except Exception as e:
            log.debug(str(e))
            log.debug('Not setting center from FITS header,')
            obj_center = None
            # Quality is meaningless without a center
            self.center_quality = None
        return obj_center
    @pgproperty
    def desired_center(self):
        """Desired center of object (Y, X), primed from DES_CR0/DES_CR1.

        Returns None when the header has no usable value or
        recalculation was requested.
        """
        try:
            assert not self.recalculate, ('Recalculation requested')
            log.debug('Trying to read desired center info from FITS header,')
            dx = self.meta['DES_CR0']
            dy = self.meta['DES_CR1']
            desired_center = np.asarray((dy, dx))
        except Exception as e:
            log.debug(str(e))
            log.debug('Not setting desired center from FITS header,')
            desired_center = None
        return desired_center
    @pgproperty
    def center_quality(self):
        """Quality of center determination, primed from CENTER_QUALITY.

        When the header has no usable value, evaluating obj_center is
        expected to set center_quality as a side effect; this getter
        then implicitly returns None.
        """
        try:
            assert not self.recalculate, ('Recalculation requested')
            log.debug('Trying to read center_quality info from FITS header,')
            q = self.meta['CENTER_QUALITY']
            return int(q)
        except Exception as e:
            log.debug(str(e))
            log.debug('Not setting center_quality from FITS header,')
            # obj_center should set center_quality
            self.obj_center
    @center_quality.setter
    def center_quality(self, value):
        """Validate quality before storage; None is stored as-is."""
        if value is not None:
            value = center_quality_checker(value)
        return value
#### In the end, I think CameraData is a bad idea, since this can all
#### go into and out of metadata. Unit in principle could go there
#### too (e.g. BUNIT), however it is implemented as separate property
#### in the underlying CCDData/NDData. No need to add that level of
#### complexity to these. FITS card metadata property with comments
#### is good enough.
###
#### Although CameraData is intended to be for containing just camera
#### information, make it a subclass of FbuCCDData so that the raw_unit
#### can be properly inserted when CCD images are read in.
###
#### I envision either the camera file being use with a camera name and
#### method to initialize all of the propriety, or just a new class being
#### prepared that has the property hand-coded as class variables
###class CameraData(FbuCCDData):
### raw_unit = u.adu
### camera_data_file = None # some kind of file that would contain this info
### camera_name = None
### camera_description = None
### full_naxis1 = None
### full_naxis2 = None
### satlevel = None
### nonlinlevel = None
### gain = None
### readnoise = None
###
### def __init__(self,
### *args,
### camera_data_file=None,
### camera_name=None,
### camera_description=None,
### raw_unit=None, # This will not affect the read classmethod
### full_naxis1=None,
### full_naxis2=None,
### satlevel=None,
### nonlinlevel=None,
### gain=None,
### readnoise=None,
### **kwargs):
### self.camera_data_file = camera_data_file or self.camera_data_file
### self.camera_name = camera_name or self.camera_name
### self.camera_description = camera_description or self.camera_description
### self.raw_unit = raw_unit or self.raw_unit
### self.full_naxis1 = full_naxis1 or self.full_naxis1
### self.full_naxis2 = full_naxis2 or self.full_naxis2
### self.satlevel = satlevel or self.satlevel
### self.nonlinlevel = nonlinlevel or self.nonlinlevel
### self.gain = gain or self.gain
### self.readnoise = readnoise or self.readnoise
### super().__init__(*args, fallback_unit=self.raw_unit, **kwargs)
###
### @classmethod
### def read(cls, filename,
### raw_unit=None,
### **kwargs):
### """Use ``raw_unit`` instead of ``fallback_unit``."""
### raw_unit = raw_unit or cls.raw_unit
### return super(CameraData, cls).read(filename,
### fallback_unit=raw_unit,
### **kwargs)
###
### def _card_write(self):
### """Write FITS header cards for camera
###
### """
### self.meta['GAIN'] = (self.gain, f'CCD charge gain '
### '{self.gain.unit.to_str()}')
### self.meta['SATLEVEL'] = (self.satlevel, f'CCD saturation level '
### '{self.satlevel.unit.to_str()}')
### self.meta['NONLIN'] = (self.nonlinlevel, f'CCD nonlinearity '
### 'level {self.nonlinlevel.unit.to_str()}')
### self.meta['RDNOISE'] = (self.readnoise, f'CCD readnoise '
### 'level {self.readnoise.unit.to_str()}')
###
###class SX694(CameraData):
### camera_name = 'SX694'
### camera_description = 'Starlight Xpress Trius SX694 mono, 2017 model version'
### raw_unit = u.adu
### # naxis1 = fastest changing axis in FITS primary image = X in
### # Cartesian thought
### # naxis1 = next to fastest changing axis in FITS primary image = Y in
### # Cartesian thought
### full_naxis1 = 2750*u.pix
### full_naxis2 = 2200*u.pix
### # 16-bit A/D converter
### satlevel = (2**16-1)*raw_unit
### nonlinlevel = (42000 - 1811)*raw_unit
### # Gain measured in /data/io/IoIO/observing/Exposure_Time_Calcs.xlsx.
### # Value agrees well with Trius SX-694 advertised value (note, newer
### # "PRO" model has a different gain value). Stored in GAIN keyword
### gain = 0.3 * u.electron/raw_unit
### # Sample readnoise measured as per ioio.notebk
### # Tue Jul 10 12:13:33 2018 MCT jpmorgen@byted
### # Readnoies is measured regularly as part of master bias creation and
### # stored in the RDNOISE keyword. This is used as a sanity check.
### readnoise = 15.475665*raw_unit
class Ex(PGData):
    """Minimal example: delegate obj_center to the parent calculation."""
    @pgcoordproperty
    def obj_center(self):
        # Pattern for all that want to read the FITS
        print('in obj_center')
        return super().obj_center
class ExampleFITSReaderPGD(FITSReaderPGD):
    """Example of layering real calculations on top of FITS-header defaults."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        print('in init')
    @pgcoordproperty
    def obj_center(self):
        # Pattern for all that want to read the FITS
        # NOTE(review): FITSReaderPGD(self) constructs a fresh reader from
        # this object just to consult the header-backed property --
        # confirm this is the intended pattern rather than super().
        t = FITSReaderPGD(self).obj_center
        if t is not None:
            return t
        # Real calculation starts here: use the desired center, full quality
        obj_center = self.desired_center
        self.center_quality = 10
        return obj_center
    @pgcoordproperty
    def desired_center(self):
        # Pattern for all that want to read the FITS
        t = FITSReaderPGD(self).desired_center
        if t is not None:
            return t
        # Real calculation starts here: geometric center of the image
        npshape = np.asarray(self.shape)
        desired_center = npshape/2
        return desired_center
    @pgproperty
    def center_quality(self):
        # Pattern for all that want to read the FITS
        t = FITSReaderPGD(self).center_quality
        if t is not None:
            return t
        # Fall back: evaluating obj_center sets center_quality as a
        # side effect; this getter then implicitly returns None
        self.obj_center
class CenteredPGD(PGData):
    """Sets :prop:`obj_center` to :prop:`desired_center`"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cached previous desired_center (unused here; presumably for
        # subclasses -- confirm)
        self._old_desired_center = None
    @pgcoordproperty
    def obj_center(self):
        """Object center is, by definition, the desired center (quality 10)."""
        self.center_quality = 10
        return self.desired_center
    @pgcoordproperty
    def desired_center(self):
        """Delegate to the parent's desired_center calculation."""
        return super().desired_center
    @desired_center.setter
    def desired_center(self, value):
        # Reset obj_center for proper recalculation
        del self.obj_center
        return value
class OffsetPGD(PGData):
    """Offsets :prop:`obj_center` by :prop:`center_offset` Note:
    proper object that determines center needs to be inherited before
    this for sensible center: e.g.:
    class MyPGD(PGDOffset, PGDCentered): pass"""
    def __init__(self, *args,
                 center_offset=None,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Center offset is used in the calculation of a
        # pgcoordproperty such that we want to delete that
        # property for recalculation in the event center_offset
        # changes. Particularly considering we end up in np.arrays
        # and the += operator increments the object numpy object
        # itself (e.g. pgd.center_offset += (10,10)), this is just a
        # little too complex for the pgcoordproperty system to
        # handle, so do it the olde fashionede waye
        if center_offset is None:
            center_offset = (0,0)
        self._old_center_offset = center_offset
        self.center_offset = center_offset
    @pgcoordproperty
    def _old_center_offset(self):
        """Previously applied offset, used to detect changes."""
        pass
    @pgcoordproperty
    def center_offset(self):
        """Offset (Y, X) added to the inherited obj_center."""
        pass
    @center_offset.setter
    def center_offset(self, value):
        if value is None:
            value = (0,0)
        value = np.asarray(value)
        # Only invalidate the cached obj_center when the offset changed
        if not np.all(self._old_center_offset == value):
            del self.obj_center
            # This copy is key for avoiding subtle assign by reference bugs
            self._old_center_offset = value.copy()
            self.obj_center = None
        return value
    @pgcoordproperty
    def obj_center(self):
        """Inherited obj_center shifted by center_offset."""
        return super().obj_center + self.center_offset
    def _card_write(self):
        """Writes FITS header cards in *binned* coordinates (i.e., as they
        would likely present in image analysis software showing the image)
        """
        # Note pythonic y, x coordinate ordering
        self.meta['OFF_CR0'] = (self.center_offset[1], 'Offset from '
                                'orig obj center X')
        self.meta['OFF_CR1'] = (self.center_offset[0], 'Offset from '
                                'orig obj center Y')
        super()._card_write()
class MaxPGD(PGData):
    """Object center = location of the brightest (maximum) pixel."""
    @pgcoordproperty
    def obj_center(self):
        """Return (Y, X) of the global maximum pixel; quality fixed at 6."""
        self.center_quality = 6
        # np.argmax(self) presumably works through the array interface of
        # the underlying data (flattened index) -- unraveled back to (Y, X)
        obj_center = np.unravel_index(np.argmax(self), self.shape)
        return obj_center
# --> Still working on this. Looks like I am going to need a
# readnoise property in the object if I really want to do this. Maybe
# we need a readnoise_keyword or something like that.... Very camera
# specific
# --> this could be property in pgdata and use biweight_local
class BackgroundPGD(PGData):
    """Hold background. Might need this to expand to hold other
    things. Util is a bit too general a name, but that is sort of
    what I would envision"""
    @pgproperty
    def background(self):
        """Scalar background estimate: median over the whole image."""
        return np.median(self)
class CenterOfMassPGD(BackgroundPGD):
    """Use simple center-of-mass calculation to find center of
    brightest object. Works best if pixels not likely to be part of
    the source are set to zero."""
    @pgcoordproperty
    def obj_center(self):
        """Center of mass (Y, X) of the background-subtracted image."""
        # Subtract the median background so blank sky does not pull the
        # centroid toward the middle of the frame.  (A commented-out
        # CCDData.subtract-based variant was removed here as dead code.)
        bsub = self.data - self.background
        return center_of_mass(bsub)
class BrightestPGD(PGData):
    """WIP: find the brightest source after seeing-scale smoothing."""
    def __init__(self, *args,
                 seeing=2,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Expected seeing scale for the (not yet implemented) smoothing
        # kernel -- units unconfirmed
        self.seeing = seeing
    @pgcoordproperty
    def obj_center(self):
        # https://keflavich-astropy.readthedocs.io/en/convolve_fft_profiling/convolution/performance.html
        # suggests that astropy convolv is only needed when their are NaNs
        # Still working on this
        # NOTE(review): self.center_offset is not defined on this class;
        # as written this raises AttributeError unless OffsetPGD is also
        # in the MRO -- looks like leftover copy/paste from OffsetPGD.
        return super().obj_center + self.center_offset
class MaxImPGD(PGData):
    """Handles MaxIm DL :prop:`binning` and :prop:`subframe_origin`"""
    # It is important to just read these once, since we may modify
    # them in the object before write
    @pgcoordproperty
    def binning(self):
        """Image binning in Y,X order"""
        binning = (self.meta['YBINNING'],
                   self.meta['XBINNING'])
        binning = np.asarray(binning)
        return binning
    @pgcoordproperty
    def subframe_origin(self):
        """Subframe origin in *unbinned* pixels with full CCD origin = (0,0). Y,X order"""
        subframe_origin = (self.meta['YORGSUBF'],
                           self.meta['XORGSUBF'])
        subframe_origin = np.asarray(subframe_origin)
        # Header stores the origin in binned pixels; convert to unbinned
        subframe_origin *= self.binning
        return subframe_origin
    def _card_write(self):
        """Write FITS cards unique to MaxImPGD.

        BUGFIX: X/YORGSUBF are stored in *binned* pixels (the
        subframe_origin getter multiplies by the binning on read), so
        convert back before writing.  Previously the unbinned value was
        written directly, corrupting the origin on a read/write
        round trip whenever binning > 1.
        """
        # Note pythonic y, x coordinate ordering
        self.meta['XBINNING'] = self.binning[1]
        self.meta['YBINNING'] = self.binning[0]
        binned_origin = self.subframe_origin // self.binning
        self.meta['XORGSUBF'] = binned_origin[1]
        self.meta['YORGSUBF'] = binned_origin[0]
        super()._card_write()
    def write(self, *args, **kwargs):
        """Write the image, recording MaxIm cards first."""
        self._card_write()
        super().write(*args, **kwargs)
if __name__ == "__main__":
    # Ad-hoc interactive smoke tests against local data files; most
    # experiments are kept commented out for reference.
    log.setLevel('DEBUG')
    #pgc = PGCenter()
    #pgc.obj_center = (1,1)
    #print(pgc.obj_center)
    ##pgc.obj_center = 1
    ###pgc = PGCenter((1,2), (3,4))
    #pgc = PGCenter([1,2], (3,4))
    #print(pgc.obj_center, pgc.desired_center)
    #pgd = PGData()
    fname = '/data/io/IoIO/raw/2020-07-15/HD87696-0016_Na_off.fit'
    class MyPGD(OffsetPGD, CenteredPGD, PGData):
        pass
#pgd = PGData.read(fname)
#print(pgd.obj_center, pgd.desired_center)
#pgd.obj_center = (1,1)
#print(pgd.obj_center, pgd.desired_center)
#pgd = PGData.read(fname, obj_center=(2,2))
#print(pgd.obj_center, pgd.desired_center)
#pgd.desired_center = (1,1)
#print(pgd.obj_center, pgd.desired_center)
#del pgd.desired_center
#print(pgd.obj_center, pgd.desired_center)
#pgd.obj_center = None
#print(pgd.obj_center, pgd.desired_center)
#
#pgd = CenteredPGD.read(fname)
#print(pgd.obj_center, pgd.desired_center)
#pgd = MyPGD.read(fname)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.center_offset += np.asarray((10, -10))
#pgd.center_offset += (10, -10)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.center_offset = None
#pgd.desired_center = (3,3)
#pgd.center_offset += (10, -30)
#pgd.center_offset = (10, -10)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.obj_center = (0,0)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.obj_center = None
##del pgd.obj_center
###pgd.calculated_center = (0,0)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.center_offset = pgd.center_offset + (10, -10)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.center_offset += (10, -30)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.write('/tmp/testfits.fits', overwrite=True)
#class MyPGD(OffsetPGD, CenteredPGD, PGData):
class MyPGD(OffsetPGD, CenteredPGD, MaxImPGD):#, SX694):
pass
#pgd = MyPGD.read(fname)
#pgd.center_offset += (10, -30)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd.write('/tmp/testfits.fits', overwrite=True)
#pgd = MyPGD(pgd)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd = MyPGD(pgd.data)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#pgd = Ex.read(fname)
#print(pgd.obj_center, pgd.desired_center)
#pgd = FITSReaderPGD.read('/tmp/testfits.fits')
#print(pgd.obj_center, pgd.desired_center)
#
#pgd = ExampleFITSReaderPGD.read('/tmp/testfits.fits')
#print(pgd.obj_center, pgd.desired_center, pgd.center_quality)
#
#pgd = ExampleFITSReaderPGD.read(fname)
#print(pgd.obj_center, pgd.desired_center, pgd.center_quality)
#pgd = CenterOfMassPGD.read(fname)
#print(pgd.obj_center, pgd.desired_center, pgd.center_quality)
#
#off_filt = '/data/io/IoIO/raw/2018-01-28/R-band_off_ND_filter.fit'
#pgd = CenterOfMassPGD.read(off_filt)
#print(pgd.obj_center, pgd.desired_center, pgd.center_quality)
#pgd.obj_center = (1,1)
#pgd.desired_center = (1,1)
#print(pgd.obj_center, pgd.desired_center)
#del pgd.desired_center
#print(pgd.obj_center, pgd.desired_center)
#pgd.center_offset = (20. -10)
#print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#log.setLevel('DEBUG')
#pgd = MaxPGD.read(fname)
#print(pgd.obj_center)
#wname = '/tmp/test_kwd_write.fits'
#pgd.write(wname, overwrite=True)
#pgd = MaxPGD.read(wname)
#print(pgd.obj_center, pgd.desired_center)
#pgd = PGData.read(wname, recalculate=True)
#print(pgd.obj_center, pgd.desired_center)
#class MyPGD(OffsetPGD, MaxPGD):
# pass
##pgd = MyPGD.read(fname, center_offset=(-10,20))
##print(pgd.obj_center, pgd.desired_center, pgd.center_offset)
#
#bname = '/data/io/IoIO/reduced/Calibration/2020-07-07_ccdT_-10.3_bias_combined.fits'
#bpgd = MyPGD.read(bname)
#epgd = MyPGD.read(bname, unit='adu')
#ccd = FbuCCDData.read(bname)
#pgd = MyPGD.read(fname)
#print(pgd.desired_center)
#
#ccd = CCDData.read(fname, unit='adu')
#
#rpgd = PGData(ccd.data, meta=ccd.meta)
#print(pgd.desired_center)
#
#pgd = PGData.read(fname)
#print(pgd.desired_center)
#
#pgd = PGData.read(fname, desired_center=(2,2))
#print(pgd.desired_center)
#
#dspgd = PGData.read(fname, desired_center=(100,100))
#print(dspgd.obj_center, dspgd.desired_center)
#
#dspgd.write('/tmp/test.fits', overwrite=True)
#
#pgdc = PGDCentered.read(fname)
#print(pgdc.obj_center, pgdc.desired_center)
#
#pgdo = PGDOffset.read(fname)
#print(pgdo.obj_center, pgdo.desired_center)
#
#pgdo = PGDOffset(pgd.data, meta=pgd.meta, center_offset=(20,10))
#print(pgdo.obj_center, pgdo.desired_center)
#
#pgdo = PGDOffset.read(fname, center_offset=(20,10))
#print(pgdo.obj_center, pgdo.desired_center)
#
#class MyPGD(PGDOffset, PGDCentered):
# pass
#
#mpgd = MyPGD.read(fname, center_offset=(20,10))
#print(mpgd.obj_center, mpgd.desired_center)
#
#print('done')
#bname = '/data/io/IoIO/reduced/Calibration/2020-07-07_ccdT_-10.3_bias_combined.fits'
##ccd = CCDData.read(bname)
#
#fname1 = '/data/Mercury/raw/2020-05-27/Mercury-0005_Na-on.fit'
##ccd = CCDData.read(fname1)
#ccd = FbuCCDData.read(fname1, fallback_unit='adu')
##ccd = FbuCCDData.read(fname1, fallback_unit='aduu')
#
#ccd = FbuCCDData.read(fname1, unit='electron')
fname1 = '/data/Mercury/raw/2020-05-27/Mercury-0005_Na-on.fit'
pgd = CenterOfMassPGD.read(fname1)
print(pgd.obj_center, pgd.desired_center)
fname1 = '/data/IoIO/raw/20210310/HD 132052-S001-R001-C002-R.fts'
pgd = MaxImPGD.read(fname1)
##print(pgd.meta)
#sub = pgd[0:100, 20:100]
#print(sub.subframe_origin)
#fname = '/data/IoIO/raw/2021-04_Astrometry/Main_Astrometry_East_of_Pier.fit'
#c = MaxImPGD.read(fname)
#print(c.binning)
#print(c.x_binned(6))
#print(c.x_binned(np.asarray((6, 12, 24, 48))))
#print(c.y_binned(6))
#print(c.y_binned(np.asarray((6, 12, 24, 48))))
| 38.470925 | 256 | 0.630107 |
75dba91333f195823c0a9019873052586c4eb969 | 7,127 | py | Python | meiduo_mall/meiduo_mall/settings/dev.py | amour-lee/MeiDuoProject | 72bae3886d5db79f63725d3aa1a6b4bad294572e | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/settings/dev.py | amour-lee/MeiDuoProject | 72bae3886d5db79f63725d3aa1a6b4bad294572e | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/settings/dev.py | amour-lee/MeiDuoProject | 72bae3886d5db79f63725d3aa1a6b4bad294572e | [
"MIT"
] | null | null | null | # 开发环境配置文件
"""
Django settings for meiduo_mall project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# 打印导包路径
# print(sys.path)
# 为了保证应用的注册和导包正常,需要追加导包路径执行'apps'
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# sys.path(0, os.path.join(BASE_DIR, 'apps'))
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and
# load it from the environment before any production use.
SECRET_KEY = 'n7wk_8a-=&!&(9^o_4!!@50gn3i=4=f7_fyhq95luau%d!oysq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', 'www.meiduo.site', 'api.meiduo.site']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework', # DRF
'corsheaders', # 解决JS跨域请求问题
'users.apps.UsersConfig', # 用户模块 由用户模型类限制的此种注册方式
'verifications.apps.VerificationsConfig', # 验证模块
'oauth.apps.OauthConfig', # QQ登录模块
]
MIDDLEWARE = [
# 放在最外层的原因,跨域的问题需要在请求一开始就得到解决,所以需要优先执行,中间件处理请求自上而下
'corsheaders.middleware.CorsMiddleware', # 最外层中间件
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo_mall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduo_mall.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1', # 数据库主机
'PORT': 3306, # 数据库端口
'USER': 'meiduo', # 数据库用户名
'PASSWORD': 'meiduo', # 数据库用户密码
'NAME': 'meiduo_mall', # 数据库名字
}
}
# 配置Redis作为缓存的后端数据库
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"verify_codes": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/2",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# 集成日志
LOGGING = {
'version': 1,
'disable_existing_loggers': False, # 是否禁用已经存在的日志器
'formatters': { # 日志信息显示的格式
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
},
},
'filters': { # 对日志进行过滤
'require_debug_true': { # django在debug模式下才输出日志
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': { # 日志处理方法
'console': { # 向终端中输出日志
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': { # 向文件中输出日志
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(os.path.dirname(BASE_DIR), "logs/meiduo.log"), # 日志文件的位置
'maxBytes': 300 * 1024 * 1024,
'backupCount': 10,
'formatter': 'verbose'
},
},
'loggers': { # 日志器
'django': { # 定义了一个名为django的日志器
'handlers': ['console', 'file'], # 可以同时向终端与文件中输出日志
'propagate': True, # 是否继续传递日志信息
'level': 'INFO', # 日志器接收的最低日志级别
},
}
}
REST_FRAMEWORK = {
# 异常处理
'EXCEPTION_HANDLER': 'meiduo_mall.utils.exceptions.exception_handler',
# 认证(读取用户身份信息,判断当前的登录用户是否是本网站的用户)
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication', # JWT认证,在前面的认证方案优先
'rest_framework.authentication.SessionAuthentication', # session认证机制
'rest_framework.authentication.BasicAuthentication', # 基础认证
),
}
# JWT配置
JWT_AUTH = {
# 配置状态保持的有效期
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1),
# 为JWT登录视图补充返回值
'JWT_RESPONSE_PAYLOAD_HANDLER': 'users.utils.jwt_response_payload_handler',
}
# 指定django用户认证后端
AUTHENTICATION_BACKENDS = [
'users.utils.UsernameMobileAuthBackend',
]
# 指定默认的用户模型类
# 注意点:语法规则必须是'应用名.用户模型类'
AUTH_USER_MODEL = 'users.User'
# CORS:白名单
CORS_ORIGIN_WHITELIST = (
'127.0.0.1:8080',
'localhost:8080',
'www.meiduo.site:8080',
'api.meiduo.site:8000',
)
CORS_ALLOW_CREDENTIALS = True # 允许携带cookie
# QQ OAuth login parameters
# NOTE(review): the client secret is committed to source control; move it
# to an environment variable or secrets store.
QQ_CLIENT_ID = '101474184'
QQ_CLIENT_SECRET = 'c6ce949e04e12ecc909ae6a8b09b637c'
QQ_REDIRECT_URI = 'http://www.meiduo.site:8080/oauth_callback.html'
| 27.20229 | 94 | 0.645152 |
bd762f81bf475eed4f2fa7fc384e51d93a4ec686 | 7,348 | py | Python | tasks/simplification.py | CartoDB/bigmetadata | a32325382500f23b8a607e4e02cc0ec111360869 | [
"BSD-3-Clause"
] | 45 | 2015-12-14T03:05:55.000Z | 2021-06-29T22:46:40.000Z | tasks/simplification.py | CartoDB/bigmetadata | a32325382500f23b8a607e4e02cc0ec111360869 | [
"BSD-3-Clause"
] | 480 | 2016-02-19T15:58:44.000Z | 2021-09-10T16:38:56.000Z | tasks/simplification.py | CartoDB/bigmetadata | a32325382500f23b8a607e4e02cc0ec111360869 | [
"BSD-3-Clause"
] | 13 | 2016-08-09T21:03:02.000Z | 2020-04-29T23:40:20.000Z | import os
from luigi import Task, Parameter, LocalTarget
from .util import shell
from .targets import PostgresTarget
from .meta import CurrentSession
OBSERVATORY_SCHEMA = 'observatory'
DEFAULT_GEOMFIELD = 'the_geom'
TMP_DIRECTORY = 'tmp'
SIMPLIFICATION_DIRECTORY = 'simplification'
SIMPLIFIED_SUFFIX = '_simpl'
SKIPFAILURES_NO = 'no' # Avoids https://trac.osgeo.org/gdal/ticket/6803
SKIPFAILURES_YES = 'yes'
DEFAULT_P_RETAIN_FACTOR_MAPSHAPER = '10'
DEFAULT_P_RETAIN_FACTOR_POSTGIS = '50' # Retain factors used for simplification (this is NOT a percentage) \
# The higher the retain factor, the lower the simplification
DEFAULT_MAX_MEMORY = '8192'
def tmp_directory(schema, table):
    """Return the scratch directory used while simplifying ``schema.table``."""
    qualified_name = '{schema}.{table}'.format(schema=schema, table=table)
    return os.path.join(TMP_DIRECTORY, SIMPLIFICATION_DIRECTORY, qualified_name)
def shp_filename(table, suffix=''):
    """Return the ESRI shapefile name for ``table``, with an optional suffix."""
    return '{}{}.shp'.format(table, suffix)
class ExportShapefile(Task):
    """Luigi task: dump a PostGIS table to an ESRI Shapefile with ogr2ogr."""
    schema = Parameter()
    table = Parameter()
    # 'yes' adds -skipfailures (works around https://trac.osgeo.org/gdal/ticket/6803)
    skipfailures = Parameter(default=SKIPFAILURES_NO)
    def run(self):
        # Write into a temporary path so the target only appears on success
        with self.output().temporary_path() as temp_path:
            self.output().fs.mkdir(temp_path)
            cmd = 'ogr2ogr -f "ESRI Shapefile" {shapefile} ' \
                  '{skipfailures} ' \
                  '"PG:dbname=$PGDATABASE active_schema={schema}" {table} -nlt MultiPolygon'.format(
                      shapefile=os.path.join(temp_path, shp_filename(self.table)), schema=self.schema, table=self.table,
                      skipfailures='-skipfailures' if self.skipfailures.lower() == SKIPFAILURES_YES else '')
            shell(cmd)
    def output(self):
        # One directory per schema.table holding the .shp and its sidecars
        return LocalTarget(tmp_directory(self.schema, self.table))
class SimplifyShapefile(Task):
    """Luigi task: simplify an exported shapefile with mapshaper."""
    schema = Parameter()
    table_input = Parameter()
    table_output = Parameter()
    geomfield = Parameter(default=DEFAULT_GEOMFIELD)
    # Higher retain factor => smaller interval => lighter simplification
    retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_MAPSHAPER)
    skipfailures = Parameter(default=SKIPFAILURES_NO)
    # Node heap size in MB for the mapshaper process
    maxmemory = Parameter(default=DEFAULT_MAX_MEMORY)
    def requires(self):
        return ExportShapefile(schema=self.schema, table=self.table_input, skipfailures=self.skipfailures)
    def run(self):
        # Derive the simplification interval from the table's geometry stats
        factor = simplification_factor(self.schema, self.table_input, self.geomfield, self.retainfactor)
        with self.output().temporary_path() as temp_path:
            self.output().fs.mkdir(temp_path)
            cmd = 'node --max-old-space-size={maxmemory} `which mapshaper` ' \
                  '{input} snap -simplify interval={interval} planar keep-shapes -o {output}'.format(
                      maxmemory=self.maxmemory,
                      input=os.path.join(self.input().path, shp_filename(self.table_input)),
                      interval=factor,
                      output=os.path.join(temp_path, shp_filename(self.table_output)))
            shell(cmd)
    def output(self):
        return LocalTarget(tmp_directory(self.schema, self.table_output))
class SimplifyGeometriesMapshaper(Task):
    """Luigi task: load a mapshaper-simplified shapefile back into PostGIS.

    Reimports the simplified shapefile into ``schema.table_output`` and
    repairs any invalid polygons produced by the simplification.
    """
    schema = Parameter()
    table_input = Parameter()
    table_output = Parameter(default='')
    geomfield = Parameter(default=DEFAULT_GEOMFIELD)
    retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_MAPSHAPER)
    skipfailures = Parameter(default=SKIPFAILURES_NO)
    maxmemory = Parameter(default=DEFAULT_MAX_MEMORY)
    def __init__(self, *args, **kwargs):
        super(SimplifyGeometriesMapshaper, self).__init__(*args, **kwargs)
    def requires(self):
        return SimplifyShapefile(schema=self.schema, table_input=self.table_input, table_output=self.table_output,
                                 geomfield=self.geomfield, retainfactor=self.retainfactor,
                                 skipfailures=self.skipfailures, maxmemory=self.maxmemory)
    def run(self):
        # Reimport the simplified shapefile into PostGIS
        cmd = 'PG_USE_COPY=yes ' \
              'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE active_schema={schema}" ' \
              '-t_srs "EPSG:4326" -nlt MultiPolygon -nln {table} ' \
              '-lco OVERWRITE=yes -lco PRECISION=no -lco GEOMETRY_NAME={geomfield} ' \
              '-lco SCHEMA={schema} {shp_path} '.format(
                  schema=self.output().schema,
                  table=self.output().tablename,
                  geomfield=self.geomfield,
                  shp_path=os.path.join(self.input().path, shp_filename(self.table_output)))
        shell(cmd)
        # Simplification can leave invalid polygons; repair them and keep
        # only the polygonal parts (type 3) of any resulting collections
        session = CurrentSession().get()
        session.execute('UPDATE "{schema}".{table} '
                        'SET {geomfield}=ST_CollectionExtract(ST_MakeValid({geomfield}), 3)'.format(
                            schema=self.output().schema, table=self.output().tablename, geomfield=self.geomfield))
        session.commit()
    def output(self):
        return PostgresTarget(self.schema, self.table_output)
def simplification_factor(schema, table, geomfield, divisor_power):
    """Return a simplification tolerance for a table's geometry column.

    The tolerance is the table-wide average perimeter-per-vertex, scaled down
    by 10^(divisor_power / 10); rows with zero vertices are excluded.
    """
    query = ('SELECT '
             'AVG(ST_Perimeter({geomfield}) / ST_NPoints({geomfield})) / 10 ^ ({divisor}::Decimal / 10) '
             'FROM "{schema}".{table} WHERE ST_NPoints({geomfield}) > 0').format(
                 schema=schema, table=table, geomfield=geomfield, divisor=divisor_power)
    row = CurrentSession().get().execute(query).fetchone()
    return row[0]
class SimplifyGeometriesPostGIS(Task):
    """Simplifies a table's geometries in-database with ST_SimplifyVW.

    Creates a new table that copies every column of the input, replacing the
    geometry column with its simplified (and repaired) version, and adds a
    GIST index on the new geometry column.
    """
    schema = Parameter()
    table_input = Parameter()
    table_output = Parameter()
    geomfield = Parameter(default=DEFAULT_GEOMFIELD)
    retainfactor = Parameter(default=DEFAULT_P_RETAIN_FACTOR_POSTGIS)

    # NOTE: a redundant __init__ that only called super() was removed here;
    # behavior is unchanged.

    def run(self):
        session = CurrentSession().get()
        # Enumerate the input table's columns so every column can be copied
        # verbatim except the geometry column.
        columns = session.execute("SELECT column_name "
                                  "FROM information_schema.columns "
                                  "WHERE table_schema = '{schema}' "
                                  "AND table_name = '{table}'".format(
                                      schema=self.schema, table=self.table_input.lower())).fetchall()
        factor = simplification_factor(self.schema, self.table_input, self.geomfield, self.retainfactor)
        # Simplify, then repair and keep only polygonal parts (type 3).
        simplified_geomfield = 'ST_CollectionExtract(ST_MakeValid(ST_SimplifyVW({geomfield}, {factor})), 3) ' \
                               '{geomfield}'.format(geomfield=self.geomfield, factor=factor)
        session.execute('CREATE TABLE "{schema}".{table_output} '
                        'AS SELECT {fields} '
                        'FROM "{schema}".{table_in} '.format(
                            schema=self.output().schema, table_in=self.table_input,
                            table_output=self.output().tablename,
                            fields=', '.join([x[0] if x[0] != self.geomfield else simplified_geomfield
                                              for x in columns])))
        session.commit()
        session.execute('CREATE INDEX {table_output}_{geomfield}_geo ON '
                        '"{schema}".{table_output} USING GIST ({geomfield})'.format(
                            table_output=self.output().tablename, geomfield=self.geomfield, schema=self.output().schema))

    def output(self):
        return PostgresTarget(self.schema, self.table_output)
| 45.079755 | 121 | 0.633506 |
5ec586fd92c852bd84f12a7089d0cb6ea4ad8b5f | 13,314 | py | Python | constrained_language_typology/vanilla_reader_main.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | constrained_language_typology/vanilla_reader_main.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | constrained_language_typology/vanilla_reader_main.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utilities for loading vanilla WALS distribution.
The WALS files are distributed here:
https://github.com/cldf-datasets/wals
Example:
--------
Clone the GitHub WALS data to WALS_DIR. Then run:
> WALS_DIR=...
> python3 vanilla_reader_main.py \
--wals_dir ${WALS_DIR} \
--output_dir ${OUTPUT_DIR}
The above will create "wals.csv" files converted from the CLDF format
provided by WALS. Our models should be able to ingest these csv files.
By default this exports everything as strings. To change this behavior
and exports categorical variables as ints, please pass
--categorical_as_ints flag.
Note: It looks like the "countrycodes" information is not provided by WALS,
possibly coming from other sources, such as Glottolog. Leaving this column
empty for now.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import os
from absl import app
from absl import flags
from absl import logging
import constants as const
import data_info as data_lib
import numpy as np
import pandas as pd
# Command-line interface.
flags.DEFINE_string(
    "wals_dir", "", "Root directory of WALS distribution.")
flags.DEFINE_string(
    "output_dir", "",
    "Output directory for preprocessed files.")
flags.DEFINE_bool(
    "categorical_as_ints", False,
    "Encode all the categorical features as ints.")

FLAGS = flags.FLAGS

# Names of the data files, also serving as keys into the global data dictionary.
_DATA_LANGUAGE = "languages"
_DATA_CODES = "codes"
_DATA_VALUES = "values"
_DATA_PARAMS = "parameters"

# Language column names (metadata columns copied verbatim from languages.csv).
_LANGUAGE_COLUMN_NAMES = [
    "ISO639P3code", "Glottocode", "Name", "Latitude", "Longitude", "Genus",
    "Family", "Subfamily", "Macroarea", "ISO_codes"
]

# Parameter ID key used to denote feature name ID.
_PARAM_ID = "Parameter_ID"

# Name of the output dataset.
_DATASET_NAME = "wals"
def _cldf_dir(wals_dir):
    """Return the path of the "cldf" subdirectory of the WALS checkout."""
    cldf_path = os.path.join(wals_dir, "cldf")
    return cldf_path
def _build_df(wals_dir, filename):
    """Read one WALS csv file and return it as {row_index: {column: value}}."""
    path = os.path.join(_cldf_dir(wals_dir), filename)
    logging.info("Reading \"%s\" ...", path)
    # A row-major dict of dicts is easier to index record-by-record than a
    # DataFrame, which is all the downstream code needs.
    records = pd.read_csv(path, encoding=const.ENCODING).to_dict("index")
    logging.info("Read %d elements.", len(records))
    return records
def _get_languages(data):
    """Build {language_id: {column: value}} from the languages table."""
    languages = collections.defaultdict(lambda: collections.defaultdict(int))
    table = data[_DATA_LANGUAGE]
    for row_index in range(len(table)):
        record = table[row_index]
        entry = languages[record["ID"]]
        # Copy only the known metadata columns for each language.
        for column in _LANGUAGE_COLUMN_NAMES:
            entry[column] = record[column]
    logging.info("Collected %d languages.", len(languages))
    return languages
def _get_feature_names(codes):
    """Builds feature names dictionary from the codes table.

    Runs over all the feature value types and their names and collects a
    mapping from all the feature IDs to their possible values. The
    corresponding code table entries in data look as follows:

      ID,Parameter_ID,Name,Description,Number,icon
      ...
      5A-3,5A,Missing /p/,Missing /p/,3,cdd0000
      5A-4,5A,Missing /g/,Missing /g/,4,c0000dd
      ...

    Resulting dictionary:
      ...
      144Q -> {'n_values': 4, 'Names': [
        'NoDoubleNeg', 'OptDoubleNeg', 'OnlyWithAnotherNeg', 'No SNegOV']}
      ...

    Args:
      codes: (dict) Codes table as returned by `_build_df`: a mapping from
        the integer row index to the row's column/value dict. Rows are
        assumed to be grouped by `Parameter_ID`, as in the WALS distribution.

    Returns:
      Dictionary containing the mapping of feature codes to their values.
    """
    feat2values = collections.defaultdict(lambda: collections.defaultdict(int))
    # The scan below accumulates the value names/codes of the current run of
    # rows sharing a Parameter_ID, flushing the run into feat2values whenever
    # the ID changes (and unconditionally for the very last row).
    current_feature = codes[0][_PARAM_ID]  # 1A
    current_feature_names = []
    current_value_codes = []
    num_values = 0
    for code_id in range(len(codes)):
        code_info = codes[code_id]
        feature_value = code_info["Name"]
        if code_info[_PARAM_ID] == current_feature:
            # Still inside the same feature: extend the current run.
            current_feature_names.append(feature_value)
            current_value_codes.append(code_info["ID"])
            feat2values[code_info[_PARAM_ID]]["num_values"] += 1
            num_values += 1
        else:
            # A new feature starts here: flush the finished run into the
            # previous feature's entry, then start a fresh run with this row.
            prev_feature = feat2values[codes[code_id - 1][_PARAM_ID]]
            prev_feature["Names"] = current_feature_names
            prev_feature["Codes"] = current_value_codes
            current_feature_names = [feature_value]
            current_value_codes = [code_info["ID"]]
            feat2values[code_info[_PARAM_ID]]["num_values"] += 1
            num_values += 1
        if code_id == len(codes) - 1:
            # Last feature value: the final run is never flushed by the
            # change-of-ID branch above, so store it explicitly.
            feature = feat2values[code_info[_PARAM_ID]]
            feature["Names"] = current_feature_names
            feature["Codes"] = current_value_codes
        current_feature = code_info[_PARAM_ID]
    logging.info("Collected %d feature names (%d supported values).",
                 len(feat2values), num_values)
    return feat2values
def _fill_feature_values(data_values, feature_names, languages):
    """Copy every value row into its language's dict, validating codes."""
    logging.info("Filling feature values for languages ...")
    for value_id in range(len(data_values)):
        row = data_values[value_id]
        feature_name = row[_PARAM_ID]
        cur_value_code = feature_name + "-" + str(row["Value"])
        # The Code_ID column must agree with "<feature>-<value>".
        if cur_value_code != row["Code_ID"]:
            raise ValueError("Invalid value code: %s" % cur_value_code)
        # Recover the 1-based value index from the per-feature code list and
        # cross-check it against the stored value.
        val = feature_names[feature_name]["Codes"].index(cur_value_code) + 1
        if val != row["Value"]:
            raise ValueError("Invalid value: %s" % val)
        languages[row["Language_ID"]][feature_name] = val
def _get_areas(params, feature_names):
    """Map each WALS area to the list of feature IDs it contains.

    Also records each feature's human-readable name under "Name" in the
    supplied feature_names dictionary.
    """
    areas = {}
    for row_index in range(len(params)):
        feature_info = params[row_index]
        feature = feature_info["ID"]
        area = feature_info["Area"]
        # setdefault keeps first-seen ordering of features within an area.
        areas.setdefault(area, []).append(feature)
        feature_names[feature]["Name"] = feature_info["Name"]
    logging.info("Collected %d areas (%s).", len(areas), ",".join(areas.keys()))
    return areas
def _fill_feature_stats(languages, feature_names, areas):
    """Annotate every language with feature-coverage statistics."""
    total_num_features = len(feature_names)
    for lang_id in languages:
        language = languages[lang_id]
        # Inverse sparsity: percentage of WALS features populated for this
        # language; metadata columns are not counted as features.
        populated = len(language) - len(_LANGUAGE_COLUMN_NAMES)
        language["%"] = populated * 100.0 / total_num_features
        # Per-area coverage ratio in [0, 1].
        for area, features in areas.items():
            count = 0
            for feature in features:
                # Indexing (not .get) deliberately inserts a 0-valued entry
                # for missing features, since `language` is a defaultdict.
                if language[feature] != 0:
                    count += 1
            language[area] = count / len(features)
def _get_feature_types(areas, feature_names):
    """Aggregate per-area statistics and assign embedding dims to features."""
    types = collections.defaultdict(lambda: collections.defaultdict(int))
    for area, features in areas.items():
        stats = types[area]
        stats["num_features"] = len(features)
        for feature in features:
            info = feature_names[feature]
            stats["num_values"] += info["num_values"]
            # Heuristic embedding size: roughly one dimension per ten
            # possible values, but never less than one.
            emb_dim = int(max(1, np.floor((info["num_values"] + 1) / 10.0)))
            info["emb_dim"] = max(1, emb_dim)
            stats["total_dim"] += info["emb_dim"]
    return types
def _read(wals_dir):
    """Load the vanilla WALS dataset and derive all lookup dictionaries."""
    logging.info("Reading WALS from a root directory \"%s\" ...", wals_dir)
    # Read in all the relevant data files.
    data = {name: _build_df(wals_dir, name + ".csv")
            for name in (_DATA_LANGUAGE, _DATA_CODES, _DATA_VALUES, _DATA_PARAMS)}
    # Derive the lookup dictionaries from the raw tables.
    languages = _get_languages(data)
    feature_names = _get_feature_names(data[_DATA_CODES])
    areas = _get_areas(data[_DATA_PARAMS], feature_names)
    feature_types = _get_feature_types(areas, feature_names)
    # Attach per-language feature values and coverage statistics.
    _fill_feature_values(data[_DATA_VALUES], feature_names, languages)
    _fill_feature_stats(languages, feature_names, areas)
    return data, languages, areas, feature_names, feature_types
def _prepare_data_info(mappings):
    """Prepares data info mappings.

    Builds the "data info" dictionary: sorted lists of genera and families
    seen across all languages, plus a mapping from (underscored) feature
    names to their sorted value-name lists.

    Args:
      mappings: Tuple as returned by `_read`.

    Returns:
      The data info dictionary.
    """
    # Prepare the container.
    _, languages, _, feature_names_dict, _ = mappings
    data_info = {}
    data_info[const.DATA_KEY_FEATURES] = {}
    features = data_info[const.DATA_KEY_FEATURES]
    data_info[const.DATA_KEY_GENERA] = []
    genera = data_info[const.DATA_KEY_GENERA]
    data_info[const.DATA_KEY_FAMILIES] = []
    families = data_info[const.DATA_KEY_FAMILIES]
    # Fill in data mappings.
    for lang_id in languages.keys():
        # Genera and families (deduplicated; sorted below).
        language = languages[lang_id]
        if language["Genus"] not in genera:
            genera.append(language["Genus"])
        if language["Family"] not in families:
            families.append(language["Family"])
    # Actual features.
    feature_ids = list(feature_names_dict.keys())
    feature_names = [
        feature_names_dict[name]["Name"] for name in feature_ids]
    # Feature names become column identifiers downstream, hence "_" for " ".
    feature_names = [name.replace(" ", "_") for name in feature_names]
    for i in range(len(feature_ids)):
        feature_id = feature_ids[i]
        name = feature_names[i]
        features[name] = feature_names_dict[feature_id]["Names"]
        # Sort in-place; note this also reorders the shared "Names" list.
        features[name].sort(key=str)
    # Postprocess.
    genera.sort(key=str)
    families.sort(key=str)
    return data_info
def _make_df(mappings, categorical_as_ints=False):
    """Converts WALS mappings to data frame.

    Args:
      mappings: Tuple as returned by `_read`.
      categorical_as_ints: If True, genus, family and feature values are
        encoded as 1-based indices into their sorted name lists instead of
        strings.

    Returns:
      A (DataFrame, data_info) pair; the frame has one row per language.
    """
    # Prepare the core columns.
    _, languages, _, feature_names_dict, _ = mappings
    data_info = _prepare_data_info(mappings)
    wals_codes = sorted(languages.keys(), key=str)
    names = [languages[code]["Name"] for code in wals_codes]
    latitudes = [languages[code]["Latitude"] for code in wals_codes]
    longitudes = [languages[code]["Longitude"] for code in wals_codes]
    # Country codes are not provided by WALS (see the module docstring).
    countries = [np.nan for code in wals_codes]
    genera = [languages[code]["Genus"] for code in wals_codes]
    if categorical_as_ints:
        genera_names = data_info[const.DATA_KEY_GENERA]
        genera = [genera_names.index(name) + 1 for name in genera]
    families = [languages[code]["Family"] for code in wals_codes]
    if categorical_as_ints:
        families_names = data_info[const.DATA_KEY_FAMILIES]
        families = [families_names.index(name) + 1 for name in families]
    # Prepare feature columns.
    feature_ids = list(feature_names_dict.keys())
    feature_names = [
        feature_names_dict[name]["Name"] for name in feature_ids]
    feature_names = [name.replace(" ", "_") for name in feature_names]
    all_feature_values = []
    for i in range(len(feature_ids)):
        feature_id = feature_ids[i]
        feature_name = feature_names[i]
        feature_values = []
        data_info_feature_vals = data_info[const.DATA_KEY_FEATURES][feature_name]
        for code in wals_codes:
            language = languages[code]
            assert feature_id in language
            val = language[feature_id]
            if val == 0:  # Missing marker.
                value_name = np.nan
            else:
                # Map the 1-based value index to its name (and optionally to
                # its index in the sorted value list).
                value_name = feature_names_dict[feature_id]["Names"][val - 1]
                if categorical_as_ints:
                    value_name = data_info_feature_vals.index(value_name) + 1
            feature_values.append(value_name)
        all_feature_values.append(feature_values)
    # Create dataframe.
    columns = ["wals_code", "name", "latitude", "longitude", "genus", "family",
               "countrycodes"]
    for feature_id in range(len(feature_ids)):
        columns.append(feature_names[feature_id])
    data = {
        "wals_code": wals_codes, "name": names, "latitude": latitudes,
        "longitude": longitudes, "genus": genera, "family": families,
        "countrycodes": countries
    }
    for feature_id in range(len(feature_ids)):
        data[feature_names[feature_id]] = all_feature_values[feature_id]
    return pd.DataFrame(data, columns=columns), data_info
def main(unused_argv):
    """Converts the WALS CLDF distribution to a csv plus a data-info file."""
    if not FLAGS.wals_dir:
        raise ValueError("Specify --wals_dir!")
    if not FLAGS.output_dir:
        raise ValueError("Specify --output_dir!")

    logging.info("Preparing dataset ...")
    df, data_info = _make_df(_read(FLAGS.wals_dir),
                             categorical_as_ints=FLAGS.categorical_as_ints)
    # Save the languages-by-features table ("|"-separated).
    output_file = os.path.join(FLAGS.output_dir, _DATASET_NAME + ".csv")
    logging.info("Saving dataset to \"%s\" ...", output_file)
    df.to_csv(output_file, sep="|", index=False, float_format="%g")
    logging.info("Saved %d languages.", len(df))
    # Save the accompanying data info (feature/genus/family vocabularies).
    output_file = os.path.join(
        FLAGS.output_dir,
        const.DATA_INFO_FILENAME + "_" + _DATASET_NAME +
        data_lib.FILE_EXTENSION)
    data_lib.write_data_info(output_file, data_info)


if __name__ == "__main__":
    app.run(main)
| 34.314433 | 80 | 0.709704 |
118d17b45e9ab23aac0dd9b3a14187de23202d45 | 13,920 | py | Python | scripts/render_core.py | tomuram/mb_aligner_adisuissa | eaaa81fd7bc9ffc58c2683dc6cca8e2d5e90c11a | [
"MIT"
] | null | null | null | scripts/render_core.py | tomuram/mb_aligner_adisuissa | eaaa81fd7bc9ffc58c2683dc6cca8e2d5e90c11a | [
"MIT"
] | null | null | null | scripts/render_core.py | tomuram/mb_aligner_adisuissa | eaaa81fd7bc9ffc58c2683dc6cca8e2d5e90c11a | [
"MIT"
] | 3 | 2020-04-28T05:23:09.000Z | 2021-04-13T20:10:00.000Z | from rh_renderer.tilespec_renderer import TilespecRenderer
from rh_renderer.multiple_tiles_renderer import BlendType
from rh_renderer import models
#from rh_renderer.hist_matcher import HistMatcher
#import rh_renderer.normalization.hist_adjuster
#from rh_aligner.common.bounding_box import BoundingBox
import cv2
import argparse
import numpy as np
import time
import ujson
import os
import sys
import math
#from rh_renderer.normalization.histogram_diff_minimization import HistogramDiffMinimization
from rh_renderer.normalization.histogram_clahe import HistogramCLAHE, HistogramGB11CLAHE
import common
def pad_image(img, from_x, from_y, start_point):
    """Pads the image (zeros) that starts from start_point (returned from the renderer), to (from_x, from_y)"""
    # Note that start_point is (y, x)
    if start_point[0] == from_y and start_point[1] == from_x:
        # Nothing to pad, return the image as is
        return img
    # NOTE(review): the arithmetic below uses start_point[1] for the row (y)
    # offset and start_point[0] for the column (x) offset -- the opposite of
    # the (y, x) convention asserted above and used by the fallback tuples in
    # render_tilespec. This is only consistent if renderer.crop() actually
    # returns an (x, y) start point; confirm against rh_renderer before
    # changing anything here.
    full_height_width = (img.shape + np.array([start_point[1] - from_y, start_point[0] - from_x])).astype(int)
    full_img = np.zeros(full_height_width, dtype=img.dtype)
    full_img[int(start_point[1] - from_y):int(start_point[1] - from_y + img.shape[0]), int(start_point[0] - from_x):int(start_point[0] - from_x + img.shape[1])] = img
    return full_img
#def render_tilespec(tile_fname, output, scale, output_type, in_bbox, tile_size, invert_image, threads_num=1, empty_placeholder=False, hist_adjuster_fname=None, hist_adjuster_alg_type=None, from_to_cols_rows=None):
def render_tilespec(tile_fname, output, scale, output_type, in_bbox, tile_size, invert_image, threads_num=1, empty_placeholder=False, hist_adjuster_alg_type=None, from_to_cols_rows=None, blend_type=BlendType.MULTI_BAND_SEAM):
    """Renders a given tilespec.

    If the in_bbox to_x/to_y values are -1, uses the tilespecs to determine
    the output size. If tile_size is 0, the output will be a single image,
    otherwise multiple tiles will be created. `output` is either a single
    filename to save the output in (using the output_type), or a prefix for
    the tiles output, which will be of the form {prefix}_tr%d-tc%d.{output_type}
    where the row (tr) and column (tc) values are one-based.

    Args:
        tile_fname: Path of the tilespec json file to render.
        output: Output filename (single image) or filename prefix (tiles).
        scale: Scale factor applied to the rendered output.
        output_type: Image extension, e.g. "png" or "jpg".
        in_bbox: Full-resolution (from_x, to_x, from_y, to_y) crop; -1 for
            to_x/to_y means "up to the tilespec's own bounding box".
        tile_size: Square tile side (post-scale); 0 renders a single image.
        invert_image: If True, writes 255 - img.
        threads_num: Unused; kept for interface compatibility.
        empty_placeholder: If True, writes an empty "<name>_empty" marker
            file instead of an all-black image.
        hist_adjuster_alg_type: "CLAHE", "GB11CLAHE" or None.
        from_to_cols_rows: Optional (from_col, from_row, to_col, to_row)
            restriction of the tile grid (tiled mode only).
        blend_type: rh_renderer blend type for overlapping tiles.
    """
    start_time = time.time()
    # Determine the output bounding box (full resolution). A to_x/to_y of -1
    # means "use the tilespec's own extent" on that axis.
    if in_bbox[1] == -1 or in_bbox[3] == -1:
        image_bbox = common.read_bboxes_grep(tile_fname)
        image_bbox[0] = max(image_bbox[0], in_bbox[0])
        image_bbox[2] = max(image_bbox[2], in_bbox[2])
        if in_bbox[1] > 0:
            image_bbox[1] = in_bbox[1]
        if in_bbox[3] > 0:
            image_bbox[3] = in_bbox[3]
    else:
        image_bbox = in_bbox
    scaled_bbox = [
        int(math.floor(image_bbox[0] * scale)),
        int(math.ceil(image_bbox[1] * scale)),
        int(math.floor(image_bbox[2] * scale)),
        int(math.ceil(image_bbox[3] * scale))
    ]

    # Post-scale output shape, as (width, height).
    out_shape = (scaled_bbox[1] - scaled_bbox[0], scaled_bbox[3] - scaled_bbox[2])
    print("Final out_shape for the image: {}".format(out_shape))

    # Optional per-tile histogram normalization.
    hist_adjuster = None
    if hist_adjuster_alg_type is not None:
        if hist_adjuster_alg_type.upper() == 'CLAHE':
            hist_adjuster = HistogramCLAHE()
        if hist_adjuster_alg_type.upper() == 'GB11CLAHE':
            hist_adjuster = HistogramGB11CLAHE()

    with open(tile_fname, 'r') as data:
        tilespec = ujson.load(data)

    renderer = TilespecRenderer(tilespec, hist_adjuster=hist_adjuster, dynamic=(scale != 1.0), blend_type=blend_type)

    # Add the downsampling transformation
    if scale != 1.0:
        downsample = models.AffineModel(np.array([
            [scale, 0., 0.],
            [0., scale, 0.],
            [0., 0., 1.]
        ]))
        renderer.add_transformation(downsample)

    if tile_size == 0:
        # No tiles: render the whole bounding box into a single file.
        out_fname = "{}.{}".format(os.path.splitext(output)[0], output_type)
        out_fname_empty = "{}_empty".format(out_fname)

        # Render the image
        img, start_point = renderer.crop(scaled_bbox[0], scaled_bbox[2], scaled_bbox[1] - 1, scaled_bbox[3] - 1)
        print("Rendered cropped and downsampled version")
        if empty_placeholder:
            if img is None or np.all(img == 0):
                # create the empty file, and return
                print("saving empty image {}".format(out_fname_empty))
                open(out_fname_empty, 'a').close()
                print("Rendering and saving empty file {} took {} seconds.".format(out_fname_empty, time.time() - start_time))
                return
        if img is None:
            # No actual image, set a blank image of the wanted size.
            # Bug fix: this used to reference the undefined name "out_shape1"
            # and raised a NameError whenever this path was taken.
            img = np.zeros((out_shape[1], out_shape[0]), dtype=np.uint8)
            start_point = (0, 0)
        print("Padding image")
        img = pad_image(img, scaled_bbox[0], scaled_bbox[2], start_point)
        if invert_image:
            print("inverting image")
            img = 255 - img
        print("saving image {}".format(out_fname))
        cv2.imwrite(out_fname, img)
    else:
        # Tile the image into a grid of tile_size x tile_size (post-scale).
        rows = int(math.ceil(out_shape[1] / float(tile_size)))
        cols = int(math.ceil(out_shape[0] / float(tile_size)))
        from_row = 0
        from_col = 0
        to_row = rows
        to_col = cols
        if from_to_cols_rows is not None:
            from_col, from_row, to_col, to_row = from_to_cols_rows

        # Iterate over each row and column and save the tile
        for cur_row in range(from_row, to_row):
            from_y = scaled_bbox[2] + cur_row * tile_size
            to_y = min(scaled_bbox[2] + (cur_row + 1) * tile_size, scaled_bbox[3])
            for cur_col in range(from_col, to_col):
                tile_start_time = time.time()
                out_fname = "{}_tr{}-tc{}.{}".format(output, str(cur_row + 1), str(cur_col + 1), output_type)
                out_fname_empty = "{}_empty".format(out_fname)
                from_x = scaled_bbox[0] + cur_col * tile_size
                to_x = min(scaled_bbox[0] + (cur_col + 1) * tile_size, scaled_bbox[1])
                # Render the tile
                img, start_point = renderer.crop(from_x, from_y, to_x - 1, to_y - 1)
                print("Rendered cropped and downsampled version")
                if empty_placeholder:
                    if img is None or np.all(img == 0):
                        # create the empty file, and continue to the next tile
                        print("saving empty image {}".format(out_fname_empty))
                        open(out_fname_empty, 'a').close()
                        continue
                if img is None:
                    # No actual image, set a blank image of the wanted size
                    img = np.zeros((to_y - from_y, to_x - from_x), dtype=np.uint8)
                    start_point = (from_y, from_x)
                print("Padding image")
                img = pad_image(img, from_x, from_y, start_point)
                if invert_image:
                    print("inverting image")
                    img = 255 - img
                print("saving image {}".format(out_fname))
                cv2.imwrite(out_fname, img)
                print("single tile rendering and saving to {} took {} seconds.".format(out_fname, time.time() - tile_start_time))

    print("Rendering and saving {} took {} seconds.".format(tile_fname, time.time() - start_time))
if __name__ == '__main__':
    # Command-line driver around render_tilespec().
    parser = argparse.ArgumentParser(description='Renders a given tilespec to a file (or multiple files/tiles).\
        Note that the images output sizes will be the (to_x - from_x, to_y - from_y) if given, or the entire image size.')
    parser.add_argument('tilespec', metavar='tilespec', type=str,
                        help='the tilespec to render')
    parser.add_argument('output', type=str,
                        help='the output filename (in case of a single image output), or a filename prefix (in case of tiled output)')
    parser.add_argument('-t', '--threads_num', type=int,
                        help='the number of threads to use (default: 1) - not used at the moment',
                        default=1)
    parser.add_argument('-s', '--scale', type=float,
                        help='the scale of the output images (default: 0.1)',
                        default=0.1)
    parser.add_argument('--output_type', type=str,
                        help='the images output type (default: png)',
                        default='png')
    parser.add_argument('--from_x', type=int,
                        help='the left coordinate, full res (default: 0)',
                        default=0)
    parser.add_argument('--from_y', type=int,
                        help='the top coordinate, full res (default: 0)',
                        default=0)
    parser.add_argument('--to_x', type=int,
                        help='the right coordinate, full res (default: full image)',
                        default=-1)
    parser.add_argument('--to_y', type=int,
                        help='the bottom coordinate, full res (default: full image)',
                        default=-1)
    parser.add_argument('--tile_size', type=int,
                        help='the size (square side) of each tile, post-scale (default: 0 - no tiles)',
                        default=0)
    parser.add_argument('-i', '--invert_image', action='store_true',
                        help='store an inverted image')
    parser.add_argument('-e', '--empty_placeholder', action='store_true',
                        help='store an empty file name (suffix will be "_empty"), when the tile/image has no data')
    # parser.add_argument('--hist_adjuster', type=str,
    #                     help='A location of a pkl file that containg the histogram adjuster object file to adjust the images with (default: None)',
    #                     default=None)
    parser.add_argument('--hist_adjuster_alg_type', type=str,
                        help='the type of algorithm to use for a general per-tile histogram normalization. Supported typed: CLAHE (default: None)',
                        default=None)
    parser.add_argument('--from_to_cols_rows', type=str,
                        help='Only to be used with tiled output (the tile_size argument is set). The input includes 4 numbers separated by commas, \
                        in the form "from_col,from_row,to_col,to_row" and only the output tiles in the given range (including from, excluding to) will be saved. (default: None)',
                        default=None)
    parser.add_argument('--blend_type', type=str,
                        help='The type of blending to use. Values = {} (default: MULTI_BAND_SEAM)'.format(BlendType.__members__.keys()),
                        default='MULTI_BAND_SEAM')

    args = parser.parse_args()
    print(args)

    # Resolve the blend type enum from its name.
    blend_type = BlendType[args.blend_type]

    # Optional tile-range restriction; only meaningful in tiled mode.
    from_to_cols_rows = None
    if args.from_to_cols_rows is not None:
        assert(args.tile_size > 0)
        from_to_cols_rows = [int(i) for i in args.from_to_cols_rows.split(',')]
        assert(len(from_to_cols_rows) == 4)

    render_tilespec(args.tilespec, args.output, args.scale, args.output_type,
                    (args.from_x, args.to_x, args.from_y, args.to_y), args.tile_size, args.invert_image,
                    args.threads_num, args.empty_placeholder, args.hist_adjuster_alg_type, from_to_cols_rows,
                    blend_type)
28b52e31aca258473ad66b9dc2bfc0a7c791c688 | 7,133 | py | Python | django/core/management/commands/runserver.py | benjaoming/django | 6dbe979b4d9396e1b307c7d27388c97c13beb21c | [
"BSD-3-Clause"
] | 1 | 2016-06-27T08:35:00.000Z | 2016-06-27T08:35:00.000Z | django/core/management/commands/runserver.py | benjaoming/django | 6dbe979b4d9396e1b307c7d27388c97c13beb21c | [
"BSD-3-Clause"
] | null | null | null | django/core/management/commands/runserver.py | benjaoming/django | 6dbe979b4d9396e1b307c7d27388c97c13beb21c | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from datetime import datetime
import errno
import os
import re
import sys
import socket
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import run, get_internal_wsgi_application
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload
from django.utils.encoding import get_system_encoding
from django.utils import six
from django.core.exceptions import ImproperlyConfigured
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
class Command(BaseCommand):
    """Implements the `runserver` management command (development server)."""
    help = "Starts a lightweight Web server for development."

    # Validation is called explicitly each time the server is reloaded.
    requires_system_checks = False
    def add_arguments(self, parser):
        """Declare runserver's command-line arguments."""
        parser.add_argument('addrport', nargs='?',
                            help='Optional port number, or ipaddr:port')
        parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
                            help='Tells Django to use an IPv6 address.')
        parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
                            help='Tells Django to NOT use threading.')
        parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
                            help='Tells Django to NOT use the auto-reloader.')
    def execute(self, *args, **options):
        """Propagate --no-color via the environment, then execute normally."""
        if options.get('no_color'):
            # We rely on the environment because it's currently the only
            # way to reach WSGIRequestHandler. This seems an acceptable
            # compromise considering `runserver` runs indefinitely.
            # (str(...) keeps the key/value as the native str type on both
            # Python 2 and 3, which os.environ requires.)
            os.environ[str("DJANGO_COLORS")] = str("nocolor")
        super(Command, self).execute(*args, **options)
    def get_handler(self, *args, **options):
        """
        Returns the default WSGI handler for the runner.
        """
        # Presumably resolved from the project's WSGI settings -- see
        # django.core.servers.basehttp.get_internal_wsgi_application.
        return get_internal_wsgi_application()
    def handle(self, *args, **options):
        """Validate options, resolve self.addr/self.port/IPv6 flags, then run."""
        from django.conf import settings

        if not settings.DEBUG and not settings.ALLOWED_HOSTS:
            raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')

        self.use_ipv6 = options.get('use_ipv6')
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        # True only when an explicit raw IPv6 literal was supplied/derived.
        self._raw_ipv6 = False
        if not options.get('addrport'):
            # No positional argument: use the default port on localhost.
            self.addr = ''
            self.port = DEFAULT_PORT
        else:
            m = re.match(naiveip_re, options['addrport'])
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % options['addrport'])
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." % self.port)
            if self.addr:
                if _ipv6:
                    # Strip the surrounding brackets from an IPv6 literal.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
        if not self.addr:
            # Fall back to loopback (IPv6 loopback when -6 was passed).
            self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
            self._raw_ipv6 = bool(self.use_ipv6)
        self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
    def inner_run(self, *args, **options):
        """Run system checks, print the startup banner and serve forever."""
        from django.conf import settings
        from django.utils import translation

        threading = options.get('use_threading')
        shutdown_message = options.get('shutdown_message', '')
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'

        self.stdout.write("Performing system checks...\n\n")
        self.validate(display_num_errors=True)
        try:
            self.check_migrations()
        except ImproperlyConfigured:
            # No database configured; skip the unapplied-migrations warning.
            pass
        now = datetime.now().strftime('%B %d, %Y - %X')
        if six.PY2:
            # strftime returns bytes on Python 2; decode for safe printing.
            now = now.decode(get_system_encoding())
        self.stdout.write((
            "%(started_at)s\n"
            "Django version %(version)s, using settings %(settings)r\n"
            "Starting development server at http://%(addr)s:%(port)s/\n"
            "Quit the server with %(quit_command)s.\n"
        ) % {
            "started_at": now,
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
            "port": self.port,
            "quit_command": quit_command,
        })
        # django.core.management.base forces the locale to en-us. We should
        # set it up correctly for the first request (particularly important
        # in the "--noreload" case).
        translation.activate(settings.LANGUAGE_CODE)

        try:
            handler = self.get_handler(*args, **options)
            run(self.addr, int(self.port), handler,
                ipv6=self.use_ipv6, threading=threading)
        except socket.error as e:
            # Use helpful error messages instead of ugly tracebacks.
            ERRORS = {
                errno.EACCES: "You don't have permission to access that port.",
                errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned-to.",
            }
            try:
                error_text = ERRORS[e.errno]
            except KeyError:
                error_text = str(e)
            self.stderr.write("Error: %s" % error_text)
            # Need to use an OS exit because sys.exit doesn't work in a thread
            os._exit(1)
        except KeyboardInterrupt:
            if shutdown_message:
                self.stdout.write(shutdown_message)
            sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
# NOTE(review): alias appears to be preserved for external importers of the
# old name — confirm before removing.
BaseRunserverCommand = Command
| 40.76 | 105 | 0.604514 |
288e6fd2b52da40e083b72dcca58df21a67e0f71 | 4,287 | py | Python | py/desitarget/test/test_mock_build.py | ameisner/desitarget | 86f211b72ff2b989a42269ff7b3d801e34387628 | [
"BSD-3-Clause"
] | null | null | null | py/desitarget/test/test_mock_build.py | ameisner/desitarget | 86f211b72ff2b989a42269ff7b3d801e34387628 | [
"BSD-3-Clause"
] | null | null | null | py/desitarget/test/test_mock_build.py | ameisner/desitarget | 86f211b72ff2b989a42269ff7b3d801e34387628 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desitarget.mock.build, but only add_mock_shapes_and_fluxes for now.
"""
import unittest
import tempfile
import os
import shutil
from pkg_resources import resource_filename
import numpy as np
from astropy.table import Table
import healpy as hp
import fitsio
import desimodel.footprint
from desitarget.mock.sky import random_sky
from desitarget.mock.build import targets_truth
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask
class TestMockBuild(unittest.TestCase):
    """Tests for desitarget.mock.build targets_truth and random sky positions."""

    def setUp(self):
        # Each test writes its outputs into a throw-away directory.
        self.outdir = tempfile.mkdtemp()

    def tearDown(self):
        if os.path.exists(self.outdir):
            shutil.rmtree(self.outdir)

    @unittest.skipUnless('DESITARGET_RUN_MOCK_UNITTEST' in os.environ, '$DESITARGET_RUN_MOCK_UNITTEST not set; skipping expensive mock tests')
    def test_targets_truth(self):
        """End-to-end run of targets_truth, with and without spectra."""
        configfile = resource_filename('desitarget.mock', 'data/select-mock-targets.yaml')
        import yaml
        with open(configfile) as fx:
            # safe_load avoids arbitrary object construction; plain
            # yaml.load() without a Loader is deprecated in PyYAML 5.1+ and a
            # TypeError in PyYAML >= 6.
            params = yaml.safe_load(fx)
        for targettype in params['targets'].keys():
            mockfile = params['targets'][targettype]['mockfile'].format(**os.environ)
            self.assertTrue(os.path.exists(mockfile), 'Missing {}'.format(mockfile))
        #- Test without spectra
        targets_truth(params, healpixels=[99737,], nside=256, output_dir=self.outdir, no_spectra=True)
        targetfile = self.outdir + '/997/99737/targets-256-99737.fits'
        truthfile = self.outdir + '/997/99737/truth-256-99737.fits'
        self.assertTrue(os.path.exists(targetfile))
        self.assertTrue(os.path.exists(truthfile))
        with fitsio.FITS(truthfile) as fx:
            self.assertTrue('TRUTH' in fx)
            #- WAVE is there, and FLUX is there but with strange shape (n,0)
            # self.assertTrue('WAVE' not in fx)
            # self.assertTrue('FLUX' not in fx)
        #- Test with spectra
        shutil.rmtree(self.outdir+'/997')
        targets_truth(params, healpixels=[99737,], nside=256, output_dir=self.outdir, no_spectra=False)
        self.assertTrue(os.path.exists(targetfile))
        self.assertTrue(os.path.exists(truthfile))
        with fitsio.FITS(truthfile) as fx:
            self.assertTrue('TRUTH' in fx)
            self.assertTrue('WAVE' in fx)
            self.assertTrue('FLUX' in fx)

    @unittest.skip('This test is deprecated, so skip for now.')
    def test_shapes_and_fluxes(self):
        """Shapes/fluxes copied from a 'real' catalog onto mock targets."""
        from desitarget.mock.build import add_mock_shapes_and_fluxes
        nreal = 40
        real = Table()
        real['DESI_TARGET'] = 2**np.random.randint(0,3,size=nreal)
        real['BGS_TARGET'] = np.zeros(nreal, dtype=int)
        real['BGS_TARGET'][0:5] = bgs_mask.BGS_BRIGHT
        real['BGS_TARGET'][5:10] = bgs_mask.BGS_FAINT
        real['DESI_TARGET'][0:10] = 0
        real['DECAM_FLUX'] = np.random.uniform(size=(nreal,6))
        real['SHAPEDEV_R'] = np.random.uniform(size=nreal)
        real['SHAPEEXP_R'] = np.random.uniform(size=nreal)
        nmock = 45
        mock = Table()
        mock['DESI_TARGET'] = 2**np.random.randint(0,3,size=nmock)
        mock['BGS_TARGET'] = np.zeros(nmock, dtype=int)
        mock['BGS_TARGET'][10:15] = bgs_mask.BGS_BRIGHT
        mock['BGS_TARGET'][15:20] = bgs_mask.BGS_FAINT
        mock['DESI_TARGET'][10:20] = 0
        add_mock_shapes_and_fluxes(mock, real)
        self.assertTrue('DECAM_FLUX' in mock.colnames)
        self.assertTrue('SHAPEDEV_R' in mock.colnames)
        self.assertTrue('SHAPEEXP_R' in mock.colnames)

    def test_sky(self):
        """Random in-footprint sky positions cover exactly the survey pixels."""
        nside = 256
        ra, dec, pix = random_sky(nside, allsky=False)
        self.assertEqual(len(ra), len(dec))
        surveypix = desimodel.footprint.tiles2pix(nside)
        theta = np.radians(90 - dec)
        phi = np.radians(ra)
        skypix = hp.ang2pix(nside, theta, phi, nest=True)
        self.assertEqual(set(surveypix), set(skypix))
if __name__ == '__main__':
    # Run the tests when this module is executed as a script.
    unittest.main()
def test_suite():
    """Allows testing of only this module with the command:

        python setup.py test -m desitarget.test.test_mock_build
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
5596edc89ee216191c19b02e69002a91fce67f55 | 306 | py | Python | tweetset/manage.py | janezkranjc/tweetset | 0f7ff4bed48efd5122522e1aa68c2502fefa9846 | [
"MIT"
] | 17 | 2015-06-07T20:44:45.000Z | 2021-02-07T23:34:06.000Z | tweetset/manage.py | janezkranjc/tweetset | 0f7ff4bed48efd5122522e1aa68c2502fefa9846 | [
"MIT"
] | 1 | 2018-03-15T23:28:56.000Z | 2018-03-15T23:28:56.000Z | tweetset/manage.py | janezkranjc/tweetset | 0f7ff4bed48efd5122522e1aa68c2502fefa9846 | [
"MIT"
] | 8 | 2015-02-24T21:26:29.000Z | 2016-11-10T08:28:22.000Z | #!/usr/bin/env python
import os
import sys
# Make the directory containing this script importable. Appending the file
# path itself (the previous behaviour) was useless: sys.path entries must be
# directories or zip archives, never plain files.
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
if __name__ == "__main__":
    # Fall back to the production settings module unless one is already set.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tweetset.settings.production")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 25.5 | 83 | 0.781046 |
07f3fed0dd31bdaf2a383b1a465654d2026c699b | 2,846 | py | Python | 2021/day/05/day5.py | patrick-andrew-jain-taylor/advent-of-code | ce87ed4e60f46189323a1bd6a5ba4d711597526b | [
"MIT"
] | null | null | null | 2021/day/05/day5.py | patrick-andrew-jain-taylor/advent-of-code | ce87ed4e60f46189323a1bd6a5ba4d711597526b | [
"MIT"
] | 23 | 2021-12-02T16:12:37.000Z | 2021-12-20T18:17:39.000Z | 2021/day/05/day5.py | patrick-andrew-jain-taylor/advent-of-code | ce87ed4e60f46189323a1bd6a5ba4d711597526b | [
"MIT"
] | null | null | null | """
Day 5
"""
class OceanFloor:
    """
    Representation of the ocean floor with geothermic vents.

    The floor is a ``size`` x ``size`` grid; each cell holds '.' while no
    vent line covers it, otherwise the (string) count of overlapping lines.
    """

    def __init__(self, size: int):
        self.floor = [['.' for i in range(size)] for j in range(size)]

    def __repr__(self):
        return '\n'.join(''.join(row) for row in self.floor)

    def __str__(self):
        return '\n'.join(''.join(row) for row in self.floor)

    def __mark(self, x: int, y: int):
        """Increment the overlap count of the cell at column x, row y.

        Extracted helper: the same increment logic was previously
        duplicated in the three *_line methods.
        """
        cell = self.floor[y][x]
        self.floor[y][x] = '1' if cell == '.' else f'{int(cell) + 1}'

    def __diag_line(self, left, right):
        """
        Mark every cell on the 45-degree diagonal from ``left`` to ``right``
        (endpoints given as (x, y) string pairs, both inclusive).
        :return: the updated floor grid
        """
        horz_range = (
            range(int(left[0]), int(right[0]) + 1)
            if int(left[0]) < int(right[0])
            else range(int(left[0]), int(right[0]) - 1, -1)
        )
        vert_range = (
            range(int(left[1]), int(right[1]) + 1)
            if int(left[1]) < int(right[1])
            else range(int(left[1]), int(right[1]) - 1, -1)
        )
        for i, j in zip(horz_range, vert_range):
            self.__mark(i, j)
        return self.floor

    def __horz_line(self, left, right):
        """
        Mark every cell on the horizontal segment between the endpoints.
        :return: the updated floor grid
        """
        row = int(left[1])
        start, stop = min(int(left[0]), int(right[0])), max(int(left[0]), int(right[0]))
        for i in range(start, stop + 1):
            self.__mark(i, row)
        return self.floor

    def __vert_line(self, left, right):
        """
        Mark every cell on the vertical segment between the endpoints.
        :return: the updated floor grid
        """
        column = int(left[0])
        start, stop = min(int(left[1]), int(right[1])), max(int(left[1]), int(right[1]))
        for i in range(start, stop + 1):
            self.__mark(column, i)
        return self.floor

    def add_line(self, line):
        """
        Take in a line segment formatted as 'x1,y1 -> x2,y2' and update
        the floor, dispatching on the segment orientation.
        """
        left, right = line.split(' -> ')
        left, right = left.split(','), right.split(',')
        if left[0] == right[0]:
            self.__vert_line(left, right)
        elif left[1] == right[1]:
            self.__horz_line(left, right)
        else:
            self.__diag_line(left, right)

    def danger(self):
        """
        Determine the number of danger points: cells where at least two
        vent lines overlap.
        :return: count of cells with overlap count >= 2
        """
        danger = 0
        for row in self.floor:
            for column in row:
                if column != '.' and int(column) >= 2:
                    danger += 1
        return danger
def main():
    """Read the puzzle input, replay every vent line, print the danger count."""
    with open("input.txt", "r") as infile:
        segments = list(infile.read().splitlines())
    ocean = OceanFloor(1000)
    for segment in segments:
        ocean.add_line(segment)
    print(ocean.danger())
if __name__ == '__main__':
    # Solve the puzzle when executed as a script.
    main()
| 28.747475 | 112 | 0.501405 |
1b9a4867f39d169b3b45af213e09cc0c7bf78b13 | 991 | py | Python | src/ZODB/scripts/referrers.py | unkloud/ZODB | 63dcee8d3bf0df6a05b75a378292a1a6061ce0c2 | [
"ZPL-2.1"
] | null | null | null | src/ZODB/scripts/referrers.py | unkloud/ZODB | 63dcee8d3bf0df6a05b75a378292a1a6061ce0c2 | [
"ZPL-2.1"
] | null | null | null | src/ZODB/scripts/referrers.py | unkloud/ZODB | 63dcee8d3bf0df6a05b75a378292a1a6061ce0c2 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Compute a table of object id referrers
$Id$
"""
from ZODB.serialize import referencesf
def referrers(storage):
    """Map each referenced oid to the list of (oid, tid) records that refer to it."""
    referrer_map = {}
    for txn in storage.iterator():
        for record in txn:
            source = (record.oid, record.tid)
            for target_oid in referencesf(record.data):
                referrer_map.setdefault(target_oid, []).append(source)
    return referrer_map
| 34.172414 | 78 | 0.602422 |
0b4cb5ff09dee55d438b9ae9d5c595bb96d38216 | 444 | py | Python | test/visuals/test_pattern.py | rohernandezz/coldtype | 724234fce454699a469d17b6c78ae50fa8138169 | [
"Apache-2.0"
] | null | null | null | test/visuals/test_pattern.py | rohernandezz/coldtype | 724234fce454699a469d17b6c78ae50fa8138169 | [
"Apache-2.0"
] | null | null | null | test/visuals/test_pattern.py | rohernandezz/coldtype | 724234fce454699a469d17b6c78ae50fa8138169 | [
"Apache-2.0"
] | null | null | null | from coldtype import *
@renderable((1000, 1000))
def test_pattern_generate(r):
return (DATPen()
.rect(r.inset(250))
.rotate(45)
.flatten()
.roughen(150)
.f(hsl(0.7, 0.7, 0.7)))
@renderable((1000, 1000))
def test_pattern_use(r):
imgp = test_pattern_generate.last_passes[0].output_path
return (DATPen()
.rect(r)
.f(1)
.img(imgp, r.take(50, "mdx").square(), pattern=1)) | 24.666667 | 59 | 0.578829 |
a8638317578df29a041c7eebf5c64b9cd208285b | 6,672 | py | Python | mv_gp.py | shiyuechengineer/mv-gp | f68111f06e9d29f626b2d638d5576cca0fc3236d | [
"MIT"
] | null | null | null | mv_gp.py | shiyuechengineer/mv-gp | f68111f06e9d29f626b2d638d5576cca0fc3236d | [
"MIT"
] | null | null | null | mv_gp.py | shiyuechengineer/mv-gp | f68111f06e9d29f626b2d638d5576cca0fc3236d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
READ_ME = '''
=== PREREQUISITES ===
Run in Python 3
Install both requests & Meraki Dashboard API Python modules:
pip[3] install requests [--upgrade]
pip[3] install meraki [--upgrade]
=== DESCRIPTION ===
This script finds all MV cameras with a specified tag, and then iterates
through all networks to apply an exisitng group policy (enforced by the MX)
to the applicable cameras as client devices.
Maintained in GitHub @ https://github.com/shiyuechengineer/mv-gp
=== USAGE ===
python mv_gp.py -k <api_key> -o <org_id> -t <tag> -p <policy> [-m <mode>]
The -t parameter specifies the required tag that needs to be present on the MV
camera, and -p the name of the MX group policy to be applied.
The optional -m parameter is either "simulate" (default) to only print changes,
or "commit" to also apply those changes to Dashboard.
'''
import getopt
import logging
import sys
from datetime import datetime
from meraki import meraki
# Prints READ_ME help message for user to read
def print_help():
    """Print the READ_ME usage text, prefixing every line with '# '."""
    for readme_line in READ_ME.split('\n'):
        print('# {0}'.format(readme_line))
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def configure_logging():
    """Configure DEBUG-level file logging with a timestamped log file name."""
    logfile = 'mv_gp_log_{:%Y%m%d_%H%M%S}.txt'.format(datetime.now())
    logging.basicConfig(
        filename=logfile,
        level=logging.DEBUG,
        format='%(asctime)s: %(levelname)7s: [%(name)s]: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
def main(argv):
    """
    Parse CLI arguments, find MV cameras carrying the given tag across the
    org, and apply the named MX group policy to them on every network where
    they appear as wired clients (or only simulate, depending on -m).
    """
    # Set default values for command line arguments
    api_key = org_id = arg_tag = arg_policy = arg_mode = None

    # Get command line arguments
    try:
        opts, _ = getopt.getopt(argv, 'hk:o:t:p:m:')
    except getopt.GetoptError:
        print_help()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_help()
            sys.exit()
        elif opt == '-k':
            api_key = arg
        elif opt == '-o':
            org_id = arg
        elif opt == '-t':
            arg_tag = arg
        elif opt == '-p':
            arg_policy = arg
        elif opt == '-m':
            arg_mode = arg

    # Check if all required parameters have been input
    if api_key is None or org_id is None or arg_tag is None or arg_policy is None:
        print_help()
        sys.exit(2)

    # Assign default mode to "simulate" unless "commit" specified
    if arg_mode != 'commit':
        arg_mode = 'simulate'

    # Get org's inventory
    inventory = meraki.getorginventory(api_key, org_id)

    # Filter for only MV devices added to a network. Note the one-element
    # tuple: the previous ``in ('MV')`` was a substring test against the
    # string 'MV' and would also have matched a bare 'M'.
    cameras = [device for device in inventory
               if device['model'][:2] in ('MV',) and device['networkId'] is not None]

    # Gather the networks (IDs) where cameras have been added
    camera_network_ids = set([camera['networkId'] for camera in cameras])
    logger.info('Found a total of {0} cameras added to {1} networks in this Dashboard organization'.format(len(cameras), len(camera_network_ids)))

    # Iterate through camera networks and find cameras with specified tag
    camera_macs = []
    for net_id in camera_network_ids:
        devices = meraki.getnetworkdevices(api_key, net_id)
        for device in devices:
            if device['model'][:2] == 'MV' and 'tags' in device and arg_tag in device['tags']:
                camera_macs.append(device['mac'])
    logger.info('Found {0} cameras with the tag "{1}"'.format(len(camera_macs), arg_tag))

    # Get list of all networks in org
    networks = meraki.getnetworklist(api_key, org_id)

    # Iterate through all networks, looking for cameras as clients, and apply group policy
    for network in networks:
        # Get the Meraki devices in this network
        devices = meraki.getnetworkdevices(api_key, network['id'])
        # Filter for just the first two characters of each device model
        device_models = [device['model'][:2] for device in devices]
        # Is there an MX here? If so, get its index in the list of devices
        if 'MX' in device_models:
            # We found the MX device in the network
            mx_device = devices[device_models.index('MX')]
        else:
            # No MX in this network, doesn't make sense to apply a group policy to wired clients (cameras), so move on
            continue
        # Get list of MX clients (timestamp lookback of 2592000 s = 30 days)
        clients = meraki.getclients(api_key, mx_device['serial'], timestamp=2592000)
        # Filter for MAC addresses of these clients
        client_macs = [client['mac'] for client in clients]
        # Cameras in this network = intersection of clients in this network and cameras in the org
        network_cameras = set(client_macs).intersection(camera_macs)
        # Assign group policy to these cameras in the network
        if network_cameras:
            # Gather group policies of network
            gps = meraki.getgrouppolicies(api_key, network['id'])
            # Get human-readable names of all group policies
            gp_names = [gp['name'] for gp in gps]
            # Look for the group policy
            gp_camera = gps[gp_names.index(arg_policy)]
            # Assign that group policy (by ID) to the camera by MAC address
            for mac in network_cameras:
                if arg_mode == 'commit':
                    meraki.updateclientpolicy(api_key, network['id'], mac, policy='group', policyid=gp_camera['groupPolicyId'])
                    logger.info('Assigning group policy "{0}" on network "{1}" for MV camera {2}'.format(arg_policy, network['name'], mac))
                else:
                    logger.info('Simulating group policy "{0}" on network "{1}" for MV camera {2}'.format(arg_policy, network['name'], mac))
if __name__ == '__main__':
    # Configure logging to stdout
    configure_logging()
    # Define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # Set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # Tell the handler to use this format
    console.setFormatter(formatter)
    # Add the handler to the root logger
    logging.getLogger('').addHandler(console)
    # Output to logfile/console starting inputs
    start_time = datetime.now()
    logger.info('Started script at {0}'.format(start_time))
    inputs = sys.argv[1:]
    # Strip the '-k' flag and its API-key value so the key is never logged.
    key_index = inputs.index('-k')
    inputs.pop(key_index+1)
    inputs.pop(key_index)
    logger.info('Input parameters: {0}'.format(inputs))
    main(sys.argv[1:])
    # Finish output to logfile/console
    end_time = datetime.now()
    logger.info('Ended script at {0}'.format(end_time))
    logger.info(f'Total run time = {end_time - start_time}')
| 37.483146 | 146 | 0.646283 |
7eda1f1eb620394617665044e36e0a7034d7007f | 3,727 | py | Python | openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_probackup/Opengauss_Function_Tools_Gs_Probackup_Case0043.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_probackup/Opengauss_Function_Tools_Gs_Probackup_Case0043.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_probackup/Opengauss_Function_Tools_Gs_Probackup_Case0043.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 系统内部使用工具
Case Name : 指定正确的连接选项、强制输入密码,指定-p、-d、-U(omm用户)、
-W选项,执行备份
Description :
1.新建目录
2.进行初始化
3.在备份路径内初始化一个新的备份实例
4.执行备份
5.删除新建目录
Expect :
1.新建目录成功
2.进行初始化成功
3.在备份路径内初始化一个新的备份实例成功
4.执行备份成功
5.删除新建目录成功
History :
"""
import unittest
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
from yat.test import Node
from yat.test import macro
LOG = Logger()
class SystemInternalTools(unittest.TestCase):
def setUp(self):
LOG.info('-------------------this is setup--------------------')
LOG.info('-Opengauss_Function_Tools_Gs_Probackup_Case0043开始执行-')
self.constant = Constant()
self.PrimaryNode = Node('PrimaryDbUser')
self.sh_primary = CommonSH('PrimaryDbUser')
def test_system_internal_tools(self):
LOG.info('---------step1 新建备份目录--------------')
instance_path = f'{macro.DB_INSTANCE_PATH}'
LOG.info('实例路径为:' + instance_path)
index1 = instance_path.find('/')
index2 = instance_path.rfind('/')
self.cluster_path = instance_path[index1:index2]
LOG.info(self.cluster_path)
init_cmd = f"mkdir {self.cluster_path}/testdir;"
LOG.info(init_cmd)
init_msg = self.PrimaryNode.sh(init_cmd).result()
LOG.info(init_msg)
self.assertNotIn(self.constant.SQL_WRONG_MSG[1], init_msg)
LOG.info('----------step2 进行初始化------------------')
init_cmd = f"source {macro.DB_ENV_PATH};gs_probackup init -B " \
f"{self.cluster_path}/testdir;"
LOG.info(init_cmd)
init_msg = self.PrimaryNode.sh(init_cmd).result()
LOG.info(init_msg)
self.assertIn(self.constant.init_success, init_msg)
LOG.info('-----step3 在备份路径内初始化一个新的备份实例---')
init_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_probackup add-instance -B {self.cluster_path}/testdir " \
f"-D {macro.DB_INSTANCE_PATH} --instance=pro1;"
LOG.info(init_cmd)
init_msg = self.PrimaryNode.sh(init_cmd).result()
LOG.info(init_msg)
self.assertIn("'pro1' " + self.constant.init_success, init_msg)
LOG.info('-------------step4 执行全量备份---------------')
back_cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_probackup backup -B {self.cluster_path}/testdir " \
f" --instance=pro1 -b full -d {self.PrimaryNode.db_name} -p " \
f"{self.PrimaryNode.db_port} -U {self.PrimaryNode.ssh_user} " \
f"-W {self.PrimaryNode.ssh_password} ; "
LOG.info(back_cmd)
back_msg = self.PrimaryNode.sh(back_cmd).result()
LOG.info(back_msg)
self.assertIn('completed', back_msg)
def tearDown(self):
LOG.info('------------------this is tearDown--------------------')
LOG.info('----------------step5 删除新建目录-------------------')
clear_cmd = f"rm -rf {self.cluster_path}/testdir;"
LOG.info(clear_cmd)
clear_msg = self.PrimaryNode.sh(clear_cmd).result()
LOG.info(clear_msg)
LOG.info('-Opengauss_Function_Tools_Gs_Probackup_Case0043执行完成-')
| 36.184466 | 84 | 0.624094 |
5799c41e83d382a800b17033c97def48243465bc | 167 | py | Python | multiplication.py | AkashM006/the-iron-legions | 9cde6dd97b03463ce0d3d0782f4cf08f97e0e3f9 | [
"MIT"
] | null | null | null | multiplication.py | AkashM006/the-iron-legions | 9cde6dd97b03463ce0d3d0782f4cf08f97e0e3f9 | [
"MIT"
] | 1 | 2021-03-20T15:18:46.000Z | 2021-03-20T15:18:46.000Z | multiplication.py | AkashM006/the-iron-legions | 9cde6dd97b03463ce0d3d0782f4cf08f97e0e3f9 | [
"MIT"
] | null | null | null | def domultiplication(a,b):
return (a*b) # Return the product of a and b
a = int(input("Enter a"))
b = int(input("Enter b"))
res = domultiplication(a,b)
print(res) | 23.857143 | 48 | 0.664671 |
68179ad54c20ae5dad526ac7f369b47993496fab | 9,404 | py | Python | tests/components/ps4/test_init.py | guiguid/core | d43617c41d6507f2d2b77aadf4fa1ebaf0058b14 | [
"Apache-2.0"
] | 1 | 2020-04-07T15:44:54.000Z | 2020-04-07T15:44:54.000Z | tests/components/ps4/test_init.py | guiguid/core | d43617c41d6507f2d2b77aadf4fa1ebaf0058b14 | [
"Apache-2.0"
] | null | null | null | tests/components/ps4/test_init.py | guiguid/core | d43617c41d6507f2d2b77aadf4fa1ebaf0058b14 | [
"Apache-2.0"
] | null | null | null | """Tests for the PS4 Integration."""
from asynctest import MagicMock, patch
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ps4
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_GAME,
)
from homeassistant.components.ps4.const import (
ATTR_MEDIA_IMAGE_URL,
COMMANDS,
CONFIG_ENTRY_VERSION as VERSION,
DEFAULT_REGION,
DOMAIN,
PS4_DATA,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ATTR_LOCKED,
CONF_HOST,
CONF_NAME,
CONF_REGION,
CONF_TOKEN,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from homeassistant.util import location
from tests.common import MockConfigEntry, mock_registry
# --- Basic device / config-entry fixtures ---
MOCK_HOST = "192.168.0.1"
MOCK_NAME = "test_ps4"
MOCK_REGION = "Some Region"
MOCK_CREDS = "1234567890A"
MOCK_DEVICE = {CONF_HOST: MOCK_HOST, CONF_NAME: MOCK_NAME, CONF_REGION: MOCK_REGION}
MOCK_DATA = {CONF_TOKEN: MOCK_CREDS, "devices": [MOCK_DEVICE]}
MOCK_FLOW_RESULT = {
    "version": VERSION,
    "handler": DOMAIN,
    "type": data_entry_flow.RESULT_TYPE_CREATE_ENTRY,
    "title": "test_ps4",
    "data": MOCK_DATA,
}
MOCK_ENTRY_ID = "SomeID"
MOCK_CONFIG = MockConfigEntry(domain=DOMAIN, data=MOCK_DATA, entry_id=MOCK_ENTRY_ID)
MOCK_LOCATION = location.LocationInfo(
    "0.0.0.0",
    "US",
    "United States",
    "CA",
    "California",
    "San Diego",
    "92122",
    "America/Los_Angeles",
    32.8594,
    -117.2073,
    True,
)
# --- Version-1 config entry used by the migration test ---
MOCK_DEVICE_VERSION_1 = {
    CONF_HOST: MOCK_HOST,
    CONF_NAME: MOCK_NAME,
    CONF_REGION: "Some Region",
}
MOCK_DATA_VERSION_1 = {CONF_TOKEN: MOCK_CREDS, "devices": [MOCK_DEVICE_VERSION_1]}
MOCK_DEVICE_ID = "somedeviceid"
MOCK_ENTRY_VERSION_1 = MockConfigEntry(
    domain=DOMAIN, data=MOCK_DATA_VERSION_1, entry_id=MOCK_ENTRY_ID, version=1
)
MOCK_UNIQUE_ID = "someuniqueid"
# --- Games-file fixtures (old flat format and new nested-dict format) ---
MOCK_ID = "CUSA00123"
MOCK_URL = "http://someurl.jpeg"
MOCK_TITLE = "Some Title"
MOCK_TYPE = MEDIA_TYPE_GAME
MOCK_GAMES_DATA_OLD_STR_FORMAT = {"mock_id": "mock_title", "mock_id2": "mock_title2"}
MOCK_GAMES_DATA = {
    ATTR_LOCKED: False,
    ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
    ATTR_MEDIA_IMAGE_URL: MOCK_URL,
    ATTR_MEDIA_TITLE: MOCK_TITLE,
}
MOCK_GAMES_DATA_LOCKED = {
    ATTR_LOCKED: True,
    ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
    ATTR_MEDIA_IMAGE_URL: MOCK_URL,
    ATTR_MEDIA_TITLE: MOCK_TITLE,
}
MOCK_GAMES = {MOCK_ID: MOCK_GAMES_DATA}
MOCK_GAMES_LOCKED = {MOCK_ID: MOCK_GAMES_DATA_LOCKED}
async def test_ps4_integration_setup(hass):
    """Test PS4 integration is setup."""
    await ps4.async_setup(hass, {})
    await hass.async_block_till_done()
    # Setup must have stored the shared PS4 data with a non-None protocol.
    assert hass.data[PS4_DATA].protocol is not None
async def test_creating_entry_sets_up_media_player(hass):
    """Test setting up PS4 loads the media player."""
    flow_target = "homeassistant.components.ps4.PlayStation4FlowHandler.async_step_user"
    setup_target = "homeassistant.components.ps4.media_player.async_setup_entry"
    with patch(setup_target, return_value=True) as mock_setup, patch(
        flow_target, return_value=MOCK_FLOW_RESULT
    ):
        flow_result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        assert flow_result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        await hass.async_block_till_done()
    # Creating the entry must have triggered exactly one media_player setup.
    assert len(mock_setup.mock_calls) == 1
async def test_config_flow_entry_migrate(hass):
    """Test that config flow entry is migrated correctly."""
    # Start with the config entry at Version 1.
    manager = hass.config_entries
    mock_entry = MOCK_ENTRY_VERSION_1
    mock_entry.add_to_manager(manager)
    mock_e_registry = mock_registry(hass)
    mock_entity_id = f"media_player.ps4_{MOCK_UNIQUE_ID}"
    mock_e_entry = mock_e_registry.async_get_or_create(
        "media_player",
        "ps4",
        MOCK_UNIQUE_ID,
        config_entry=mock_entry,
        device_id=MOCK_DEVICE_ID,
    )
    # Sanity-check the pre-migration registry state.
    assert len(mock_e_registry.entities) == 1
    assert mock_e_entry.entity_id == mock_entity_id
    assert mock_e_entry.unique_id == MOCK_UNIQUE_ID
    # Migrate with location detection and the entity registry both mocked.
    with patch(
        "homeassistant.util.location.async_detect_location_info",
        return_value=MOCK_LOCATION,
    ), patch(
        "homeassistant.helpers.entity_registry.async_get_registry",
        return_value=mock_e_registry,
    ):
        await ps4.async_migrate_entry(hass, mock_entry)
    await hass.async_block_till_done()
    assert len(mock_e_registry.entities) == 1
    for entity in mock_e_registry.entities.values():
        mock_entity = entity
    # Test that entity_id remains the same.
    assert mock_entity.entity_id == mock_entity_id
    assert mock_entity.device_id == MOCK_DEVICE_ID
    # Test that last four of credentials is appended to the unique_id.
    assert mock_entity.unique_id == "{}_{}".format(MOCK_UNIQUE_ID, MOCK_CREDS[-4:])
    # Test that config entry is at the current version.
    assert mock_entry.version == VERSION
    assert mock_entry.data[CONF_TOKEN] == MOCK_CREDS
    assert mock_entry.data["devices"][0][CONF_HOST] == MOCK_HOST
    assert mock_entry.data["devices"][0][CONF_NAME] == MOCK_NAME
    assert mock_entry.data["devices"][0][CONF_REGION] == DEFAULT_REGION
async def test_media_player_is_setup(hass):
    """Test media_player is setup correctly."""
    await setup_mock_component(hass)
    # Exactly one PS4 device should have been registered.
    assert len(hass.data[PS4_DATA].devices) == 1
async def setup_mock_component(hass):
    """Set up Mock Media Player."""
    config_entry = MockConfigEntry(domain=ps4.DOMAIN, data=MOCK_DATA, version=VERSION)
    config_entry.add_to_manager(hass.config_entries)
    # Load the PS4 integration with the mock entry registered.
    await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
    await hass.async_block_till_done()
def test_games_reformat_to_dict(hass):
    """Test old data format is converted to new format."""
    with patch(
        "homeassistant.components.ps4.load_json",
        return_value=MOCK_GAMES_DATA_OLD_STR_FORMAT,
    ), patch("homeassistant.components.ps4.save_json", side_effect=MagicMock()), patch(
        "os.path.isfile", return_value=True
    ):
        games = ps4.load_games(hass)

    # New format is a nested dict keyed by game id.
    assert isinstance(games, dict)
    assert games["mock_id"][ATTR_MEDIA_TITLE] == "mock_title"
    assert games["mock_id2"][ATTR_MEDIA_TITLE] == "mock_title2"
    for game_data in games.values():
        assert isinstance(game_data, dict)
        assert game_data
        assert game_data[ATTR_MEDIA_IMAGE_URL] is None
        assert game_data[ATTR_LOCKED] is False
        assert game_data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_GAME
def test_load_games(hass):
    """Test that games are loaded correctly."""
    with patch(
        "homeassistant.components.ps4.load_json", return_value=MOCK_GAMES
    ), patch("homeassistant.components.ps4.save_json", side_effect=MagicMock()), patch(
        "os.path.isfile", return_value=True
    ):
        games = ps4.load_games(hass)

    assert isinstance(games, dict)
    entry = games[MOCK_ID]
    assert isinstance(entry, dict)
    # Every field of the stored game entry survives the round trip.
    assert entry[ATTR_MEDIA_TITLE] == MOCK_TITLE
    assert entry[ATTR_MEDIA_IMAGE_URL] == MOCK_URL
    assert entry[ATTR_LOCKED] is False
    assert entry[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_GAME
def test_loading_games_returns_dict(hass):
    """Test that loading games always returns a dict."""
    # Same three scenarios as before: load_json raising, returning a plain
    # string, and returning a list — all must yield an empty dict.
    load_patches = (
        patch(
            "homeassistant.components.ps4.load_json", side_effect=HomeAssistantError
        ),
        patch("homeassistant.components.ps4.load_json", return_value="Some String"),
        patch("homeassistant.components.ps4.load_json", return_value=[]),
    )
    for load_patch in load_patches:
        with load_patch, patch(
            "homeassistant.components.ps4.save_json", side_effect=MagicMock()
        ), patch("os.path.isfile", return_value=True):
            games = ps4.load_games(hass)
        assert isinstance(games, dict)
        assert not games
async def test_send_command(hass):
    """Test send_command service: each supported command must invoke the
    device's async_send_command exactly once."""
    await setup_mock_component(hass)

    # Dotted path of the method the service is expected to call.
    mock_func = "{}{}".format(
        "homeassistant.components.ps4", ".media_player.PS4Device.async_send_command"
    )

    # Exactly one mock device was registered by setup_mock_component.
    mock_devices = hass.data[PS4_DATA].devices
    assert len(mock_devices) == 1
    mock_entity = mock_devices[0]
    assert mock_entity.entity_id == f"media_player.{MOCK_NAME}"

    # Test that all commands call service function.
    with patch(mock_func, return_value=True) as mock_service:
        for mock_command in COMMANDS:
            await hass.services.async_call(
                DOMAIN,
                "send_command",
                {ATTR_ENTITY_ID: mock_entity.entity_id, ATTR_COMMAND: mock_command},
            )
            await hass.async_block_till_done()
    # One call per command: the service fans out to the device method.
    assert len(mock_service.mock_calls) == len(COMMANDS)
| 32.095563 | 87 | 0.720438 |
dc704ffafe07ff565a77f5208975f355d9b20180 | 2,023 | py | Python | Module2/assignment5.py | luuduytung/programming-with-python-for-data-science-microsoft | 87d855020c0f991e38365b4848836e751617d658 | [
"MIT"
] | null | null | null | Module2/assignment5.py | luuduytung/programming-with-python-for-data-science-microsoft | 87d855020c0f991e38365b4848836e751617d658 | [
"MIT"
] | null | null | null | Module2/assignment5.py | luuduytung/programming-with-python-for-data-science-microsoft | 87d855020c0f991e38365b4848836e751617d658 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
#
# Load up the dataset, setting correct header labels.
#
df = pd.read_csv('Datasets/census.data', header=None,
                 names=['education', 'age', 'capital-gain', 'race',
                        'capital-loss', 'hours-per-week', 'sex', 'classification'])

#
# Inspect the dtypes before cleaning: numeric columns that show up as
# 'object' contain strings or missing-value markers.
#
print(df.dtypes)

#
# Coerce the numeric columns; any non-numeric junk becomes NaN.
# Bug fix: errors='coerc' is not a valid value for pd.to_numeric (it
# raises ValueError) -- the correct spelling is 'coerce'.
#
for column in ['age', 'capital-gain', 'capital-loss', 'hours-per-week']:
    df[column] = pd.to_numeric(df[column], errors='coerce')

#
# Encode the categorical features:
#   - 'race' and 'sex' are nominal -> one-hot encode with get_dummies
#   - 'classification' is ordinal (<=50K < >50K) -> ordered category codes
#
print(pd.DataFrame(df.race.unique()))
df = pd.get_dummies(df, columns=['sex'])

classi = ['<=50K', '>50K']
# Bug fix: astype('category', ordered=..., categories=...) was removed
# from pandas; build an explicit ordered CategoricalDtype instead, and
# actually assign the codes back (the original discarded the result).
df.classification = df.classification.astype(
    pd.api.types.CategoricalDtype(categories=classi, ordered=True)).cat.codes

#
# Print out your dataframe
#
print(df)
| 31.123077 | 129 | 0.715769 |
ec1107bad0c6253d6b60cdf273b1764ec23f2bea | 1,521 | py | Python | DesignPattern/pool.py | zzragida/PythonExamples | ed94ae2773a580a42e158ebdc7321a89ca4e991b | [
"MIT"
] | null | null | null | DesignPattern/pool.py | zzragida/PythonExamples | ed94ae2773a580a42e158ebdc7321a89ca4e991b | [
"MIT"
] | null | null | null | DesignPattern/pool.py | zzragida/PythonExamples | ed94ae2773a580a42e158ebdc7321a89ca4e991b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://stackoverflow.com/questions/1514120/python-implementation-of-the-object-pool-design-pattern"""
class QueueObject():
    """Context manager that checks an object out of a queue-backed pool.

    On entry an object is taken from the pool; on exit (or when the
    wrapper is garbage-collected) it is returned, so pool members are
    never lost.
    """

    def __init__(self, queue, auto_get=False):
        self._queue = queue
        # Check an object out immediately when auto_get is set;
        # otherwise wait until the context is entered.
        self.object = self._queue.get() if auto_get else None

    def __enter__(self):
        if self.object is None:
            self.object = self._queue.get()
        return self.object

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._release()

    def __del__(self):
        self._release()

    def _release(self):
        # Return the checked-out object to the pool, if we hold one.
        if self.object is not None:
            self._queue.put(self.object)
            self.object = None
def main():
    """Exercise QueueObject three ways: via ``with``, via a plain
    function call, and via auto_get inside that function."""
    try:
        import queue
    except ImportError:  # python 2.x compatibility
        import Queue as queue

    def test_object(queue):
        # auto_get=True checks the object out at construction time.
        queue_object = QueueObject(queue, True)
        print('Inside func: {}'.format(queue_object.object))
        # NOTE: on CPython the wrapper's refcount drops here, so
        # __del__ puts the object back before the caller's next get().

    sample_queue = queue.Queue()

    sample_queue.put('yam')
    with QueueObject(sample_queue) as obj:
        print('Inside with: {}'.format(obj))
    # __exit__ returned 'yam' to the queue, so it can be fetched again.
    print('Outside with: {}'.format(sample_queue.get()))

    sample_queue.put('sam')
    test_object(sample_queue)
    print('Outside func: {}'.format(sample_queue.get()))

    # The pool should be empty by now; drain anything left over.
    if not sample_queue.empty():
        print(sample_queue.get())


if __name__ == '__main__':
    main()
### OUTPUT ###
# Inside with: yam
# Outside with: yam
# Inside func: sam
# Outside func: sam
| 24.532258 | 104 | 0.625904 |
37bae3636bef81446c938f76a5d4768e40d9deb3 | 24,970 | py | Python | rf-controller/src/nox/lib/core.py | ederlf/RouteFlow | 2955dbd51b0d2f823356341a30caf39c2ef5da5a | [
"Apache-2.0"
] | 1 | 2021-03-15T18:49:51.000Z | 2021-03-15T18:49:51.000Z | rf-controller/src/nox/lib/core.py | ederlf/RouteFlow | 2955dbd51b0d2f823356341a30caf39c2ef5da5a | [
"Apache-2.0"
] | null | null | null | rf-controller/src/nox/lib/core.py | ederlf/RouteFlow | 2955dbd51b0d2f823356341a30caf39c2ef5da5a | [
"Apache-2.0"
] | null | null | null | # Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
import logging
import array
import struct
import types
from socket import htons, htonl
import nox.lib.openflow as openflow
from nox.coreapps.pyrt.pycomponent import *
from util import *
from nox.lib.netinet.netinet import Packet_expr
from nox.lib.netinet.netinet import create_eaddr
from nox.lib.packet import *
lg = logging.getLogger('core')
IN_PORT = "in_port"
AP_SRC = "ap_src"
AP_DST = "ap_dst"
DL_SRC = "dl_src"
DL_DST = "dl_dst"
DL_VLAN = "dl_vlan"
DL_VLAN_PCP = "dl_vlan_pcp"
DL_TYPE = "dl_type"
NW_SRC = "nw_src"
NW_SRC_N_WILD = "nw_src_n_wild"
NW_DST = "nw_dst"
NW_DST_N_WILD = "nw_dst_n_wild"
NW_PROTO = "nw_proto"
NW_TOS = "nw_tos"
TP_SRC = "tp_src"
TP_DST = "tp_dst"
GROUP_SRC = "group_src"
GROUP_DST = "group_dst"
N_TABLES = 'n_tables'
N_BUFFERS = 'n_bufs'
CAPABILITES = 'caps'
ACTIONS = 'actions'
PORTS = 'ports'
PORT_NO = 'port_no'
SPEED = 'speed'
CONFIG = 'config'
STATE = 'state'
CURR = 'curr'
ADVERTISED = 'advertised'
SUPPORTED = 'supported'
PEER = 'peer'
HW_ADDR = 'hw_addr'
################################################################################
# API NOTES:
#
# Automatically returns CONTINUE for handlers that do not
# return a value (handlers are supposed to return a Disposition)
#
# All values should be passed in host byte order. The API changes
# values to network byte order based on knowledge of field. (NW_SRC
# --> 32 bit val, TP_SRC --> 16 bit value, etc.). Other than
# DL_SRC/DST and NW_SRC/DST fields, packet header fields should be
# passed as integers. DL_SRC, DL_DST fields should be passed in as
# either vigil.netinet.ethernetaddr objects. They can however also be
# passed in any other type that an ethernetaddr constructor has be
# defined for. NW_SRC/NW_DST fields meanwhile can be passed either
# ints, ip strings, or vigil.netinet.ipaddr objects.
###########################################################################
class Component:
"""Abstract class to inherited by all Python components.
\ingroup noxapi
"""
    def __init__(self, ctxt):
        # Component context; all kernel interaction goes through it.
        self.ctxt = ctxt
        # Lazily-built cache of installed component names
        # (see is_component_loaded).
        self.component_names = None

    def configure(self, config):
        """Configure the component.

        Once configured, the component has parsed its configuration and
        resolve any references to other components it may have.
        """
        pass

    def install(self):
        """Install the component.

        Once installed, the component runs and is usable by other
        components.
        """
        pass

    def getInterface(self):
        """Return the interface (class) component provides. The default
        implementation returns the class itself."""
        return self.__class__

    def resolve(self, interface):
        # Look up another installed component by its interface name.
        return self.ctxt.resolve(str(interface))

    # Interface to allow components to check at runtime without having
    # to import them (which will cause linking errors)
    def is_component_loaded(self, name):
        """Return True if a component called 'name' is installed."""
        # Build the name cache on first use; later calls reuse it.
        if not self.component_names:
            self.component_names = []
            for component in self.ctxt.get_kernel().get_all():
                self.component_names.append(component.get_name())
        return name in self.component_names

    def register_event(self, event_name):
        """Declare a new event type with the context."""
        return self.ctxt.register_event(event_name)

    def register_python_event(self, event_name):
        """Declare a new event type via the Python-side registration."""
        return self.ctxt.register_python_event(event_name)

    def register_handler(self, event_name, handler):
        """Attach 'handler' to every occurrence of 'event_name'.

        Per the API notes at the top of this module, a handler that
        returns no value is treated as returning CONTINUE.
        """
        return self.ctxt.register_handler(event_name, handler)

    def post_timer(self, event):
        return self.ctxt.post_timer(event)

    def post(self, event):
        """Post an event to the context for dispatch."""
        # if event is a swigobject, make sure that it doesn't try
        # to handle memory deletion
        if hasattr(event, 'thisown'):
            event.thisown = 0 # just in case
        return self.ctxt.post(event)
    def make_action_array(self, actions):
        """Serialize a list of [action_type, argument] pairs into the
        packed binary openflow action layout.

        Returns the packed byte string, or None when an action is
        malformed (an error is printed in that case).
        """
        action_str = ""
        for action in actions:
            # OFPAT_OUTPUT takes [max_len, port_no]; note the pack order
            # below is (port, max_len).
            if action[0] == openflow.OFPAT_OUTPUT \
                    and isinstance(action[1],list) \
                    and len(action[1]) == 2:
                a = struct.pack("!HHHH", action[0], 8,
                                action[1][1], action[1][0])
            elif action[0] == openflow.OFPAT_SET_VLAN_VID:
                a = struct.pack("!HHHH", action[0], 8, action[1], 0)
            elif action[0] == openflow.OFPAT_SET_VLAN_PCP:
                a = struct.pack("!HHBBH", action[0], 8, action[1], 0, 0)
            elif action[0] == openflow.OFPAT_STRIP_VLAN:
                a = struct.pack("!HHI", action[0], 8, 0)
            elif action[0] == openflow.OFPAT_SET_DL_SRC \
                    or action[0] == openflow.OFPAT_SET_DL_DST:
                # Ethernet address rewrites: 16-byte action with the
                # 6-byte address followed by padding.
                eaddr = convert_to_eaddr(action[1])
                if eaddr == None:
                    print 'invalid ethernet addr'
                    return None
                a = struct.pack("!HH6sHI", action[0], 16, eaddr.binary_str(), 0, 0)
            elif action[0] == openflow.OFPAT_SET_NW_SRC \
                    or action[0] == openflow.OFPAT_SET_NW_DST:
                iaddr = convert_to_ipaddr(action[1])
                if iaddr == None:
                    print 'invalid ip addr'
                    return None
                # NOTE(review): native-order format plus explicit
                # htons/htonl, unlike the "!..." formats above; the
                # resulting bytes appear network-ordered either way,
                # but the inconsistency is worth confirming.
                a = struct.pack("HHI", htons(action[0]), htons(8), htonl(ipaddr(iaddr).addr))
            elif action[0] == openflow.OFPAT_SET_TP_SRC \
                    or action[0] == openflow.OFPAT_SET_TP_DST:
                a = struct.pack("!HHHH", action[0], 8, action[1], 0)
            else:
                print 'invalid action type', action[0]
                return None

            action_str = action_str + a

        return action_str
    def send_port_mod(self, dpid, portno, hwaddr, mask, config):
        """Ask datapath 'dpid' to change port 'portno': the config bits
        selected by 'mask' are set to the values in 'config'."""
        try:
            addr = create_eaddr(str(hwaddr))
            return self.ctxt.send_port_mod(dpid, portno, addr, mask, config)
        except Exception, e:
            # Best effort: failures are printed rather than raised.
            print e
            #lg.error("unable to send port mod"+str(e))

    def send_switch_command(self, dpid, command, arg_list):
        # Arguments are joined into a single comma-separated string.
        return self.ctxt.send_switch_command(dpid, command, ",".join(arg_list))

    def switch_reset(self, dpid):
        return self.ctxt.switch_reset(dpid)

    def switch_update(self, dpid):
        return self.ctxt.switch_update(dpid)
    def send_openflow_packet(self, dp_id, packet, actions,
                             inport=openflow.OFPP_CONTROLLER):
        """
        sends an openflow packet to a datapath

        dp_id - datapath to send packet to
        packet - data to put in openflow packet
        actions - list of actions or dp port to send out of
        inport - dp port to mark as source (defaults to Controller port)
        """
        if type(packet) == type(array.array('B')):
            packet = packet.tostring()
        # A bare int is treated as an output port; a list as full actions.
        if type(actions) == types.IntType:
            self.ctxt.send_openflow_packet_port(dp_id, packet, actions, inport)
        elif type(actions) == types.ListType:
            oactions = self.make_action_array(actions)
            if oactions == None:
                raise Exception('Bad action')
            self.ctxt.send_openflow_packet_acts(dp_id, packet, oactions, inport)
        else:
            raise Exception('Bad argument')

    def send_openflow_buffer(self, dp_id, buffer_id, actions,
                             inport=openflow.OFPP_CONTROLLER):
        """
        Tells a datapath to send out a buffer

        dp_id - datapath to send packet to
        buffer_id - id of buffer to send out
        actions - list of actions or dp port to send out of
        inport - dp port to mark as source (defaults to Controller port)
        """
        # Same int-vs-list dispatch as send_openflow_packet, but acting
        # on a switch-side buffer instead of raw packet data.
        if type(actions) == types.IntType:
            self.ctxt.send_openflow_buffer_port(dp_id, buffer_id, actions,
                                                inport)
        elif type(actions) == types.ListType:
            oactions = self.make_action_array(actions)
            if oactions == None:
                raise Exception('Bad action')
            self.ctxt.send_openflow_buffer_acts(dp_id, buffer_id, oactions,
                                                inport)
        else:
            raise Exception('Bad argument')

    def post_callback(self, t, function):
        """Schedule 'function' to run after 't' seconds."""
        from twisted.internet import reactor
        reactor.callLater(t, function)
    def send_flow_command(self, dp_id, command, attrs,
                          priority=openflow.OFP_DEFAULT_PRIORITY,
                          add_args=None,
                          hard_timeout=openflow.OFP_FLOW_PERMANENT):
        """Send a flow-mod of type 'command' to datapath 'dp_id'.

        attrs - (potentially wildcarded) flow dictionary (see the flow
                key constants at the top of this module)
        add_args - for OFPFC_ADD only: (idle_timeout, actions, buffer_id)

        Returns False when the match or actions cannot be built,
        True otherwise.
        """
        m = set_match(attrs)
        if m == None:
            return False

        if command == openflow.OFPFC_ADD:
            (idle_timeout, actions, buffer_id) = add_args
            oactions = self.make_action_array(actions)
            if oactions == None:
                return False
        else:
            # Non-ADD commands carry no actions, timeout, or buffer.
            idle_timeout = 0
            oactions = ""
            buffer_id = UINT32_MAX

        self.ctxt.send_flow_command(dp_id, command, m, idle_timeout,
                                    hard_timeout, oactions, buffer_id, priority)
        return True
# Former PyAPI methods
def send_openflow(self, dp_id, buffer_id, packet, actions,
inport=openflow.OFPP_CONTROLLER):
"""
Sends an openflow packet to a datapath.
This function is a convenient wrapper for send_openflow_packet
and send_openflow_buffer for situations where it is unknown in
advance whether the packet to be sent is buffered. If
'buffer_id' is -1, it sends 'packet'; otherwise, it sends the
buffer represented by 'buffer_id'.
dp_id - datapath to send packet to
buffer_id - id of buffer to send out
packet - data to put in openflow packet
actions - list of actions or dp port to send out of
inport - dp port to mark as source (defaults to Controller
port)
"""
if buffer_id != None:
self.send_openflow_buffer(dp_id, buffer_id, actions, inport)
else:
self.send_openflow_packet(dp_id, packet, actions, inport)
def delete_datapath_flow(self, dp_id, attrs):
"""
Delete all flow entries matching the passed in (potentially
wildcarded) flow
dp_id - datapath to delete the entries from
attrs - the flow as a dictionary (described above)
"""
return self.send_flow_command(dp_id, openflow.OFPFC_DELETE, attrs)
def delete_strict_datapath_flow(self, dp_id, attrs,
priority=openflow.OFP_DEFAULT_PRIORITY):
"""
Strictly delete the flow entry matching the passed in (potentially
wildcarded) flow. i.e. matched flow have exactly the same
wildcarded fields.
dp_id - datapath to delete the entries from
attrs - the flow as a dictionary (described above)
priority - the priority of the entry to be deleted (only meaningful
for entries with wildcards)
"""
return self.send_flow_command(dp_id, openflow.OFPFC_DELETE_STRICT,
attrs, priority)
###########################################################################
# The following methods manipulate a flow entry in a datapath.
# A flow is defined by a dictionary containing 0 or more of the
# following keys (commented keys have already been defined above):
#
# DL_SRC = "dl_src"
# DL_DST = "dl_dst"
# DL_VLAN = "dl_vlan"
# DL_VLAN_PCP = "dl_vlan_pcp"
# DL_TYPE = "dl_type"
# NW_SRC = "nw_src"
# NW_DST = "nw_dst"
# NW_PROTO = "nw_proto"
# TP_SRC = "tp_src"
# TP_DST = "tp_dst"
#
# Absent keys are interpretted as wildcards
###########################################################################
    def install_datapath_flow(self, dp_id, attrs, idle_timeout, hard_timeout,
                              actions, buffer_id=None,
                              priority=openflow.OFP_DEFAULT_PRIORITY,
                              inport=None, packet=None):
        """
        Add a flow entry to datapath

        dp_id - datapath to add the entry to

        attrs - the flow as a dictionary (described above)

        idle_timeout - # idle seconds before flow is removed from dp

        hard_timeout - # of seconds before flow is removed from dp

        actions - a list where each entry is a two-element list representing
        an action.  Elem 0 of an action list should be an ofp_action_type
        and elem 1 should be the action argument (if needed). For
        OFPAT_OUTPUT, this should be another two-element list with max_len
        as the first elem, and port_no as the second

        buffer_id - the ID of the buffer to apply the action(s) to as well.
        Defaults to None if the actions should not be applied to a buffer

        priority - when wildcards are present, this value determines the
        order in which rules are matched in the switch (higher values
        take precedence over lower ones)

        packet - If buffer_id is None, then a data packet to which the
        actions should be applied, or None if none.

        inport - When packet is sent, the port on which packet came in as
        input, so that it can be omitted from any OFPP_FLOOD outputs.
        """
        if buffer_id == None:
            buffer_id = UINT32_MAX

        self.send_flow_command(dp_id, openflow.OFPFC_ADD, attrs, priority,
                               (idle_timeout, actions, buffer_id), hard_timeout)

        # No switch-side buffer: apply the actions to the raw packet
        # ourselves.  Only OFPAT_OUTPUT can be replayed here.
        if buffer_id == UINT32_MAX and packet != None:
            for action in actions:
                if action[0] == openflow.OFPAT_OUTPUT:
                    self.send_openflow_packet(dp_id, packet, action[1][1], inport)
                else:
                    raise NotImplementedError
def register_for_packet_in(self, handler):
"""
register a handler to be called on every packet_in event
handler will be called with the following args:
handler(dp_id, inport, ofp_reason, total_frame_len, buffer_id,
captured_data)
'buffer_id' == None if the datapath does not have a buffer for
the frame
"""
self.register_handler(Packet_in_event.static_get_name(),
gen_packet_in_cb(handler))
def register_for_flow_removed(self, handler):
self.register_handler(Flow_removed_event.static_get_name(),
handler)
def register_for_flow_mod(self, handler):
self.register_handler(Flow_mod_event.static_get_name(),
handler)
def register_for_bootstrap_complete(self, handler):
self.register_handler(Bootstrap_complete_event.static_get_name(),
handler)
################################################################################
# register a handler to be called on a every switch_features event
# handler will be called with the following args:
#
# handler(dp_id, attrs)
#
# attrs is a dictionary with the following keys:
# the PORTS value is a list of port dictionaries where each dictionary
# has the keys listed in the register_for_port_status message
################################################################################
def register_for_datapath_join(self, handler):
self.register_handler(Datapath_join_event.static_get_name(),
gen_dp_join_cb(handler))
################################################################################
# register a handler to be called whenever table statistics are
# returned by a switch.
#
# handler will be called with the following args:
#
# handler(dp_id, stats)
#
# Stats is a dictionary of table stats with the following keys:
#
# "table_id"
# "name"
# "max_entries"
# "active_count"
# "lookup_count"
# "matched_count"
#
# XXX
#
# We should get away from using strings here eventually.
#
################################################################################
def register_for_table_stats_in(self, handler):
self.register_handler(Table_stats_in_event.static_get_name(),
gen_ts_in_cb(handler))
################################################################################
# register a handler to be called whenever port statistics are
# returned by a switch.
#
# handler will be called with the following args:
#
# handler(dp_id, stats)
#
# Stats is a dictionary of port stats with the following keys:
#
# "port_no"
# "rx_packets"
# "tx_packets"
# "rx_bytes"
# "tx_bytes"
# "rx_dropped"
# "tx_dropped"
# "rx_errors"
# "tx_errors"
# "rx_frame_err"
# "rx_over_err"
# "rx_crc_err"
# "collisions"
#
################################################################################
def register_for_port_stats_in(self, handler):
self.register_handler(Port_stats_in_event.static_get_name(),
gen_ps_in_cb(handler))
################################################################################
# register a handler to be called whenever table aggregate
# statistics are returned by a switch.
#
# handler will be called with the following args:
#
# handler(dp_id, stats)
#
# Stats is a dictionary of aggregate stats with the following keys:
#
# "packet_count"
# "byte_count"
# "flow_count"
#
################################################################################
def register_for_aggregate_stats_in(self, handler):
self.register_handler(Aggregate_stats_in_event.static_get_name(),
gen_as_in_cb(handler))
################################################################################
# register a handler to be called whenever description
# statistics are returned by a switch.
#
# handler will be called with the following args:
#
# handler(dp_id, stats)
#
# Stats is a dictionary of descriptions with the following keys:
#
# "mfr_desc"
# "hw_desc"
# "sw_desc"
# "serial_num"
#
################################################################################
def register_for_desc_stats_in(self, handler):
self.register_handler(Desc_stats_in_event.static_get_name(),
gen_ds_in_cb(handler))
def register_for_datapath_leave(self, handler):
"""
register a handler to be called on a every datapath_leave
event handler will be called with the following args:
handler(dp_id)
"""
self.register_handler(Datapath_leave_event.static_get_name(),
gen_dp_leave_cb(handler))
##########################################################################
# register a handler to be called on a every port_status event
# handler will be called with the following args:
#
# handler(dp_id, ofp_port_reason, attrs)
#
# attrs is a dictionary with the following keys:
###########################################################################
def register_for_port_status(self, handler):
self.register_handler(Port_status_event.static_get_name(),
gen_port_status_cb(handler))
###########################################################################
# register a handler to be called on every packet_in event matching
# the passed in expression.
#
# priority - the priority the installed classifier rule should have
# expr - a dictionary containing 0 or more of the following keys.
# Absent keys will be interpretted as wildcards (i.e. any value is
# accepted for those attributes when checking for a potential match)
#
# handler will be called with the following args:
#
# handler(dp_id, inport, ofp_reason, total_frame_len, buffer_id,
# captured_data)
#
# 'buffer_id' == None if the datapath does not have a buffer for
# the frame
###########################################################################
    def register_for_packet_match(self, handler, priority, expr):
        """Register 'handler' for packet_in events whose headers match
        'expr' (a dict of header-field -> value; absent keys are
        wildcards -- see the banner comment above for the call
        signature of 'handler').

        Returns False when 'expr' contains an invalid key or value;
        otherwise the result of register_handler_on_match.
        """
        e = Packet_expr()
        # Translate each dict key to the corresponding Packet_expr
        # field, converting values to network byte order where needed.
        for key, val in expr.items():
            if key == AP_SRC:
                field = Packet_expr.AP_SRC
                val = htons(val)
            elif key == AP_DST:
                field = Packet_expr.AP_DST
                val = htons(val)
            elif key == DL_VLAN:
                field = Packet_expr.DL_VLAN
                val = htons(val)
            elif key == DL_VLAN_PCP:
                field = Packet_expr.DL_VLAN_PCP
                val = val
            elif key == DL_TYPE:
                field = Packet_expr.DL_TYPE
                val = htons(val)
            elif key == DL_SRC:
                field = Packet_expr.DL_SRC
                val = convert_to_eaddr(val)
                if val == None:
                    print 'invalid ethernet addr'
                    return False
            elif key == DL_DST:
                field = Packet_expr.DL_DST
                val = convert_to_eaddr(val)
                if val == None:
                    print 'invalid ethernet addr'
                    return False
            elif key == NW_SRC:
                field = Packet_expr.NW_SRC
                val = convert_to_ipaddr(val)
                if val == None:
                    print 'invalid ip addr'
                    return False
            elif key == NW_DST:
                field = Packet_expr.NW_DST
                val = convert_to_ipaddr(val)
                if val == None:
                    print 'invalid ip addr'
                    return False
            elif key == NW_PROTO:
                # Single byte; no byte-order conversion needed.
                field = Packet_expr.NW_PROTO
            elif key == TP_SRC:
                field = Packet_expr.TP_SRC
                val = htons(val)
            elif key == TP_DST:
                field = Packet_expr.TP_DST
                val = htons(val)
            elif key == GROUP_SRC:
                field = Packet_expr.GROUP_SRC
                val = htonl(val)
            elif key == GROUP_DST:
                field = Packet_expr.GROUP_DST
                val = htonl(val)
            else:
                print 'invalid key', key
                return False

            if isinstance(val, ethernetaddr):
                e.set_eth_field(field, val)
            else:
                # check for max?
                if val > UINT32_MAX:
                    # NOTE(review): '%u' is never interpolated here --
                    # py2 print with a comma emits the tuple-style
                    # "msg val" output; '%' formatting was likely meant.
                    print 'value %u exceeds accepted range', val
                    return False
                e.set_uint32_field(field, val)
        return self.ctxt.register_handler_on_match(gen_packet_in_cb(handler), priority, e)
def register_for_switch_mgr_join(self, handler):
"""
register a handler to be called on every switch_mgr_join
event handler will be called with the following args:
handler(mgmt_id)
"""
self.register_handler(Switch_mgr_join_event.static_get_name(),
gen_switch_mgr_join_cb(handler))
def register_for_switch_mgr_leave(self, handler):
"""
register a handler to be called on every switch_mgr_leave
event handler will be called with the following args:
handler(mgmt_id)
"""
self.register_handler(Switch_mgr_leave_event.static_get_name(),
gen_switch_mgr_leave_cb(handler))
def unregister_handler(self, rule_id):
"""
Unregister a handler for match.
"""
return self.ctxt.register_handler(event_type, event_name, handler)
| 37.157738 | 93 | 0.562795 |
d93465e5aa4c104324225ae31cc29b51841fe4f0 | 512 | py | Python | pendulum/locales/pl/custom.py | seandstewart/pendulum | daa4b936daf3f4dfa7d211aa0ac1e9d82d5401d4 | [
"MIT"
] | 5,049 | 2016-07-04T07:16:34.000Z | 2022-03-31T07:41:48.000Z | pendulum/locales/pl/custom.py | seandstewart/pendulum | daa4b936daf3f4dfa7d211aa0ac1e9d82d5401d4 | [
"MIT"
] | 536 | 2016-07-05T22:46:29.000Z | 2022-03-22T12:41:54.000Z | pendulum/locales/pl/custom.py | seandstewart/pendulum | daa4b936daf3f4dfa7d211aa0ac1e9d82d5401d4 | [
"MIT"
] | 373 | 2016-07-05T19:51:51.000Z | 2022-03-23T16:57:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
pl custom locale file.
"""
# Hand-maintained Polish overrides applied on top of the generated
# locale data (see the module docstring above).
translations = {
    "units": {"few_second": "kilka sekund"},
    # Relative time
    "ago": "{} temu",
    "from_now": "za {}",
    "after": "{0} po",
    "before": "{0} przed",
    # Date formats (moment.js-style tokens used by pendulum)
    "date_formats": {
        "LTS": "HH:mm:ss",
        "LT": "HH:mm",
        "L": "DD.MM.YYYY",
        "LL": "D MMMM YYYY",
        "LLL": "D MMMM YYYY HH:mm",
        "LLLL": "dddd, D MMMM YYYY HH:mm",
    },
}
| 19.692308 | 44 | 0.480469 |
77a9cf21bae24f3539ef16e9805e646edc5798dc | 385 | py | Python | module_03_numbers/psl_03.01_math_01.py | CodingGearsCourses/Python-3-Standard-Library-Essentials | 8b80bc8b77fa477b6ccbe2886ed9239c2defdfda | [
"Apache-2.0"
] | null | null | null | module_03_numbers/psl_03.01_math_01.py | CodingGearsCourses/Python-3-Standard-Library-Essentials | 8b80bc8b77fa477b6ccbe2886ed9239c2defdfda | [
"Apache-2.0"
] | null | null | null | module_03_numbers/psl_03.01_math_01.py | CodingGearsCourses/Python-3-Standard-Library-Essentials | 8b80bc8b77fa477b6ccbe2886ed9239c2defdfda | [
"Apache-2.0"
] | null | null | null | # --------------------------------
# --------------------------------
# CodingGears.io
# --------------------------------
# Demonstrates a handful of helpers from the math module.
import math

# Square root of 7 (irrational; printed as a float)
result = math.sqrt(7)
print(result)

# Greatest common divisor of 100 and 25
result = math.gcd(100, 25)
print(result)

# Exponentiation: 2 raised to the power 4 (math.pow returns a float)
result = math.pow(2, 4)
print(result)

# Factorial: 6! == 720
result = math.factorial(6)
print(result)
8b3dbd25860ceb2e102e32153d7a93151664a62b | 1,623 | py | Python | tests/test_pep8.py | cfranken/isofit | a67a26fe59fe0eb3fd5fe3503736294e17172f82 | [
"Apache-2.0"
] | null | null | null | tests/test_pep8.py | cfranken/isofit | a67a26fe59fe0eb3fd5fe3503736294e17172f82 | [
"Apache-2.0"
] | null | null | null | tests/test_pep8.py | cfranken/isofit | a67a26fe59fe0eb3fd5fe3503736294e17172f82 | [
"Apache-2.0"
] | 1 | 2020-07-25T05:36:49.000Z | 2020-07-25T05:36:49.000Z | #! /usr/bin/env python3
#
# Copyright 2018 California Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ISOFIT: Imaging Spectrometer Optimal FITting
# Author: David R Thompson, david.r.thompson@jpl.nasa.gov
#
import pep8
import sys
from os.path import expandvars, split, abspath, join
from glob import glob
testdir, fname = split(abspath(__file__))
config_file = testdir+'/data/pep8_config.txt'
excludes = ['sunposition.py']
def test_pep8_conformance():
    """Check every project source file against the PEP8 style guide."""
    source_dirs = [testdir + '/../isofit/', testdir + '/../utils/']
    # Collect *.py files, dropping anything on the exclusion list.
    files = [
        path
        for directory in source_dirs
        for path in glob(directory + '*py')
        if not any(excluded in path for excluded in excludes)
    ]
    checker = pep8.StyleGuide(config_file=config_file, quiet=False)
    report = checker.check_files(files)
    if report.total_errors != 0:
        print('Found PEP8 conformance error.')
        print('Please fix your style with autopep8.')
    assert report.total_errors == 0
| 33.122449 | 75 | 0.699322 |
56e54db0d77209ba1a8f1aa035138626d2c794fe | 302 | py | Python | leetcode/python/70-ClimbingStairs.py | yu-H-ang/code-backup | ea7469122fd63b0910e2c851f39f9e48d5d54fea | [
"MIT"
] | null | null | null | leetcode/python/70-ClimbingStairs.py | yu-H-ang/code-backup | ea7469122fd63b0910e2c851f39f9e48d5d54fea | [
"MIT"
] | null | null | null | leetcode/python/70-ClimbingStairs.py | yu-H-ang/code-backup | ea7469122fd63b0910e2c851f39f9e48d5d54fea | [
"MIT"
class Solution(object):
    """Count distinct ways to climb n stairs taking 1 or 2 steps."""

    def __init__(self):
        # Memo of computed answers, seeded with base cases n=1 and n=2.
        # Shared across calls so repeated queries are O(1).
        self.dic = {1: 1, 2: 2}

    def climbStairs(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Fill the memo bottom-up instead of recursing top-down; the
        # cached values (1..n) end up identical either way.
        if n not in self.dic:
            start = max(self.dic) + 1
            for step in range(start, n + 1):
                self.dic[step] = self.dic[step - 1] + self.dic[step - 2]
        return self.dic[n]
| 25.166667 | 71 | 0.5 |
fe76aeabd70a2dd444d07f3e373233479e9ed011 | 30,385 | py | Python | tests/kbcr/clutrr/classic/test_classic.py | alex4321/ctp | 22a6a55442a648e5f7d8c10f90708a7340360720 | [
"MIT"
] | null | null | null | tests/kbcr/clutrr/classic/test_classic.py | alex4321/ctp | 22a6a55442a648e5f7d8c10f90708a7340360720 | [
"MIT"
] | null | null | null | tests/kbcr/clutrr/classic/test_classic.py | alex4321/ctp | 22a6a55442a648e5f7d8c10f90708a7340360720 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import multiprocessing
import numpy as np
from itertools import cycle, islice
import torch
from torch import nn, optim, Tensor
from kbcr.kernels import GaussianKernel
from kbcr.clutrr.models.classic import NeuralKB, Hoppy
from kbcr.reformulators import AttentiveReformulator
from kbcr.reformulators import SymbolicReformulator
from kbcr.util import make_batches
from typing import List, Dict, Tuple, Optional
import pytest
def encode_relation(facts: List[Tuple[str, str, str]],
                    relation_embeddings: nn.Embedding,
                    relation_to_idx: Dict[str, int],
                    device: Optional[torch.device] = None) -> Tensor:
    """Embed the relation of every (subject, relation, object) fact.

    Returns a [num_facts, embedding_dim] tensor of relation embeddings,
    moved to `device` when one is given.
    """
    rel_ids = [relation_to_idx[rel] for _, rel, _ in facts]
    indices = torch.tensor(rel_ids, dtype=torch.long)
    if device is not None:
        indices = indices.to(device)
    return relation_embeddings(indices)
def encode_arguments(facts: List[Tuple[str, str, str]],
                     entity_embeddings: nn.Embedding,
                     entity_to_idx: Dict[str, int],
                     device: Optional[torch.device] = None) -> Tuple[Tensor, Tensor]:
    """Embed the subject and object entity of every fact.

    Returns a pair of [num_facts, embedding_dim] tensors: subject
    embeddings and object embeddings, moved to `device` when given.
    """
    pair_ids = [[entity_to_idx[subj], entity_to_idx[obj]] for subj, _, obj in facts]
    indices = torch.tensor(pair_ids, dtype=torch.long)
    if device is not None:
        indices = indices.to(device)
    pair_emb = entity_embeddings(indices)
    return pair_emb[:, 0, :], pair_emb[:, 1, :]
@pytest.mark.light
def test_classic_clutrr_v1():
    """NeuralKB sanity check: every stored triple should score near 1,
    every other (s, p, o) combination near 0."""
    embedding_size = 50

    # 16 two-hop chains: a_i -p-> b_i -q-> c_i, with target hop a_i -r-> c_i.
    triples, hops = [], []
    for i in range(16):
        triples += [(f'a{i}', 'p', f'b{i}'), (f'b{i}', 'q', f'c{i}')]
        hops += [(f'a{i}', 'r', f'c{i}')]

    # NOTE(review): the second comprehension's pattern (e, _, e) rebinds
    # e to the object position, so this collects subjects | objects as
    # intended -- but {o for (_, _, o) in ...} would say so more clearly.
    entity_lst = sorted({e for (e, _, _) in triples + hops} | {e for (e, _, e) in triples + hops})
    predicate_lst = sorted({p for (_, p, _) in triples + hops})

    nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    kernel = GaussianKernel()

    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True)
    predicate_embeddings = nn.Embedding(nb_predicates, embedding_size, sparse=True)

    model = NeuralKB(kernel=kernel)

    # Score every possible (s, p, o) combination against the fact store.
    for s in entity_lst:
        for p in predicate_lst:
            for o in entity_lst:
                xs_np = np.array([entity_to_index[s]])
                xp_np = np.array([predicate_to_index[p]])
                xo_np = np.array([entity_to_index[o]])

                with torch.no_grad():
                    xs = torch.LongTensor(xs_np)
                    xp = torch.LongTensor(xp_np)
                    xo = torch.LongTensor(xo_np)

                    xs_emb = entity_embeddings(xs)
                    xp_emb = predicate_embeddings(xp)
                    xo_emb = entity_embeddings(xo)

                    # Embed the fact store: relations plus both arguments.
                    rel_emb = encode_relation(facts=triples, relation_embeddings=predicate_embeddings,
                                              relation_to_idx=predicate_to_index)
                    arg1_emb, arg2_emb = encode_arguments(facts=triples, entity_embeddings=entity_embeddings,
                                                          entity_to_idx=entity_to_index)

                    facts = [rel_emb, arg1_emb, arg2_emb]

                    inf = model.score(xp_emb, xs_emb, xo_emb, facts=facts)
                    inf_np = inf.cpu().numpy()

                    # Stored facts score high; everything else low.
                    assert inf_np[0] > 0.95 if (s, p, o) in triples else inf_np[0] < 0.01
@pytest.mark.light
def test_classic_clutrr_v2():
    """Hoppy (depth 1) with a symbolic p-then-q reformulator: stored facts and
    the two-hop compositions in `xxx` should score high, everything else low."""
    embedding_size = 50
    triples, hops = [], []
    xxx = []
    for i in range(16):
        triples += [(f'a{i}', 'p', f'b{i}'), (f'b{i}', 'q', f'c{i}')]
        hops += [(f'a{i}', 'r', f'c{i}')]
        # (a, *, c) triples expected to be provable through the hop chain
        # (see the assertion at the bottom).
        xxx += [(f'a{i}', 'p', f'c{i}'), (f'a{i}', 'q', f'c{i}'), (f'a{i}', 'r', f'c{i}')]
    entity_lst = sorted({s for (s, _, _) in triples + hops} | {o for (_, _, o) in triples + hops})
    predicate_lst = sorted({p for (_, p, _) in triples + hops})
    nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)
    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
    kernel = GaussianKernel()
    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True)
    predicate_embeddings = nn.Embedding(nb_predicates, embedding_size, sparse=True)
    model = NeuralKB(kernel=kernel)
    # Hard-wire the reformulator to the hop sequence [p, q].
    indices = torch.LongTensor(np.array([predicate_to_index['p'], predicate_to_index['q']]))
    _hops = SymbolicReformulator(predicate_embeddings, indices)
    hoppy = Hoppy(model, hops_lst=[(_hops, False)], max_depth=1)
    for s in entity_lst:
        for p in predicate_lst:
            for o in entity_lst:
                # Batch of two identical queries (also exercises batching).
                xs_np = np.array([entity_to_index[s], entity_to_index[s]])
                xp_np = np.array([predicate_to_index[p], predicate_to_index[p]])
                xo_np = np.array([entity_to_index[o], entity_to_index[o]])
                with torch.no_grad():
                    xs = torch.LongTensor(xs_np)
                    xp = torch.LongTensor(xp_np)
                    xo = torch.LongTensor(xo_np)
                    xs_emb = entity_embeddings(xs)
                    xp_emb = predicate_embeddings(xp)
                    xo_emb = entity_embeddings(xo)
                    rel_emb = encode_relation(facts=triples, relation_embeddings=predicate_embeddings,
                                              relation_to_idx=predicate_to_index)
                    arg1_emb, arg2_emb = encode_arguments(facts=triples, entity_embeddings=entity_embeddings,
                                                          entity_to_idx=entity_to_index)
                    facts = [rel_emb, arg1_emb, arg2_emb]
                    inf = hoppy.score(xp_emb, xs_emb, xo_emb, facts=facts)
                    inf_np = inf.cpu().numpy()
                    print(s, p, o, inf_np)
                    assert inf_np[0] > 0.9 if (s, p, o) in (triples + xxx) else inf_np[0] < 0.1, inf_np
@pytest.mark.light
def test_classic_clutrr_v3():
    """Train an AttentiveReformulator so its two hops recover the latent
    p and q predicates; `c` and `d` track the kernel similarity to each."""
    embedding_size = 40
    batch_size = 8
    torch.manual_seed(0)
    triples, hops = [], []
    for i in range(32):
        triples += [(f'a{i}', 'p', f'b{i}'), (f'b{i}', 'q', f'c{i}')]
        hops += [(f'a{i}', 'r', f'c{i}')]
    entity_lst = sorted({s for (s, _, _) in triples + hops} | {o for (_, _, o) in triples + hops})
    predicate_lst = sorted({p for (_, p, _) in triples + hops})
    nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)
    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
    kernel = GaussianKernel(slope=None)
    entity_embeddings = nn.Embedding(nb_entities, embedding_size, sparse=True)
    predicate_embeddings = nn.Embedding(nb_predicates, embedding_size, sparse=True)
    # _hops = LinearReformulator(2, embedding_size)
    _hops = AttentiveReformulator(2, predicate_embeddings)
    model = NeuralKB(kernel=kernel)
    hoppy = Hoppy(model, hops_lst=[(_hops, False)], max_depth=1)
    # Only optimize the reformulator's own parameters, not the embeddings.
    params = [p for p in hoppy.parameters()
              if not torch.equal(p, entity_embeddings.weight)
              and not torch.equal(p, predicate_embeddings.weight)]
    for tensor in params:
        print(f'\t{tensor.size()}\t{tensor.device}')
    loss_function = nn.BCELoss()
    optimizer = optim.Adagrad(params, lr=0.1)
    hops_data = []
    for i in range(64):
        hops_data += hops
    batches = make_batches(len(hops_data), batch_size)
    rs = np.random.RandomState()
    c, d = 0.0, 0.0
    p_emb = predicate_embeddings(torch.LongTensor(np.array([predicate_to_index['p']])))
    q_emb = predicate_embeddings(torch.LongTensor(np.array([predicate_to_index['q']])))
    for batch_start, batch_end in batches:
        hops_batch = hops_data[batch_start:batch_end]
        s_lst = [s for (s, _, _) in hops_batch]
        p_lst = [p for (_, p, _) in hops_batch]
        o_lst = [o for (_, _, o) in hops_batch]
        nb_positives = len(s_lst)
        nb_negatives = nb_positives * 3
        # Negative sampling: corrupt subject and object slots with random entities.
        s_n_lst = rs.permutation(nb_entities)[:nb_negatives].tolist()
        nb_negatives = len(s_n_lst)
        o_n_lst = rs.permutation(nb_entities)[:nb_negatives].tolist()
        p_n_lst = list(islice(cycle(p_lst), nb_negatives))
        xs_np = np.array([entity_to_index[s] for s in s_lst] + s_n_lst)
        xp_np = np.array([predicate_to_index[p] for p in p_lst + p_n_lst])
        xo_np = np.array([entity_to_index[o] for o in o_lst] + o_n_lst)
        xs_emb = entity_embeddings(torch.LongTensor(xs_np))
        xp_emb = predicate_embeddings(torch.LongTensor(xp_np))
        xo_emb = entity_embeddings(torch.LongTensor(xo_np))
        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)
        facts = [rel_emb, arg1_emb, arg2_emb]
        scores = hoppy.score(xp_emb, xs_emb, xo_emb, facts=facts)
        labels_np = np.zeros(xs_np.shape[0])
        labels_np[:nb_positives] = 1
        labels = torch.LongTensor(labels_np).float()
        loss = loss_function(scores, labels)
        # Kernel similarity of each learned hop to the ground-truth p / q.
        hop_1_emb = hoppy.hops_lst[0][0].hops_lst[0](xp_emb)
        hop_2_emb = hoppy.hops_lst[0][0].hops_lst[1](xp_emb)
        c = kernel.pairwise(p_emb, hop_1_emb).mean().cpu().detach().numpy()
        d = kernel.pairwise(q_emb, hop_2_emb).mean().cpu().detach().numpy()
        print(c, d)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
    assert c > 0.95 and d > 0.95
@pytest.mark.light
def test_classic_clutrr_v4():
    """NeuralKB smoke test: forward() and score() agree on shapes, and the two
    planted known facts score high among 32 mostly-random queries."""
    embedding_size = 50
    rs = np.random.RandomState(0)
    for _ in range(32):
        with torch.no_grad():
            triples = [
                ('a', 'p', 'b'),
                ('c', 'q', 'd'),
                ('e', 'q', 'f'),
                ('g', 'q', 'h'),
                ('i', 'q', 'l'),
                ('m', 'q', 'n'),
                ('o', 'q', 'p'),
                ('q', 'q', 'r'),
                ('s', 'q', 't'),
                ('u', 'q', 'v')
            ]
            entity_lst = sorted({s for (s, _, _) in triples} | {o for (_, _, o) in triples})
            predicate_lst = sorted({p for (_, p, _) in triples})
            nb_entities, nb_predicates = len(entity_lst), len(predicate_lst)
            entity_to_index = {e: i for i, e in enumerate(entity_lst)}
            predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
            kernel = GaussianKernel()
            entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
            predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
            rel_emb = encode_relation(facts=triples,
                                      relation_embeddings=predicate_embeddings,
                                      relation_to_idx=predicate_to_index)
            arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                                  entity_embeddings=entity_embeddings,
                                                  entity_to_idx=entity_to_index)
            facts = [rel_emb, arg1_emb, arg2_emb]
            model = NeuralKB(kernel=kernel)
            xs_np = rs.randint(nb_entities, size=32)
            xp_np = rs.randint(nb_predicates, size=32)
            xo_np = rs.randint(nb_entities, size=32)
            # Plant the first two stored facts at the head of the batch so
            # their scores can be asserted below.
            xs_np[0] = 0
            xp_np[0] = 0
            xo_np[0] = 1
            xs_np[1] = 2
            xp_np[1] = 1
            xo_np[1] = 3
            xs = torch.LongTensor(xs_np)
            xp = torch.LongTensor(xp_np)
            xo = torch.LongTensor(xo_np)
            xs_emb = entity_embeddings(xs)
            xp_emb = predicate_embeddings(xp)
            xo_emb = entity_embeddings(xo)
            print('xp_emb', xp_emb.shape)
            res_sp, res_po = model.forward(xp_emb, xs_emb, xo_emb, facts=facts)
            inf = model.score(xp_emb, xs_emb, xo_emb, facts=facts)
            # The planted facts should be recognized with high confidence.
            assert inf[0] > 0.9
            assert inf[1] > 0.9
            scores_sp, emb_sp = res_sp
            scores_po, emb_po = res_po
            print(scores_sp.shape, emb_sp.shape)
            print(scores_po.shape, emb_po.shape)
            inf = inf.cpu().numpy()
            scores_sp = scores_sp.cpu().numpy()
            scores_po = scores_po.cpu().numpy()
@pytest.mark.light
def test_classic_clutrr_v5():
    """Depth sensitivity on a 2-hop chain (two identical reformulators):
    a depth-d Hoppy should prove exactly the (a, r, *) targets reachable in
    at most 2**d single hops along the p/q chain -- see the assert_allclose
    expectations at the bottom."""
    torch.set_num_threads(multiprocessing.cpu_count())
    embedding_size = 20
    torch.manual_seed(0)
    rs = np.random.RandomState(0)
    # A single long alternating p/q chain a -> b -> ... -> w, plus two
    # unrelated facts (x, r/s, y).
    triples = [
        ('a', 'p', 'b'),
        ('b', 'q', 'c'),
        ('c', 'p', 'd'),
        ('d', 'q', 'e'),
        ('e', 'p', 'f'),
        ('f', 'q', 'g'),
        ('g', 'p', 'h'),
        ('h', 'q', 'i'),
        ('i', 'p', 'l'),
        ('l', 'q', 'm'),
        ('m', 'p', 'n'),
        ('n', 'q', 'o'),
        ('o', 'p', 'p'),
        ('p', 'q', 'q'),
        ('q', 'p', 'r'),
        ('r', 'q', 's'),
        ('s', 'p', 't'),
        ('t', 'q', 'u'),
        ('u', 'p', 'v'),
        ('v', 'q', 'w'),
        ('x', 'r', 'y'),
        ('x', 's', 'y')
    ]
    entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
    predicate_lst = sorted({p for (_, p, _) in triples})
    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)
    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
    with torch.no_grad():
        kernel = GaussianKernel()
        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)
        facts = [rel_emb, arg1_emb, arg2_emb]
        k = 3
        model = NeuralKB(kernel=kernel, k=k)
        # Both rule slots rewrite a query into the hop sequence [p, q].
        indices = torch.LongTensor(np.array([predicate_to_index['p'], predicate_to_index['q']]))
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        hoppy0 = Hoppy(model, hops_lst=[(reformulator, False), (reformulator, False)], max_depth=0)
        hoppy1 = Hoppy(model, hops_lst=[(reformulator, False), (reformulator, False)], max_depth=1)
        hoppy2 = Hoppy(model, hops_lst=[(reformulator, False), (reformulator, False)], max_depth=2)
        hoppy3 = Hoppy(model, hops_lst=[(reformulator, False), (reformulator, False)], max_depth=3)
        # hoppy4 = Hoppy(model, hops_lst=[(reformulator, False), (reformulator, False)], max_depth=4)
        xs_np = rs.randint(nb_entities, size=12)
        xp_np = rs.randint(nb_predicates, size=12)
        xo_np = rs.randint(nb_entities, size=12)
        # Queries 0..8 ask (a, r, <entity 2**i hops down the chain>); the
        # remaining three are random noise expected to score ~0.
        xs_np[0] = entity_to_index['a']
        xp_np[0] = predicate_to_index['r']
        xo_np[0] = entity_to_index['c']
        xs_np[1] = entity_to_index['a']
        xp_np[1] = predicate_to_index['r']
        xo_np[1] = entity_to_index['e']
        xs_np[2] = entity_to_index['a']
        xp_np[2] = predicate_to_index['r']
        xo_np[2] = entity_to_index['g']
        xs_np[3] = entity_to_index['a']
        xp_np[3] = predicate_to_index['r']
        xo_np[3] = entity_to_index['i']
        xs_np[4] = entity_to_index['a']
        xp_np[4] = predicate_to_index['r']
        xo_np[4] = entity_to_index['m']
        xs_np[5] = entity_to_index['a']
        xp_np[5] = predicate_to_index['r']
        xo_np[5] = entity_to_index['o']
        xs_np[6] = entity_to_index['a']
        xp_np[6] = predicate_to_index['r']
        xo_np[6] = entity_to_index['q']
        xs_np[7] = entity_to_index['a']
        xp_np[7] = predicate_to_index['r']
        xo_np[7] = entity_to_index['s']
        xs_np[8] = entity_to_index['a']
        xp_np[8] = predicate_to_index['r']
        xo_np[8] = entity_to_index['u']
        # xs_np[9] = entity_to_index['a']
        # xp_np[9] = predicate_to_index['r']
        # xo_np[9] = entity_to_index['w']
        xs = torch.LongTensor(xs_np)
        xp = torch.LongTensor(xp_np)
        xo = torch.LongTensor(xo_np)
        xs_emb = entity_embeddings(xs)
        xp_emb = predicate_embeddings(xp)
        xo_emb = entity_embeddings(xo)
        # scores0 = hoppy0.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf0 = hoppy0.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # scores1 = hoppy1.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf1 = hoppy1.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # scores2 = hoppy2.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf2 = hoppy2.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # scores3 = hoppy3.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf3 = hoppy3.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # scores4 = hoppy4.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        # inf4 = hoppy4.score(xp_emb, xs_emb, xo_emb, facts=facts)
        print(inf0)
        print(inf1)
        print(inf2)
        print(inf3)
        # print(inf4)
        inf0_np = inf0.cpu().numpy()
        inf1_np = inf1.cpu().numpy()
        inf2_np = inf2.cpu().numpy()
        inf3_np = inf3.cpu().numpy()
        # inf4_np = inf4.cpu().numpy()
        # Each extra level of depth doubles the provable chain length.
        np.testing.assert_allclose(inf0_np, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf1_np, [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf2_np, [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf3_np, [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        # np.testing.assert_allclose(inf4_np, [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
@pytest.mark.light
def test_classic_clutrr_v6():
    """Same depth-sensitivity setup as v5, but with a single [p, q] rule slot,
    a larger embedding size and k=5 neighbours."""
    torch.set_num_threads(multiprocessing.cpu_count())
    embedding_size = 50
    torch.manual_seed(0)
    rs = np.random.RandomState(0)
    # A single long alternating p/q chain a -> b -> ... -> w, plus two
    # unrelated facts (x, r/s, y).
    triples = [
        ('a', 'p', 'b'),
        ('b', 'q', 'c'),
        ('c', 'p', 'd'),
        ('d', 'q', 'e'),
        ('e', 'p', 'f'),
        ('f', 'q', 'g'),
        ('g', 'p', 'h'),
        ('h', 'q', 'i'),
        ('i', 'p', 'l'),
        ('l', 'q', 'm'),
        ('m', 'p', 'n'),
        ('n', 'q', 'o'),
        ('o', 'p', 'p'),
        ('p', 'q', 'q'),
        ('q', 'p', 'r'),
        ('r', 'q', 's'),
        ('s', 'p', 't'),
        ('t', 'q', 'u'),
        ('u', 'p', 'v'),
        ('v', 'q', 'w'),
        ('x', 'r', 'y'),
        ('x', 's', 'y')
    ]
    entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
    predicate_lst = sorted({p for (_, p, _) in triples})
    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)
    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
    with torch.no_grad():
        kernel = GaussianKernel()
        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)
        facts = [rel_emb, arg1_emb, arg2_emb]
        k = 5
        model = NeuralKB(kernel=kernel, k=k)
        # Single rule slot rewriting a query into the hop sequence [p, q].
        indices = torch.LongTensor(np.array([predicate_to_index['p'], predicate_to_index['q']]))
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        hoppy0 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=0)
        hoppy1 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=1)
        hoppy2 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=2)
        hoppy3 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=3)
        # hoppy4 = Hoppy(model, hops_lst=[(reformulator, False)], depth=4)
        xs_np = rs.randint(nb_entities, size=12)
        xp_np = rs.randint(nb_predicates, size=12)
        xo_np = rs.randint(nb_entities, size=12)
        # Queries 0..8 ask (a, r, <entity down the chain>); the remaining
        # three random queries should score ~0 at every depth.
        xs_np[0] = entity_to_index['a']
        xp_np[0] = predicate_to_index['r']
        xo_np[0] = entity_to_index['c']
        xs_np[1] = entity_to_index['a']
        xp_np[1] = predicate_to_index['r']
        xo_np[1] = entity_to_index['e']
        xs_np[2] = entity_to_index['a']
        xp_np[2] = predicate_to_index['r']
        xo_np[2] = entity_to_index['g']
        xs_np[3] = entity_to_index['a']
        xp_np[3] = predicate_to_index['r']
        xo_np[3] = entity_to_index['i']
        xs_np[4] = entity_to_index['a']
        xp_np[4] = predicate_to_index['r']
        xo_np[4] = entity_to_index['m']
        xs_np[5] = entity_to_index['a']
        xp_np[5] = predicate_to_index['r']
        xo_np[5] = entity_to_index['o']
        xs_np[6] = entity_to_index['a']
        xp_np[6] = predicate_to_index['r']
        xo_np[6] = entity_to_index['q']
        xs_np[7] = entity_to_index['a']
        xp_np[7] = predicate_to_index['r']
        xo_np[7] = entity_to_index['s']
        xs_np[8] = entity_to_index['a']
        xp_np[8] = predicate_to_index['r']
        xo_np[8] = entity_to_index['u']
        xs = torch.LongTensor(xs_np)
        xp = torch.LongTensor(xp_np)
        xo = torch.LongTensor(xo_np)
        xs_emb = entity_embeddings(xs)
        xp_emb = predicate_embeddings(xp)
        xo_emb = entity_embeddings(xo)
        # res0 = hoppy0.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf0 = hoppy0.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores0_sp, subs0_sp), (scores0_po, subs0_po) = res0
        # res1 = hoppy1.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf1 = hoppy1.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores1_sp, subs1_sp), (scores1_po, subs1_po) = res1
        # res2 = hoppy2.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf2 = hoppy2.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores2_sp, subs2_sp), (scores2_po, subs2_po) = res2
        # res3 = hoppy3.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf3 = hoppy3.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores3_sp, subs3_sp), (scores3_po, subs3_po) = res3
        # scores4 = hoppy4.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        # inf4 = hoppy4.score(xp_emb, xs_emb, xo_emb, facts=facts)
        print(inf0)
        print(inf1)
        print(inf2)
        print(inf3)
        # print(inf4)
        inf0_np = inf0.cpu().numpy()
        inf1_np = inf1.cpu().numpy()
        inf2_np = inf2.cpu().numpy()
        inf3_np = inf3.cpu().numpy()
        # inf4_np = inf4.cpu().numpy()
        # Each extra level of depth doubles the provable chain length.
        np.testing.assert_allclose(inf0_np, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf1_np, [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf2_np, [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf3_np, [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        # np.testing.assert_allclose(inf4_np, [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
@pytest.mark.light
def test_classic_clutrr_v7():
    """Near-duplicate of v6 (single [p, q] rule, k=5) that additionally dumps
    a few entity-embedding components at the end for manual inspection."""
    torch.set_num_threads(multiprocessing.cpu_count())
    embedding_size = 50
    torch.manual_seed(0)
    rs = np.random.RandomState(0)
    # A single long alternating p/q chain a -> b -> ... -> w, plus two
    # unrelated facts (x, r/s, y).
    triples = [
        ('a', 'p', 'b'),
        ('b', 'q', 'c'),
        ('c', 'p', 'd'),
        ('d', 'q', 'e'),
        ('e', 'p', 'f'),
        ('f', 'q', 'g'),
        ('g', 'p', 'h'),
        ('h', 'q', 'i'),
        ('i', 'p', 'l'),
        ('l', 'q', 'm'),
        ('m', 'p', 'n'),
        ('n', 'q', 'o'),
        ('o', 'p', 'p'),
        ('p', 'q', 'q'),
        ('q', 'p', 'r'),
        ('r', 'q', 's'),
        ('s', 'p', 't'),
        ('t', 'q', 'u'),
        ('u', 'p', 'v'),
        ('v', 'q', 'w'),
        ('x', 'r', 'y'),
        ('x', 's', 'y')
    ]
    entity_lst = sorted({e for (e, _, _) in triples} | {e for (_, _, e) in triples})
    predicate_lst = sorted({p for (_, p, _) in triples})
    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)
    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}
    with torch.no_grad():
        kernel = GaussianKernel()
        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)
        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)
        facts = [rel_emb, arg1_emb, arg2_emb]
        k = 5
        model = NeuralKB(kernel=kernel, k=k)
        # Single rule slot rewriting a query into the hop sequence [p, q].
        indices = torch.LongTensor(np.array([predicate_to_index['p'], predicate_to_index['q']]))
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        hoppy0 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=0)
        hoppy1 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=1)
        hoppy2 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=2)
        hoppy3 = Hoppy(model, hops_lst=[(reformulator, False)], max_depth=3)
        # hoppy4 = Hoppy(model, hops_lst=[(reformulator, False)], depth=4)
        xs_np = rs.randint(nb_entities, size=12)
        xp_np = rs.randint(nb_predicates, size=12)
        xo_np = rs.randint(nb_entities, size=12)
        # Queries 0..8 ask (a, r, <entity down the chain>); the remaining
        # three random queries should score ~0 at every depth.
        xs_np[0] = entity_to_index['a']
        xp_np[0] = predicate_to_index['r']
        xo_np[0] = entity_to_index['c']
        xs_np[1] = entity_to_index['a']
        xp_np[1] = predicate_to_index['r']
        xo_np[1] = entity_to_index['e']
        xs_np[2] = entity_to_index['a']
        xp_np[2] = predicate_to_index['r']
        xo_np[2] = entity_to_index['g']
        xs_np[3] = entity_to_index['a']
        xp_np[3] = predicate_to_index['r']
        xo_np[3] = entity_to_index['i']
        xs_np[4] = entity_to_index['a']
        xp_np[4] = predicate_to_index['r']
        xo_np[4] = entity_to_index['m']
        xs_np[5] = entity_to_index['a']
        xp_np[5] = predicate_to_index['r']
        xo_np[5] = entity_to_index['o']
        xs_np[6] = entity_to_index['a']
        xp_np[6] = predicate_to_index['r']
        xo_np[6] = entity_to_index['q']
        xs_np[7] = entity_to_index['a']
        xp_np[7] = predicate_to_index['r']
        xo_np[7] = entity_to_index['s']
        xs_np[8] = entity_to_index['a']
        xp_np[8] = predicate_to_index['r']
        xo_np[8] = entity_to_index['u']
        xs = torch.LongTensor(xs_np)
        xp = torch.LongTensor(xp_np)
        xo = torch.LongTensor(xo_np)
        xs_emb = entity_embeddings(xs)
        xp_emb = predicate_embeddings(xp)
        xo_emb = entity_embeddings(xo)
        # res0 = hoppy0.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf0 = hoppy0.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores0_sp, subs0_sp), (scores0_po, subs0_po) = res0
        # res1 = hoppy1.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf1 = hoppy1.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores1_sp, subs1_sp), (scores1_po, subs1_po) = res1
        # res2 = hoppy2.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf2 = hoppy2.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores2_sp, subs2_sp), (scores2_po, subs2_po) = res2
        # res3 = hoppy3.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        inf3 = hoppy3.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores3_sp, subs3_sp), (scores3_po, subs3_po) = res3
        # res4 = hoppy4.forward(xp_emb, xs_emb, xo_emb, facts=facts)
        # inf4 = hoppy4.score(xp_emb, xs_emb, xo_emb, facts=facts)
        # (scores4_sp, subs4_sp), (scores4_po, subs4_po) = res4
        inf0_np = inf0.cpu().numpy()
        inf1_np = inf1.cpu().numpy()
        inf2_np = inf2.cpu().numpy()
        inf3_np = inf3.cpu().numpy()
        # inf4_np = inf4.cpu().numpy()
        # Each extra level of depth doubles the provable chain length.
        np.testing.assert_allclose(inf0_np, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf1_np, [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf2_np, [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        np.testing.assert_allclose(inf3_np, [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        # np.testing.assert_allclose(inf4_np, [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], rtol=1e-1, atol=1e-1)
        # Debug dump: scores plus the first component of a few entity vectors.
        print(inf3_np)
        print(entity_embeddings.weight[entity_to_index['c'], 0].item())
        print(entity_embeddings.weight[entity_to_index['e'], 0].item())
        print(entity_embeddings.weight[entity_to_index['g'], 0].item())
        print(entity_embeddings.weight[entity_to_index['i'], 0].item())
if __name__ == '__main__':
    # Run this module's test suite under pytest when executed directly.
    pytest.main([__file__])

    # Individual tests can be invoked by hand instead:
    # test_classic_clutrr_v1()
    # test_classic_clutrr_v2()
    # test_classic_clutrr_v3()
    # test_classic_clutrr_v4()
    # test_classic_clutrr_v5()
    # test_classic_clutrr_v6()
    # test_classic_clutrr_v7()
| 35.705053 | 109 | 0.565443 |
51e43f709e6c23fbb21042409829d2ce2edac2e8 | 1,259 | py | Python | main.py | JDJGInc/jdjgapi | 0c25bebaacd498cc6acfc6f85f5f5bda762c2297 | [
"MIT"
] | null | null | null | main.py | JDJGInc/jdjgapi | 0c25bebaacd498cc6acfc6f85f5f5bda762c2297 | [
"MIT"
] | 2 | 2021-12-09T05:26:00.000Z | 2021-12-09T05:26:13.000Z | main.py | JDJGInc/jdjgapi | 0c25bebaacd498cc6acfc6f85f5f5bda762c2297 | [
"MIT"
] | 1 | 2021-12-09T05:13:59.000Z | 2021-12-09T05:13:59.000Z | from quart import Quart
import json, random
app = Quart(__name__)
@app.route('/')
async def handleRoot():
    """Serve the landing page (index.html) as an HTML response."""
    with open('index.html', 'r') as page:
        body = page.read()
    return body, 200, {'content-type': 'text/html'}
@app.route('/api/')
async def handleApi():
    """Serve the API documentation page (api.html) as an HTML response."""
    with open('api.html', 'r') as page:
        body = page.read()
    return body, 200, {'content-type': 'text/html'}
@app.route('/api/<endpoint>')
async def handleEndpoint(endpoint):
    """Return a random entry for `endpoint` as JSON, or a 404 error payload.

    Entries come from data.json, keyed by endpoint name; image endpoints
    ("objection", "opinional") are wrapped under "url", all others "text".
    """
    with open('data.json', 'r') as file:
        dataJson = json.loads(file.read())
    try:
        tempData = str(random.choice(dataJson[endpoint]))
    except KeyError as e:
        # Unknown endpoint: log it and answer with a JSON error body.
        print(e)
        errorDict = {"error": f"{endpoint} isn't a valid endpoint"}
        return json.dumps(errorDict), 404, {'content-type': 'application/json'}
    key = "url" if endpoint in ("objection", "opinional") else "text"
    return json.dumps({key: tempData}), 200, {'content-type': 'application/json'}
# Custom handler for requests that match no registered route.
@app.errorhandler(404)
async def handle404(error):
    """Render the custom 404 page for unmatched routes."""
    with open('404.html', 'r') as page:
        body = page.read()
    return body, 404, {'content-type': 'text/html'}
app.run(host = '0.0.0.0', port=3000) | 27.977778 | 75 | 0.621922 |
c3666294229c53dd7319f50213adffb308a8ee0b | 2,804 | py | Python | week2/task_hasanozdemir_oop.py | hasanozdem1r/patika_yemeksepeti_bootcamp | 59a2a5b615dcd76b9719d98f716cf3d3be764850 | [
"MIT"
] | null | null | null | week2/task_hasanozdemir_oop.py | hasanozdem1r/patika_yemeksepeti_bootcamp | 59a2a5b615dcd76b9719d98f716cf3d3be764850 | [
"MIT"
] | null | null | null | week2/task_hasanozdemir_oop.py | hasanozdem1r/patika_yemeksepeti_bootcamp | 59a2a5b615dcd76b9719d98f716cf3d3be764850 | [
"MIT"
] | null | null | null | """
This script was created to apply CRUD operations on CSV files.
Hasan Özdemir 2021
"""
from csv import reader,DictReader,writer
from os import path
class FileOperations(object):
    """CRUD-style helpers for reading and searching CSV / text files."""

    def __init__(self, file_path: str, fields: list = None) -> None:
        """Store the target path (and optional field values) and split the
        file name from its extension so readers can dispatch on it.

        :param file_path: path of the file to operate on
        :param fields: optional list of field values (reserved for writes)
        """
        self.path = file_path
        self.fields = fields
        # Separate file name and extension once, up front.
        self.file_name, self.file_extension = path.splitext(file_path)

    def read_data(self):
        """Print every row of a ';'-delimited CSV file, or every line of a
        text file; other extensions are reported as unsupported."""
        if str(self.file_extension) == '.csv':
            # Context manager guarantees the handle is closed.
            with open(self.path, newline='', mode="r") as csv_file:
                csv_data = reader(csv_file, delimiter=';', quotechar='|')
                for row in csv_data:
                    print(row)
        elif str(self.file_extension) == '.txt':
            with open(self.path, mode="r", encoding="utf-8") as file:
                for line in file:
                    print(line, end='')
        else:
            print('Other data types is not supported for current version.')

    def search_data(self, search_text: str, row_number: int = 0):
        """Print every row whose column `row_number` equals `search_text`.

        NOTE(review): this reader splits on spaces (delimiter=' ') while
        read_data splits on ';' -- presumably intentional for the sample
        data, but worth confirming.
        """
        # Bug fix: the previous version printed the first row's type and
        # broke out of the loop before any comparison ran, so the search
        # never executed; the file handle was also never closed.
        with open(self.path, mode="r", encoding="utf-8") as csv_file:
            csv_data = reader(csv_file, delimiter=' ', quotechar='|')
            for row in csv_data:
                # Skip blank rows so short lines cannot raise IndexError.
                if row and row[row_number] == search_text:
                    print(row)
def select_operation():
    """Interactive menu loop; exits when the user enters 5.

    Options 1-4 are placeholders for show/search/update/delete and are not
    implemented yet.
    """
    while True:
        print('To show all data enter 1, to search data enter 2, to update data enter 3, to delete data enter 4, to exit from program enter 5')
        try:
            selection = int(input())
        except ValueError:
            # Robustness fix: non-numeric input previously crashed the loop
            # with an unhandled ValueError; re-prompt instead.
            continue
        if selection == 1:
            # show all data
            pass
        elif selection == 2:
            # search data
            pass
        elif selection == 3:
            # update data
            pass
        elif selection == 4:
            # delete data
            pass
        elif selection == 5:
            # exit from terminal and stop loop
            break
        else:
            continue
if __name__ == '__main__':
    # Smoke test: point at a sample CSV and search its first column.
    file_obj=FileOperations('hasan.csv',[1,'Hasan','Özdemir','Computer Engineering'])
    #file_obj.read_data()
    file_obj.search_data('raspberry,Rubus',0)
#file_obj.convert_csv_to_json('hasan1.csv') | 32.988235 | 143 | 0.577033 |
8133065176d9db57ef092d3746709a8e45af348a | 2,757 | py | Python | restricted_boltzmann_machine/Mnist.py | dodiku/learning_machines_class | d261a3647f678784bd15641e39fbd03de59dc144 | [
"MIT"
] | null | null | null | restricted_boltzmann_machine/Mnist.py | dodiku/learning_machines_class | d261a3647f678784bd15641e39fbd03de59dc144 | [
"MIT"
] | null | null | null | restricted_boltzmann_machine/Mnist.py | dodiku/learning_machines_class | d261a3647f678784bd15641e39fbd03de59dc144 | [
"MIT"
] | 1 | 2019-09-29T12:47:32.000Z | 2019-09-29T12:47:32.000Z | #!/usr/bin/python
'''
Learning Machines
Taught by Patrick Hebron at NYU ITP
MNIST dataset helpers.
'''
import os
import gzip
import pickle
import numpy as np
def mnist_encode_one_hot_label(idx):
    '''Return a length-10 one-hot float vector with position `idx` set to 1.'''
    return (np.arange(10) == idx).astype(np.float64)
def mnist_decode_one_hot_label(enc):
    '''Inverse of the encoder: the index of the hot (maximum) entry.'''
    hot_index = np.argmax(enc)
    return hot_index
def mnist_get_accuracy(labels, guesses):
    '''Fraction of rows where argmax(guess) matches argmax(label).'''
    label_idx = np.argmax(labels, axis=1)
    guess_idx = np.argmax(guesses, axis=1)
    matches = (label_idx == guess_idx).astype(np.float64)
    return np.mean(matches)
class Mnist:
    """Loader for the pickled MNIST dataset (mnist.pkl.gz).

    Downloads the pickle on first use, then exposes training / validation /
    testing digits (flattened to 784 floats, optionally binarized) together
    with one-hot encoded labels.
    """
    def __init__(self, threshold = True):
        # Set pickle path:
        mnist_pickle_path = 'mnist.pkl.gz'
        # Download pickle, if necessary:
        if not os.path.exists( mnist_pickle_path ):
            # NOTE(review): deeplearning.net has been unreliable for years --
            # confirm this URL still serves the file, or mirror it locally.
            import urllib.request
            # urlretrieve replaces the long-deprecated URLopener().retrieve().
            urllib.request.urlretrieve( 'http://deeplearning.net/data/mnist/mnist.pkl.gz', mnist_pickle_path )
        # The pickle was written by Python 2; decoding via the private
        # _Unpickler with latin1 encoding is the usual python3 workaround.
        # credit: http://www.mlblog.net/2016/09/reading-mnist-in-python3.html
        with gzip.open(mnist_pickle_path, 'rb') as fh:
            u = pickle._Unpickler(fh)
            u.encoding = 'latin1'
            training_data, validation_data, testing_data = u.load()
        # Format dataset:
        self.training_digits, self.training_labels = self.format_dataset( training_data, threshold )
        self.validation_digits, self.validation_labels = self.format_dataset( validation_data, threshold )
        self.testing_digits, self.testing_labels = self.format_dataset( testing_data, threshold )
    def getTrainingData(self, count = 0):
        """Return (digits, labels); a random batch of `count` rows if count > 0."""
        if count == 0:
            return ( self.training_digits, self.training_labels )
        else:
            return self.get_batch( count, self.training_digits, self.training_labels )
    def getValidationData(self, count = 0):
        """Return (digits, labels); a random batch of `count` rows if count > 0."""
        if count == 0:
            return ( self.validation_digits, self.validation_labels )
        else:
            return self.get_batch( count, self.validation_digits, self.validation_labels )
    def getTestingData(self, count = 0):
        """Return (digits, labels); a random batch of `count` rows if count > 0."""
        if count == 0:
            return ( self.testing_digits, self.testing_labels )
        else:
            return self.get_batch( count, self.testing_digits, self.testing_labels )
    @staticmethod
    def get_batch(count,digits,labels):
        """Sample `count` rows without replacement (capped at the dataset size)."""
        total = len( digits )
        count = min( count, total )
        idxs = np.random.choice( np.arange( total ), count, replace=False )
        return ( digits[ idxs ], labels[ idxs ] )
    @staticmethod
    def format_dataset(dataset, threshold):
        """Flatten digits to 784-vectors and one-hot the labels; binarize
        the digits (pixel > 0) when `threshold` is true.

        Fix: `np.float` was removed in NumPy 1.24; use the builtin float,
        which it aliased.
        """
        digits = np.array( [ np.reshape( x, 784 ) for x in dataset[ 0 ] ] )
        labels = np.array( [ mnist_encode_one_hot_label( y ) for y in dataset[ 1 ] ] )
        return ( ( digits > 0 ).astype( float ) if threshold else digits, labels )
| 33.621951 | 113 | 0.725426 |
dc181475bee3609a5c24ae20daa1f1e6843c9833 | 8,556 | py | Python | theano/sandbox/theano_object.py | ganguli-lab/Theano | d61c929b6d1a5bae314545cba79c879de687ea18 | [
"BSD-3-Clause"
] | 11 | 2016-12-01T19:49:28.000Z | 2021-11-08T11:12:08.000Z | theano/sandbox/theano_object.py | AtousaTorabi/Theano_old | ba2d2f74406243112e813df31429721c791a889a | [
"BSD-3-Clause"
] | null | null | null | theano/sandbox/theano_object.py | AtousaTorabi/Theano_old | ba2d2f74406243112e813df31429721c791a889a | [
"BSD-3-Clause"
] | 6 | 2015-06-21T20:55:55.000Z | 2019-04-24T20:03:25.000Z | """DRAFT: TheanoObject
N.B. the gotcha with this design is listed in the documentation of `TheanoObject`
"""
import theano
from theano import tensor
import numpy
def theano_type(x):
    """Return a theano Type instance suitable for containing value `x`."""
    # Exact type check on purpose: bool and other int subclasses are rejected.
    if type(x) is not int:
        raise NotImplementedError()
    return tensor.lscalar
class symbolic_fn_callable(object):
    """The object handed back when a `symbolic_fn` attribute of a
    `TheanoObject` is accessed.

    Calling the instance compiles (or fetches the cached) theano function
    matching the argument types and runs it.  The pieces of the compiled
    method are also exposed without running it: for ``t.add`` one can use
    ``t.add.outputs(5)``, ``t.add.updates(5)`` and ``t.add.theano_function(5)``
    alongside the plain call ``t.add(5)``.
    """
    def __init__(self, fn, mode):
        self.fn = fn
        self.mode = mode

    def on(self, o_self):
        """Silly method to work with symbolic_fn.__get__"""
        self.o_self = o_self
        return self

    def run_symbolic(self, *args, **kwargs):
        """Ask the owning TheanoObject for the compiled implementation
        matching these argument types."""
        owner = self.o_self
        return owner._get_method_impl(self.fn, owner, args, kwargs, mode=self.mode)

    def __call__(self, *args, **kwargs):
        """Compile if needed, then execute the theano function on the args."""
        impl = self.run_symbolic(*args, **kwargs)
        return impl['theano_function'](*args, **kwargs)

    def theano_function(self, *args, **kwargs):
        """The compiled `Function` for these argument types."""
        return self.run_symbolic(*args, **kwargs)['theano_function']

    def outputs(self, *args, **kwargs):
        """The symbolic outputs for these argument types."""
        return self.run_symbolic(*args, **kwargs)['outputs']

    def updates(self, *args, **kwargs):
        """The symbolic updates for these argument types."""
        return self.run_symbolic(*args, **kwargs)['updates']
class symbolic_fn(object):
    """Property-like descriptor marking a method of `TheanoObject` as symbolic.

    Attribute access returns a `symbolic_fn_callable` bound to the instance;
    assignment is silently ignored so the descriptor cannot be clobbered.
    """

    def __init__(self, fn, mode=None):
        self.fn = fn
        self.callable = symbolic_fn_callable(fn, mode)

    def __get__(self, o_self, o_cls):
        return self.callable.on(o_self)

    def __set__(self, o_self, new_val):
        pass
def symbolic_fn_opts(**kwargs):
    """Build a `symbolic_fn` decorator for `TheanoObject` methods.

    The `kwargs` given here are forwarded to `symbolic_fn` (and from there
    to ``theano.function``).
    """
    def decorate(func):
        return symbolic_fn(func, **kwargs)
    return decorate
class RVal(object):
    """Return-value object for a `symbolic_fn`."""

    # Variables whose values the compiled method will compute and return.
    outputs = []

    # Mapping {module Variable -> new-value Variable}; on each call of the
    # compiled function, each key (a `symbolic_member` of some module) is
    # replaced with the computed value of its Variable.
    updates = {}

    def __init__(self, outputs, updates=None):
        self.outputs = outputs
        self.updates = {} if updates is None else updates
        assert type(self.updates) is dict
class TheanoObject(object):
    """Base class for Theano-supported objects.

    Subclasses declare `symbolic_fn` attributes, which are compiled on demand
    (once per distinct argument-type signature) so they can be used like
    ordinary methods.  Symbolic functions can share member variables created
    with `symbolic_member`.

    :note: Variables *not* created via ``self.symbolic_member`` that are
        referred to in a symbolic function's body are locked into that
        function's closure when it is compiled — they are not shared between
        symbolic functions or with the instance.

    :warning: A symbolic function may be compiled several times (once per
        argument-type signature); non-symbolic instance variables are frozen
        into each compilation, so later changes to them do not affect
        already-compiled functions.
    """

    def __init__(self):
        # Maps (symbolic_fn, argument-type tuple) -> dict with the compiled
        # theano function and its symbolic outputs/updates.
        self.module_method_cache = {}

    def _get_method_impl(self, fn, o_self, args, kwargs, mode):
        """Return (compiling and caching on first use) the implementation of
        symbolic function `fn` on instance `o_self` for arguments `args`.

        :rtype: dict with keys 'theano_function', 'outputs', 'updates', 'mode'
        :raises NotImplementedError: if `kwargs` are supplied (unsupported).
        :note: results are cached per (fn, argument-type) key in
            `module_method_cache`.
        """
        if kwargs:
            raise NotImplementedError()
        cache = self.module_method_cache

        args_types = tuple(theano_type(arg) for arg in args)
        key = (fn, args_types)

        if key not in cache:
            inputs = [a() for a in args_types]
            # Fix: these were Python 2 `print` statements (a syntax error on
            # Python 3); converted to print() calls.
            print('compiling', fn, 'for inputs', inputs)
            rval = fn(o_self, *inputs)
            print('compiling to compute outputs', rval.outputs)

            if isinstance(rval.outputs, (tuple, list)):
                all_required_inputs = theano.gof.graph.inputs(rval.outputs)
            else:
                all_required_inputs = theano.gof.graph.inputs([rval.outputs])

            # Build In() specs for `symbolic_member` Variables pulled in by
            # the graph (recognized by their `_theanoclass_container`) that
            # were not passed explicitly as arguments.
            module_inputs = [theano.compile.io.In(
                variable=v,
                value=v._theanoclass_container,
                mutable=(v in rval.updates),
                update=rval.updates.get(v, None))
                for v in all_required_inputs
                if hasattr(v, '_theanoclass_container') and not (v in inputs)]

            cache[key] = dict(
                theano_function=theano.function(inputs + module_inputs, rval.outputs),
                updates=rval.updates,
                outputs=rval.outputs,
                mode=mode)

        return cache[key]

    def symbolic_member(self, ival, name=None):
        """Create a Variable holding integer value `ival`, backed by a Container.

        When the returned Variable is used inside a `symbolic_fn` (without
        appearing as an explicit argument), its Container supplies and stores
        its value.  The Container's contents can be read with ``v.get()`` and
        written with ``v.set(newval)``.

        :raises NotImplementedError: if `ival` is not a plain int.
        """
        if type(ival) is not int:
            raise NotImplementedError()
        v = tensor.lscalar(name)
        v._theanoclass_container = theano.gof.Container(
            v,
            storage=[theano._asarray(ival, dtype='int64')],
            readonly=False)
        assert not hasattr(v, 'set')
        assert not hasattr(v, 'get')
        v.get = lambda: v._theanoclass_container.data

        def setval_in_v(newval):
            v._theanoclass_container.data = newval
        v.set = setval_in_v
        return v
| 37.362445 | 98 | 0.645161 |
cf53dd4df69c630bb359d1442670bcee00044e70 | 6,304 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/express_route_circuit_peering.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/express_route_circuit_peering.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/express_route_circuit_peering.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ExpressRouteCircuitPeering(SubResource):
    """Peering in an ExpressRouteCircuit resource.

    Variables are only populated by the server and are ignored when sending
    a request.

    Constructor keyword arguments (all optional, default ``None``):

    :param id: Resource ID.
    :param peering_type: 'AzurePublicPeering', 'AzurePrivatePeering' or
     'MicrosoftPeering'.
    :param state: The state of peering, 'Disabled' or 'Enabled'.
    :param azure_asn: The Azure ASN (int).
    :param peer_asn: The peer ASN (int).
    :param primary_peer_address_prefix: The primary address prefix.
    :param secondary_peer_address_prefix: The secondary address prefix.
    :param primary_azure_port: The primary port.
    :param secondary_azure_port: The secondary port.
    :param shared_key: The shared key.
    :param vlan_id: The VLAN ID (int).
    :param microsoft_peering_config: ExpressRouteCircuitPeeringConfig.
    :param stats: ExpressRouteCircuitStats.
    :param provisioning_state: 'Updating', 'Deleting' or 'Failed'.
    :param gateway_manager_etag: The GatewayManager Etag.
    :param last_modified_by: Whether the provider or the customer last
     modified the peering.
    :param route_filter: RouteFilter reference.
    :param ipv6_peering_config: Ipv6ExpressRouteCircuitPeeringConfig.
    :param name: Name of the resource, unique within the resource group.
    :ivar etag: Read-only unique string that changes whenever the resource
     is updated.
    """

    _validation = {
        'etag': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'peering_type': {'key': 'properties.peeringType', 'type': 'str'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'azure_asn': {'key': 'properties.azureASN', 'type': 'int'},
        'peer_asn': {'key': 'properties.peerASN', 'type': 'int'},
        'primary_peer_address_prefix': {'key': 'properties.primaryPeerAddressPrefix', 'type': 'str'},
        'secondary_peer_address_prefix': {'key': 'properties.secondaryPeerAddressPrefix', 'type': 'str'},
        'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'},
        'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'vlan_id': {'key': 'properties.vlanId', 'type': 'int'},
        'microsoft_peering_config': {'key': 'properties.microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'},
        'stats': {'key': 'properties.stats', 'type': 'ExpressRouteCircuitStats'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
        'last_modified_by': {'key': 'properties.lastModifiedBy', 'type': 'str'},
        'route_filter': {'key': 'properties.routeFilter', 'type': 'RouteFilter'},
        'ipv6_peering_config': {'key': 'properties.ipv6PeeringConfig', 'type': 'Ipv6ExpressRouteCircuitPeeringConfig'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitPeering, self).__init__(**kwargs)
        # Every settable property defaults to None when not supplied; drive
        # the assignments from one tuple instead of eighteen explicit lines.
        for prop in (
            'peering_type', 'state', 'azure_asn', 'peer_asn',
            'primary_peer_address_prefix', 'secondary_peer_address_prefix',
            'primary_azure_port', 'secondary_azure_port', 'shared_key',
            'vlan_id', 'microsoft_peering_config', 'stats',
            'provisioning_state', 'gateway_manager_etag', 'last_modified_by',
            'route_filter', 'ipv6_peering_config', 'name',
        ):
            setattr(self, prop, kwargs.get(prop, None))
        # Server-populated, read-only on the client.
        self.etag = None
| 50.83871 | 125 | 0.681631 |
5e590361a58d117f0c36246d8e11269e855831f6 | 257 | py | Python | test/test_utils.py | yarrdiddy/vegamite | 9fdf554e46b09ff23dc98f0df373bcdb3b322510 | [
"MIT"
] | 1 | 2019-04-21T13:46:09.000Z | 2019-04-21T13:46:09.000Z | test/test_utils.py | yarrdiddy/vegamite | 9fdf554e46b09ff23dc98f0df373bcdb3b322510 | [
"MIT"
] | 1 | 2018-03-13T19:46:38.000Z | 2018-03-13T19:46:38.000Z | test/test_utils.py | yarrdiddy/vegamite | 9fdf554e46b09ff23dc98f0df373bcdb3b322510 | [
"MIT"
] | null | null | null | import pytest
from vegamite.utils.timeutil import parse_time_range
def test_parse_time_range():
    """parse_time_range should turn '<N> <unit>' strings into timedeltas.

    A month is treated as 30 days.
    """
    # Removed a commented-out `import ipdb; ipdb.set_trace()` debugging
    # leftover — dead code should not ship in tests.
    ninety_days = parse_time_range('90 days')
    one_month = parse_time_range('1 month')
    assert ninety_days.days == 90
    assert one_month.days == 30
| 19.769231 | 52 | 0.754864 |
a1df1c9d8656aa062fc84ab392e913bedb90a1d8 | 303 | py | Python | Python_Exercicios/ex113.py | gabrieldepaiva/Exercicios-CursoEmVideo | 118231c24f040ca0ac3d3b6e6bf633e4eaa06858 | [
"MIT"
] | null | null | null | Python_Exercicios/ex113.py | gabrieldepaiva/Exercicios-CursoEmVideo | 118231c24f040ca0ac3d3b6e6bf633e4eaa06858 | [
"MIT"
] | null | null | null | Python_Exercicios/ex113.py | gabrieldepaiva/Exercicios-CursoEmVideo | 118231c24f040ca0ac3d3b6e6bf633e4eaa06858 | [
"MIT"
] | null | null | null | def leiaint (txt):
while True:
try:
n = int(input(txt))
except:
print('Erro! Por Favor digite um número inteiro válido.')
continue
else:
return n
num = leiaint('Escreva um número - ')
print(f'O número digitado foi {num}.')
| 18.9375 | 69 | 0.521452 |
863e18e361108fd126e625f5db6d9385336e2711 | 12,411 | py | Python | examples/extract_features.py | jayleicn/pytorch-pretrained-BERT | 9f1c253469ce2953f1e6a8eafea4bd7fcb674eb1 | [
"Apache-2.0"
] | 1 | 2020-08-21T00:38:15.000Z | 2020-08-21T00:38:15.000Z | examples/extract_features.py | jayleicn/pytorch-pretrained-BERT | 9f1c253469ce2953f1e6a8eafea4bd7fcb674eb1 | [
"Apache-2.0"
] | null | null | null | examples/extract_features.py | jayleicn/pytorch-pretrained-BERT | 9f1c253469ce2953f1e6a8eafea4bd7fcb674eb1 | [
"Apache-2.0"
] | 2 | 2021-02-24T09:14:29.000Z | 2021-12-16T10:05:22.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract pre-computed feature vectors from a PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import logging
import json
import re
import sys
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
# Console logging: timestamped INFO-level records, tagged with the emitting
# module name, used below to report example formatting and device setup.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class InputExample(object):
    """One raw input example: a unique id plus one or two text segments.

    `text_b` is None for single-sentence examples.
    """

    def __init__(self, unique_id, text_a, text_b):
        self.unique_id = unique_id
        self.text_a, self.text_b = text_a, text_b
class InputFeatures(object):
    """A single set of features of data: the tokenized, id-mapped, padded
    representation of one `InputExample`."""

    def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
        self.unique_id = unique_id
        self.tokens = tokens
        self.input_ids, self.input_mask = input_ids, input_mask
        self.input_type_ids = input_type_ids
def convert_examples_to_features(examples, seq_length, tokenizer):
    """Convert `InputExample`s into padded, id-mapped `InputFeatures`.

    Each example is WordPiece-tokenized, truncated to fit `seq_length`
    (including the [CLS]/[SEP] markers), mapped to vocabulary ids, and
    zero-padded.  The first five examples are logged for inspection.
    """
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)

        if tokens_b:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > seq_length - 2:
                tokens_a = tokens_a[0:(seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        input_type_ids = []
        tokens.append("[CLS]")
        input_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            input_type_ids.append(0)
        tokens.append("[SEP]")
        input_type_ids.append(0)

        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                input_type_ids.append(1)
            tokens.append("[SEP]")
            input_type_ids.append(1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < seq_length:
            input_ids.append(0)
            input_mask.append(0)
            input_type_ids.append(0)

        assert len(input_ids) == seq_length
        assert len(input_mask) == seq_length
        assert len(input_type_ids) == seq_length

        # Log the first few converted examples so a run can be sanity-checked.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("unique_id: %s" % (example.unique_id))
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))

        features.append(
            InputFeatures(
                unique_id=example.unique_id,
                tokens=tokens,
                input_ids=input_ids,
                input_mask=input_mask,
                input_type_ids=input_type_ids))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
    """Read a list of `InputExample`s from `input_file`, one per line.

    A line of the form ``A ||| B`` yields a sentence-pair example; any other
    line yields a single-sentence example with ``text_b`` set to None.
    Unique ids are assigned in file order, starting at 0.
    """
    pair_pattern = re.compile(r"^(.*) \|\|\| (.*)$")
    examples = []
    with open(input_file, "r", encoding='utf-8') as reader:
        for unique_id, raw_line in enumerate(reader):
            line = raw_line.strip()
            match = pair_pattern.match(line)
            if match is None:
                text_a, text_b = line, None
            else:
                text_a, text_b = match.group(1), match.group(2)
            examples.append(
                InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
    return examples
def main():
    """CLI entry point: extract BERT hidden states for each input line.

    Reads ``--input_file`` (one example per line, ``A ||| B`` for sentence
    pairs), runs the pre-trained ``--bert_model``, and writes one JSON record
    per example to ``--output_file`` containing the requested hidden
    ``--layers`` for every token.
    """
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--input_file", default=None, type=str, required=True)
    parser.add_argument("--output_file", default=None, type=str, required=True)
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")

    ## Other parameters
    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
    parser.add_argument("--layers", default="-1,-2,-3,-4", type=str)
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences longer "
                             "than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")

    args = parser.parse_args()

    # Device / distributed setup: a local_rank of -1 means single-process mode.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1)))

    layer_indexes = [int(x) for x in args.layers.split(",")]

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    examples = read_examples(args.input_file)

    features = convert_examples_to_features(
        examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer)

    # Cleanup: removed an unused `unique_id_to_feature` index (only referenced
    # by commented-out code), stray debug print()s inside the inference loop,
    # a dead `all_encoder_layers = all_encoder_layers` self-assignment, and a
    # commented-out `sys.exit(1)`.
    model = BertModel.from_pretrained(args.bert_model)
    model.to(device)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)

    eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
    if args.local_rank == -1:
        eval_sampler = SequentialSampler(eval_data)
    else:
        eval_sampler = DistributedSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)

    model.eval()
    with open(args.output_file, "w", encoding='utf-8') as writer:
        for input_ids, input_mask, example_indices in eval_dataloader:
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)

            all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask)

            for b, example_index in enumerate(example_indices):
                feature = features[example_index.item()]
                unique_id = int(feature.unique_id)
                output_json = collections.OrderedDict()
                output_json["linex_index"] = unique_id
                all_out_features = []
                for (i, token) in enumerate(feature.tokens):
                    all_layers = []
                    for (j, layer_index) in enumerate(layer_indexes):
                        # Hidden states for the requested layer, this example.
                        layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
                        layer_output = layer_output[b]
                        layers = collections.OrderedDict()
                        layers["index"] = layer_index
                        layers["values"] = [
                            round(x.item(), 6) for x in layer_output[i]
                        ]
                        all_layers.append(layers)
                    out_features = collections.OrderedDict()
                    out_features["token"] = token
                    out_features["layers"] = all_layers
                    all_out_features.append(out_features)
                output_json["features"] = all_out_features
                writer.write(json.dumps(output_json) + "\n")


if __name__ == "__main__":
    main()
| 40.960396 | 120 | 0.617436 |
aa872f72617c32f12e3ee875164db8100877fb60 | 2,523 | py | Python | services.py | adipopbv/shows-app | 8bd14ee0613b7d7ede246881baf6267569d509fc | [
"BSD-3-Clause"
] | null | null | null | services.py | adipopbv/shows-app | 8bd14ee0613b7d7ede246881baf6267569d509fc | [
"BSD-3-Clause"
] | null | null | null | services.py | adipopbv/shows-app | 8bd14ee0613b7d7ede246881baf6267569d509fc | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from threading import Lock
from repositories import SqliteSalesRepository, SqliteShowsRepository, \
SqliteSoldSeatsRepository, SqliteRepository
# Module-level singletons shared by every service function below.
repo = SqliteRepository()                      # low-level handle used for reset/close
sales_repo = SqliteSalesRepository()           # sale records
shows_repo = SqliteShowsRepository()           # show catalogue, seat counts, balances
sold_seats_repo = SqliteSoldSeatsRepository()  # per-show sold seat numbers

# Serializes sell_seats() so concurrent sales cannot oversell a show.
lock = Lock()
def sell_seats(show_id: int, seats_count: int, seats_numbers: list) -> None:
    """Atomically sell the given seats for a show.

    :param show_id: id of the show being sold.
    :param seats_count: number of seats being purchased.
    :param seats_numbers: the specific seat numbers requested.
    :raises Exception: if the show is sold out, there are not enough seats
        left, or any requested seat is already taken.
    """
    # Fix: the original acquired/released the lock manually and had to call
    # lock.release() on every early-exit path; any unexpected exception from
    # a repository call would leave the lock held forever.  `with lock:`
    # guarantees release on every path.
    with lock:
        show = shows_repo.get(show_id)
        sold_seats_to_show = sold_seats_repo.get_for_show(show_id)
        if show.room.seats_count < len(sold_seats_to_show):
            raise Exception('all seats taken')
        if seats_count > show.room.seats_count - len(sold_seats_to_show):
            raise Exception('not enough seats available')
        # Set membership instead of the original nested loops (O(n*m) -> O(n+m)).
        taken_numbers = {sold.seat_number for sold in sold_seats_to_show}
        for wanted_seat_number in seats_numbers:
            if wanted_seat_number in taken_numbers:
                raise Exception('seat already taken')
        sale_id = sales_repo.add(datetime.now(), show)
        shows_repo.update(show_id, seats_count, seats_count * show.ticket_price)
        sale = sales_repo.get(sale_id)
        for wanted_seat_number in seats_numbers:
            sold_seats_repo.add(wanted_seat_number, sale)
def verify_sells():
    """Append a consistency report for every show to ``sales.txt``.

    For each show, recomputes the balance and sold-seat count from the sold
    seats and writes "Correct" when they match the stored
    ``available_seats_count`` and ``balance``, "Wrong" otherwise.
    """
    # Fix: the file was opened without a context manager, so an exception
    # from any repository call leaked the handle (and `file` shadowed the
    # legacy builtin name).  `with` guarantees the file is closed.
    with open("sales.txt", "a") as report:
        report.write("Date and time: " + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + "\n")
        shows = shows_repo.get_all()
        for show in shows:
            correct_transactions = True
            report.write(f"Show: {show.show_id} {show.title}, price for one ticket: {show.ticket_price} \n")
            sold_seats = sold_seats_repo.get_for_show(show.show_id)
            nr_sold_seats = len(sold_seats)
            balance = nr_sold_seats * show.ticket_price
            report.write("Balance: " + str(balance) + "\n")
            report.write("Number of sold sets: " + str(nr_sold_seats) + "\n")
            report.write("Sold seats: ")
            for sold_seat in sold_seats:
                report.write(str(sold_seat.seat_number) + " ")
            # The stored counters must agree with what the sold-seat rows imply.
            if show.room.seats_count - nr_sold_seats != show.available_seats_count:
                correct_transactions = False
            if show.balance - balance != 0:
                correct_transactions = False
            if correct_transactions:
                report.write("\nCorrect\n")
            else:
                report.write("\nWrong\n")
            report.write("\n")
def reset_database():
    """Wipe all persisted data and close the underlying connection."""
    repo.reset_data()
    repo.close_connection()
| 31.5375 | 102 | 0.667856 |
bb23bf4597c4efd890768d61115ed3066fb13665 | 652 | py | Python | setup.py | vndee/python-options | f518ec4e13c529a12a997a9bddbb54f86e21b55d | [
"MIT"
] | null | null | null | setup.py | vndee/python-options | f518ec4e13c529a12a997a9bddbb54f86e21b55d | [
"MIT"
] | null | null | null | setup.py | vndee/python-options | f518ec4e13c529a12a997a9bddbb54f86e21b55d | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Use the README verbatim as the long description shown on PyPI.
with open("README.md", "r") as f:
    readme = f.read()

setup(
    name='python-options',
    version='1.0.0-3',
    packages=find_packages(),
    url='https://github.com/TinDang97/python-options',
    license='LICENSE',
    author='tindang',
    author_email='rainstone1029x@gmail.com',
    description="Make python class with option base on property. "
                "Using build option for client like kafka, command option like ffmpeg.",
    long_description=readme,
    long_description_content_type='text/markdown',
    keywords=["option", "pyopt", "python-options"],
    python_requires=">=3.6"
)
| 31.047619 | 88 | 0.677914 |
4cee88027f12d0c81c247d88bfd651538cfe31b0 | 704 | py | Python | model_zoo/official/cv/resnext101/src/backbone/__init__.py | kungfu-team/mindspore-bert | 71501cf52ae01db9d6a73fb64bcfe68a6509dc32 | [
"Apache-2.0"
] | 2 | 2021-07-08T13:10:42.000Z | 2021-11-08T02:48:57.000Z | model_zoo/official/cv/resnext101/src/backbone/__init__.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | null | null | null | model_zoo/official/cv/resnext101/src/backbone/__init__.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""resnext"""
from .resnext import *
| 41.411765 | 78 | 0.676136 |
e37cc72266127d512768b9234ce2637c15b92cb4 | 291 | py | Python | Assignment1/Push_Image_to_wandb.py | utsavdey/Fundamentals_Of_Deep_Learning_Assignments | c1b2fc49e929ab09760f083aa8b052845afad48f | [
"MIT"
] | null | null | null | Assignment1/Push_Image_to_wandb.py | utsavdey/Fundamentals_Of_Deep_Learning_Assignments | c1b2fc49e929ab09760f083aa8b052845afad48f | [
"MIT"
] | null | null | null | Assignment1/Push_Image_to_wandb.py | utsavdey/Fundamentals_Of_Deep_Learning_Assignments | c1b2fc49e929ab09760f083aa8b052845afad48f | [
"MIT"
] | null | null | null | # CODE to push plot as an image to wandb
import wandb
from matplotlib import pyplot as plt
# Load the plot image from disk, then log it to a wandb run.
image_path = "<Provide Absolute file Path >"
image = plt.imread(image_path)

# Start a run in the target project.
wandb.init(project="<Provide project name in wandb>")

# Upload the image under the "img" key.
wandb.log({"img": [wandb.Image(image)]})
34c25dc5474dfcd7c19c5e71f40119a87b895430 | 3,024 | py | Python | naslib/optimizers/oneshot/gdas/optimizer.py | az2104nas/sztnb302alsr2bs21on | 6084c82c59a4a89498a191d96c231f47df10317d | [
"Apache-2.0"
] | null | null | null | naslib/optimizers/oneshot/gdas/optimizer.py | az2104nas/sztnb302alsr2bs21on | 6084c82c59a4a89498a191d96c231f47df10317d | [
"Apache-2.0"
] | 4 | 2021-06-08T21:32:32.000Z | 2022-03-12T00:29:33.000Z | naslib/optimizers/oneshot/gdas/optimizer.py | az2104nas/sztnb302alsr2bs21on | 6084c82c59a4a89498a191d96c231f47df10317d | [
"Apache-2.0"
] | null | null | null | import logging
import torch
from naslib.optimizers.core.operations import GDASMixedOp
from naslib.optimizers.oneshot.darts.optimizer import DARTSOptimizer
class GDASOptimizer(DARTSOptimizer):
    """DARTS-style one-shot optimizer that Gumbel-softmax-samples edge ops.

    Extends DARTSOptimizer with a temperature (tau) that is annealed
    linearly from `tau_max` to `tau_min` over the configured number of
    epochs; each forward pass draws Gumbel-softmax samples of the
    architectural weights at the current temperature.
    """

    def __init__(self, tau_max, tau_min, *args, **kwargs):
        """Set up the linear tau schedule on top of the DARTS optimizer.

        :param tau_max: starting (largest) temperature.
        :param tau_min: final (smallest) temperature.
        Remaining args/kwargs are forwarded to DARTSOptimizer; `self.epochs`
        is assumed to be set by the parent — TODO confirm.
        """
        super(GDASOptimizer, self).__init__(*args, **kwargs)

        # Linear tau schedule: one equal step per epoch (step is negative
        # since tau_min < tau_max).
        self.tau_max = tau_max
        self.tau_min = tau_min
        self.tau_step = (self.tau_min - self.tau_max) / self.epochs
        self.tau_curr = self.tau_max

    def new_epoch(self, epoch):
        """Advance the parent's epoch bookkeeping and anneal tau one step."""
        super(GDASOptimizer, self).new_epoch(epoch)
        self.tau_curr += self.tau_step
        logging.info('TAU {}'.format(self.tau_curr))

    def replace_function(self, edge, graph):
        """Attach architectural weights and a GDASMixedOp to a graph edge.

        Edges carrying 'op_choices' get a per-(cell, from, to) weight vector
        (created on first sight, reused afterwards) and are registered in
        `self.edges` under that key.  Returns the (possibly modified) edge.
        """
        graph.architectural_weights = self.architectural_weights

        if 'op_choices' in edge:
            edge_key = 'cell_{}_from_{}_to_{}'.format(graph.cell_type, edge['from_node'], edge['to_node'])

            # Reuse the existing weight vector for this edge key, or create a
            # new small-random-init parameter (one entry per candidate op).
            weights = self.architectural_weights[edge_key] if edge_key in self.architectural_weights else \
                torch.nn.Parameter(1e-3 * torch.randn(size=[len(edge['op_choices'])], requires_grad=True))

            self.architectural_weights[edge_key] = weights
            edge['arch_weight'] = self.architectural_weights[edge_key]
            edge['op'] = GDASMixedOp(primitives=edge['op_choices'], **edge['op_kwargs'])

            if edge_key not in self.edges:
                self.edges[edge_key] = []
            self.edges[edge_key].append(edge)
        return edge

    def forward_pass_adjustment(self, *args, **kwargs):
        """
        Replaces the architectural weights in the edges with gumbel softmax
        near one-hot encodings (sampled at the current temperature).
        """
        for arch_key, arch_weight in self.architectural_weights.items():

            # gumbel sample arch weights and assign them in self.edges
            sampled_arch_weight = torch.nn.functional.gumbel_softmax(
                arch_weight, tau=self.tau_curr, hard=False
            )

            # random perturbation part: optionally jitter the sampled weights
            # (self.perturb_alphas / self.epsilon_alpha presumably come from
            # the parent optimizer — TODO confirm).
            if self.perturb_alphas == 'random':
                softmaxed_arch_weight = sampled_arch_weight.clone()
                perturbation = torch.zeros_like(softmaxed_arch_weight).uniform_(
                    -self.epsilon_alpha,
                    self.epsilon_alpha
                )
                softmaxed_arch_weight.data.add_(perturbation)
                # clipping: keep weights in [0, 1]; if everything clipped to
                # zero, restore the former argmax before renormalizing to sum 1.
                max_index = softmaxed_arch_weight.argmax()
                softmaxed_arch_weight.data.clamp_(0, 1)
                if softmaxed_arch_weight.sum() == 0.0:
                    softmaxed_arch_weight.data[max_index] = 1.0
                softmaxed_arch_weight.data.div_(softmaxed_arch_weight.sum())

            # Push the sampled (and possibly perturbed) weights onto every
            # edge registered under this key.
            for edge in self.edges[arch_key]:
                edge['sampled_arch_weight'] = sampled_arch_weight
                if self.perturb_alphas == 'random':
                    edge['softmaxed_arch_weight'] = softmaxed_arch_weight
                    edge['perturb_alphas'] = True
b0cbc9db9f5e1dd6b0342069e0ee7037b2b8e224 | 650 | py | Python | Chapter4_OpenAIGym/cartpoleSpace.py | franneck94/UdemyAI | bb3decc35ec626a09edf0abdbfbe7c36dac6179a | [
"MIT"
] | 2 | 2021-02-10T19:50:27.000Z | 2021-12-30T06:15:55.000Z | Chapter4_OpenAIGym/cartpoleSpace.py | franneck94/UdemyAI | bb3decc35ec626a09edf0abdbfbe7c36dac6179a | [
"MIT"
] | 1 | 2020-12-21T15:29:20.000Z | 2022-01-15T12:06:09.000Z | Chapter4_OpenAIGym/cartpoleSpace.py | franneck94/UdemyAI | bb3decc35ec626a09edf0abdbfbe7c36dac6179a | [
"MIT"
] | 4 | 2020-11-08T17:07:53.000Z | 2022-02-07T06:40:55.000Z | import gym
# Inspect the CartPole environment's action/observation spaces and print
# a few randomly sampled actions.
env = gym.make("CartPole-v1")
env.reset()

# Whole space objects (Discrete for actions, Box for observations).
act_space = env.action_space
obs_space = env.observation_space
print("Action Space: ", act_space)
print("Observation Space: ", obs_space)

# Individual attributes: number of discrete actions, and the per-component
# bounds and shape of the observation vector.
act_space_n = env.action_space.n
obs_space_low = env.observation_space.low
obs_space_high = env.observation_space.high
obs_space_shape = env.observation_space.shape
print("Action Space N: ", act_space_n)
print("Observation Space Low: ", obs_space_low)
print("Observation Space High: ", obs_space_high)
print("Observation Space Shape: ", obs_space_shape)

# Draw and print 10 random actions.
for i in range(10):
    act_sample = env.action_space.sample()
    print("Sample: ", act_sample)
| 24.074074 | 51 | 0.763077 |
c834de1bd5ab608f4debc44e745a712e9d1b1f54 | 3,236 | py | Python | homeassistant/components/solax/sensor.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 4 | 2019-07-03T22:36:57.000Z | 2019-08-10T15:33:25.000Z | homeassistant/components/solax/sensor.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 7 | 2019-08-23T05:26:02.000Z | 2022-03-11T23:57:18.000Z | homeassistant/components/solax/sensor.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 1 | 2018-04-29T02:14:32.000Z | 2018-04-29T02:14:32.000Z | """Support for Solax inverter via local API."""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import (
TEMP_CELSIUS,
CONF_IP_ADDRESS
)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_IP_ADDRESS): cv.string,
})
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Solax sensor platform: one entity per inverter value."""
    import solax

    api = solax.RealTimeAPI(config[CONF_IP_ADDRESS])
    endpoint = RealTimeDataEndpoint(hass, api)
    # The first fetch also yields the serial number used to build unique ids.
    resp = await api.get_data()
    serial = resp.serial_number
    # Kick off an immediate refresh, then poll every SCAN_INTERVAL.
    hass.async_add_job(endpoint.async_refresh)
    async_track_time_interval(hass, endpoint.async_refresh, SCAN_INTERVAL)
    devices = []
    for sensor in solax.INVERTER_SENSORS:
        idx, unit = solax.INVERTER_SENSORS[sensor]
        # Map the API's plain 'C' unit onto Home Assistant's Celsius constant.
        if unit == 'C':
            unit = TEMP_CELSIUS
        uid = '{}-{}'.format(serial, idx)
        devices.append(Inverter(uid, serial, sensor, unit))
    # The endpoint pushes fresh values into these entities on each refresh.
    endpoint.sensors = devices
    async_add_entities(devices)
class RealTimeDataEndpoint:
    """Polls the Solax API and pushes fresh values into the sensor entities."""

    def __init__(self, hass, api):
        """Initialize the endpoint."""
        self.hass = hass
        self.api = api
        # Set once the first successful fetch has completed.
        self.ready = asyncio.Event()
        # Inverter entities to update; filled in by async_setup_platform().
        self.sensors = []

    async def async_refresh(self, now=None):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.

        ``now`` is None on the initial setup-time call and a timestamp on
        scheduled interval calls; a failure is fatal only during setup.
        """
        from solax import SolaxRequestError
        try:
            api_response = await self.api.get_data()
        except SolaxRequestError:
            if now is not None:
                # Periodic refresh failed: mark the data stale, keep the last
                # known values and let the next interval retry.
                # (Bug fix: the original fell through here and then read the
                # unbound ``api_response``, raising UnboundLocalError.)
                self.ready.clear()
                return
            # Initial refresh failed: ask HA to retry the platform setup.
            raise PlatformNotReady
        self.ready.set()
        data = api_response.data
        # Push every known key to its entity and schedule an HA state update.
        for sensor in self.sensors:
            if sensor.key in data:
                sensor.value = data[sensor.key]
                sensor.async_schedule_update_ha_state()
class Inverter(Entity):
    """One Home Assistant sensor entity per inverter data field.

    Values are pushed in by the polling endpoint, so HA never polls this
    entity itself.
    """

    def __init__(self, uid, serial, key, unit):
        """Record the static identity of the sensor; the value starts unknown."""
        self.value = None
        self.unit = unit
        self.key = key
        self.serial = serial
        self.uid = uid

    @property
    def should_poll(self):
        """Data is pushed by the endpoint; disable HA polling."""
        return False

    @property
    def unique_id(self):
        """Stable identifier (inverter serial combined with the sensor index)."""
        return self.uid

    @property
    def name(self):
        """Human-readable entity name."""
        return 'Solax {} {}'.format(self.serial, self.key)

    @property
    def state(self):
        """Latest value pushed by the endpoint (None until the first update)."""
        return self.value

    @property
    def unit_of_measurement(self):
        """Unit the value is reported in."""
        return self.unit
| 27.65812 | 78 | 0.640297 |
658fdb40c1bd71dc3bd9d161fa6363df61c4d22f | 3,913 | py | Python | Framework/generateAOall.py | lishi0927/FrameWork | 8ff62a38ebd05d088721acca4b622b448cbe2978 | [
"MIT"
] | null | null | null | Framework/generateAOall.py | lishi0927/FrameWork | 8ff62a38ebd05d088721acca4b622b448cbe2978 | [
"MIT"
] | null | null | null | Framework/generateAOall.py | lishi0927/FrameWork | 8ff62a38ebd05d088721acca4b622b448cbe2978 | [
"MIT"
] | null | null | null | import struct
import os
import os.path
import sys
import numpy
import scipy
import random
import math
from PIL import Image
import matplotlib.pyplot as plt
def readHeight(fname):  # 90 degree
    """Read a terrain file and return ``(w, h, heights)``.

    File layout: a 256-byte header, then ``ntex`` texture-name records of
    ``texnamesize`` bytes each, then ``w*h`` 32-bit floats.

    Fixes vs. the original:
    - removed the first ``struct.unpack`` call whose result was immediately
      overwritten by the second one;
    - the header format now uses ``'='`` (standard sizes, no padding) so it
      occupies exactly the 36 bytes sliced off -- the native format crashed
      on LP64 platforms where ``'L'`` is 8 bytes;
    - the file is closed deterministically via ``with``;
    - byte literals make the NUL-stripping work on both Python 2 and 3.

    :param fname: path of the binary terrain file.
    :returns: tuple ``(w, h, heights)`` with ``heights`` a flat tuple of floats.
    """
    with open(fname, 'rb') as f:
        header = f.read(256)  # read headers (fixed-size block)
        # '=' forces standard sizes (L == 4 bytes) and no alignment padding,
        # so calcsize('=LLLfLLLL4B') == 36, matching the slice below.
        (version, w, h, spacing, ntex, texnamesize,
         dw, dh, rt, rt2, rt3, rt4) = struct.unpack('=LLLfLLLL4B', header[:36])
        texnames = []
        for i in range(ntex):  # extract tex names (validates the layout)
            texname = f.read(texnamesize)
            texname = texname.replace(b'\x00', b'')
            texnames.append(texname)
        assert(w == h)
        assert(w > 0)
        assert(ntex > 0)
        assert(texnamesize == 128)
        heights = struct.unpack('=%df' % (w * h), f.read(4 * w * h))
    return w, h, heights
def normalize(vector3):
    """Scale *vector3* (a 3-element list) to unit length in place and return it.

    A zero-length input is left untouched and a fresh ``[0.0, 0.0, 0.0]``
    is returned instead.
    """
    length = math.sqrt(vector3[0] ** 2 + vector3[1] ** 2 + vector3[2] ** 2)
    if length == 0:
        return [0.0, 0.0, 0.0]
    for axis in range(3):
        vector3[axis] = vector3[axis] / length
    return vector3
def GenerateAO(offsetx, offsety, heights):
    """Monte-Carlo ambient occlusion for one interior heightfield sample.

    ``heights`` is a flat array indexed on a 53-wide grid (51x51 interior plus
    a one-cell border -- assumes the terrain files are 53x53, TODO confirm).
    16 random rays are cast from the surface point into the hemisphere around
    the local normal; each ray is marched in 8 steps of ``maxdistance/8`` and
    counts as occluded as soon as it dips below the terrain.

    :returns: occlusion in [0, 1]; 1.0 means fully occluded.
    """
    # Central sample and its 4-neighborhood (offsets shifted by the border).
    height = heights[(offsetx + 1) * 53 + offsety + 1]
    height01 = heights[(offsetx + 1) * 53 + offsety]
    height21 = heights[(offsetx + 1) * 53 + offsety + 2]
    height10 = heights[offsetx * 53 + offsety + 1]
    height12 = heights[(offsetx + 2) * 53 + offsety + 1]
    # Surface normal from central differences (grid spacing 2 in each axis).
    x = [2.0,0.0,height21-height01]
    y = [0.0,2.0,height12-height10]
    normal = numpy.cross(x,y)
    normal = normalize(normal)
    # Build an orthonormal tangent frame around the normal; the branch avoids
    # a degenerate binormal when the normal is nearly axis-aligned.
    if math.fabs(normal[0]) > math.fabs(normal[2]):
        binormal = [normal[2], 0.0, -normal[0]]
    else:
        binormal = [0.0, -normal[2], normal[1]]
    binormal = normalize(binormal)
    tangent = numpy.cross(binormal, normal)
    visibility = 0
    maxdistance = 6.0
    # 4x4 stratified samples over the hemisphere (16 rays total).
    for i in range(4):
        for j in range(4):
            u0 = (i + random.random()) / 4
            v0 = (j + random.random()) / 4
            # Map the stratified (u0, v0) sample to a hemisphere direction.
            r = math.sqrt(u0)
            phi = 2.0 * math.pi * v0
            p = numpy.array([0.0,0.0,0.0])
            p[0] = r * math.cos(phi)
            p[1] = r * math.sin(phi)
            p[2] = math.sqrt(max(0.0, 1.0 - p[0] * p[0] - p[1]* p[1]))
            # Rotate the sample from tangent space into world space.
            q = numpy.array([0.0,0.0,0.0])
            q[0] = p[0] * tangent[0] + p[1] * binormal[0] + p[2] * normal[0]
            q[1] = p[0] * tangent[1] + p[1] * binormal[1] + p[2] * normal[1]
            q[2] = p[0] * tangent[2] + p[1] * binormal[2] + p[2] * normal[2]
            p = normalize(q)
            # NOTE(review): x/y are swapped here (offsety first) -- matches the
            # "90 degree" remark on readHeight; verify against the writer.
            origin = numpy.array([1.0 * offsety, 1.0* offsetx,1.0 *height])
            ray_visibility = 1.0;
            # March the ray in 8 equal steps; any terrain hit kills the ray.
            for samples in range(8):
                new_pos = numpy.array([0.0,0.0,0.0])
                new_pos[0] = origin[0] + (samples+1)*(maxdistance / 8.0) * p[0]
                new_pos[1] = origin[1] + (samples+1)*(maxdistance / 8.0) * p[1]
                new_pos[2] = origin[2] + (samples+1)*(maxdistance / 8.0) * p[2]
                # Steps outside the 51x51 interior are ignored.
                if(new_pos[0] < -1 or new_pos[0] >= 52 or new_pos[1] < -1 or new_pos[1] >= 52):
                    continue
                new_height = heights[(int(new_pos[1] + 1)) * 53 + (int(new_pos[0] + 1))]
                if new_height >= new_pos[2]:
                    ray_visibility *= 0
                else:
                    ray_visibility *= 1
            # One contribution per ray (0 if blocked anywhere along the march).
            visibility += ray_visibility
    return 1.0 - float(visibility / 16.0)
# Batch driver (Python 2 source: note the print statement): for every
# subdirectory of ``data``, read its ``terrain`` heightfield and write the
# ambient-occlusion map both as text (AO.txt) and as raw binary floats (AO).
rootdir = "data\\"
for parent,dirnames,filenames in os.walk(rootdir):
    for dirname in dirnames:
        # NOTE(review): duplicated assignment target -- harmless, but it
        # looks like a copy/paste typo.
        filename = filename = "data\\" + dirname + "\\terrain"
        print dirname,filename
        AOfile = "data\\" + dirname + "\\AO"
        AOfiletxt = "data\\" + dirname + "\\AO.txt"
        w,h,heights = readHeight(filename)
        # GenerateAO expects non-negative heights: mirror negative samples.
        newheights = numpy.zeros(w*h)
        for i in range(h):
            for j in range(w):
                height = heights[i*w+j]
                if height < 0:
                    height = -height
                newheights[i*w+j] = height
        file_object = open(AOfiletxt, 'w')
        file_binary = open(AOfile, 'wb')
        # One AO value per interior sample (51x51 grid); text output gets one
        # row per line, binary output is a flat stream of 32-bit floats.
        aovalue = numpy.zeros((51,51))
        for i in range(51):
            for j in range(51):
                aovalue[i,j] = GenerateAO(i,j,newheights)
                file_object.write("%f " % aovalue[i,j])
                file_binary.write(struct.pack('f',aovalue[i,j]))
            file_object.write("\n")
        file_object.close()
        file_binary.close()
| 31.813008 | 109 | 0.61104 |
6dbb52538aba4d67c3475d528a88aba5fe25ce56 | 601 | py | Python | locallibrary/catalog/migrations/0004_bookinstance_borrower.py | ren-zxcyq/django_collection | b4811ae225326787baab9baa6ebe27bd3896f9fc | [
"MIT"
] | null | null | null | locallibrary/catalog/migrations/0004_bookinstance_borrower.py | ren-zxcyq/django_collection | b4811ae225326787baab9baa6ebe27bd3896f9fc | [
"MIT"
] | null | null | null | locallibrary/catalog/migrations/0004_bookinstance_borrower.py | ren-zxcyq/django_collection | b4811ae225326787baab9baa6ebe27bd3896f9fc | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-10-10 12:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the nullable ``borrower`` FK from BookInstance to the user model."""

    dependencies = [
        # Required because the FK targets the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('catalog', '0003_book_genre'),
    ]

    operations = [
        migrations.AddField(
            model_name='bookinstance',
            name='borrower',
            # SET_NULL + null=True: deleting a user clears their loans
            # instead of deleting the book instances.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 27.318182 | 134 | 0.678869 |
29c293b82af0cc962e143565dddf3e20234f4e7b | 9,468 | py | Python | orange3/doc/data-mining-library/source/tutorial/conf.py | rgschmitz1/BioDepot-workflow-builder | f74d904eeaf91ec52ec9b703d9fb38e9064e5a66 | [
"MIT"
] | 54 | 2017-01-08T17:21:49.000Z | 2021-11-02T08:46:07.000Z | orange3/doc/data-mining-library/source/tutorial/conf.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 22 | 2017-03-28T06:03:14.000Z | 2021-07-28T05:43:55.000Z | orange3/doc/data-mining-library/source/tutorial/conf.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 21 | 2017-01-26T21:12:09.000Z | 2022-01-31T21:34:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# scripting-tutorial documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 22 18:51:25 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "scripting-tutorial"
copyright = "2015, orange data mining"
author = "orange data mining"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "scripting-tutorialdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,                          # source start file
        "scripting-tutorial.tex",            # target .tex file name
        "scripting-tutorial Documentation",  # title
        "orange data mining",                # author
        "manual",                            # LaTeX document class
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "scripting-tutorial", "scripting-tutorial Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"scripting-tutorial",
"scripting-tutorial Documentation",
author,
"scripting-tutorial",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 32.313993 | 87 | 0.706907 |
edde6362e2038a44a45705f226c4ba569b0886ed | 24,121 | py | Python | unstructuredmesh/ddm/localstructure.py | imadki/unstructuredmesh | 08cd9346ca052622698ad376da1098fd634c9a6f | [
"MIT"
] | null | null | null | unstructuredmesh/ddm/localstructure.py | imadki/unstructuredmesh | 08cd9346ca052622698ad376da1098fd634c9a6f | [
"MIT"
] | null | null | null | unstructuredmesh/ddm/localstructure.py | imadki/unstructuredmesh | 08cd9346ca052622698ad376da1098fd634c9a6f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 16:08:27 2021
@author: kissami
"""
#from numba.typed import Dict
import timeit
from numpy import zeros, ones, asarray, double, int64, unique, where, array, sort, dot
from collections import OrderedDict
from mpi4py import MPI
from unstructuredmesh.ddm.module import (create_info_2dfaces, create_info_3dfaces,
Compute_2dcentervolumeOfCell, Compute_3dcentervolumeOfCell,
create_cellsOfFace, create_2dfaces, create_cell_faceid,
create_3dfaces, create_NormalFacesOfCell)
from unstructuredmesh.ddm.utils import (create_2doppNodeOfFaces, create_3doppNodeOfFaces,
create_NeighborCellByFace, create_node_cellid,
oriente_3dfacenodeid)
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
__all__ = ["generate_structure"]
class Cells:
    """Mesh cells of the local (per-rank) partition.

    The class attributes double as module-wide accumulators: the builder
    functions in this module append to / overwrite them directly (e.g.
    ``Cells.nodeid.append(...)`` in ``CreateStructure``) before a frozen
    ``Cells`` instance is finally constructed from them.
    """

    nodeid = []                  # per cell: ids of its vertices
    faceid = []                  # per cell: ids of its faces
    center = []                  # per cell: barycenter coordinates
    volume = []                  # per cell: measure (area in 2d / volume in 3d -- filled by the pyccel kernels)
    cellfid = []                 # per cell: neighbor cells sharing a face
    cellnid = []                 # per cell: neighbor cells sharing a node
    halonid = []                 # per cell: halo-cell neighbors by node (padded with -1, last entry = count)
    nf = []                      # per cell: outgoing face normal vectors
    globalindex = OrderedDict()  # global cell id -> local cell id
    name = []                    # per cell: tag read from the 4th column of the mesh file

    def __init__(self, nodeid, faceid, center, volume, cellfid, cellnid, nf, globalindex, halonid, name):
        """Freeze the assembled arrays into one container object."""
        self.nodeid = nodeid  # instance variable unique to each instance
        self.faceid = faceid
        self.center = center
        self.volume = volume
        self.cellfid = cellfid
        self.cellnid = cellnid
        self.halonid = halonid
        self.nf = nf
        self.globalindex = globalindex
        self.name = name
class Nodes:
    """Mesh vertices of the local partition (class attrs used as accumulators,
    see :class:`Cells`)."""

    vertex = []                  # per node: coordinates; the 4th column carries the node tag
    name = []                    # per node: tag (10 marks nodes on the halo interface)
    cellid = []                  # per node: ids of the cells touching the node
    halonid = []                 # per node: halo cells touching the node (padded with -1, last entry = count)
    ghostcenter = []             # per node: ghost centers mirrored across adjacent boundary faces
    globalindex = OrderedDict()  # local node id -> global node id

    def __init__(self, vertex, name, cellid, ghostcenter, globalindex, halonid):
        """Freeze the assembled arrays into one container object."""
        self.vertex = vertex  # instance variable unique to each instance
        self.name = name
        self.cellid = cellid
        self.ghostcenter = ghostcenter
        self.globalindex = globalindex
        self.halonid = halonid
class Faces:
    """Mesh faces (edges in 2d) of the local partition (class attrs used as
    accumulators, see :class:`Cells`)."""

    nodeid = []       # per face: ids of its vertices
    cellid = []       # per face: (left cell, right cell); -1 = no neighbor, -10 = halo neighbor
    name = []         # per face: tag (0 interior, 10 halo interface, boundary tags set by the create_info_* kernels)
    normal = []       # per face: normal vector
    mesure = []       # per face: face measure (presumably length in 2d / area in 3d -- set by the kernels)
    bound = 0         # number of faces carrying a non-zero tag
    center = []       # per face: midpoint coordinates
    ghostcenter = []  # per boundary face: mirrored ghost center plus projection factor gamma
    oppnodeid = []    # per face: opposite node(s) in the adjacent cell(s)
    halofid = []      # per face: index of the matching halo cell (halo faces only)

    def __init__(self, nodeid, cellid, name, normal, mesure, center, bound, ghostcenter, oppnodeid, halofid):
        """Freeze the assembled arrays into one container object."""
        self.nodeid = nodeid  # instance variable unique to each instance
        self.cellid = cellid
        self.name = name
        self.normal = normal
        self.mesure = mesure
        self.bound = bound
        self.center = center
        self.ghostcenter = ghostcenter
        self.oppnodeid = oppnodeid
        self.halofid = halofid
class Halo:
    """Halo (ghost) layer exchanged with neighboring MPI ranks (class attrs
    used as accumulators, see :class:`Cells`)."""

    halosint = []          # halo cell ids read from the 'halosint' section (presumably cells sent to neighbors -- TODO confirm)
    halosext = []          # per external halo cell: its global node ids
    neigh = []             # neighbor-rank info read from the 'neigh' section
    centvol = []           # per external halo cell: center/volume data
    faces = OrderedDict()  # (global node pair) -> 1-based slot; slot // 3 recovers the halo cell index
    nodes = OrderedDict()  # local node id -> global node id, for halo-interface nodes

    def __init__(self, halosint, halosext, centvol, neigh, faces, nodes):
        """Freeze the assembled arrays into one container object."""
        self.halosint = halosint  # instance variable unique to each instance
        self.halosext = halosext
        self.neigh = neigh
        self.centvol = centvol
        self.faces = faces
        self.nodes = nodes
def func_unique(array):
    """Return the unique values of *array*, ordered by first appearance."""
    _, first_idx = unique(array, return_index=True)
    return asarray(array)[sort(first_idx)]
def CreateStructure(file, dim):
    """Build the local mesh connectivity from an open ``mesh*.txt`` file.

    Reads the 'elements' and 'nodes' sections, then fills the module-level
    :class:`Cells`, :class:`Nodes` and :class:`Faces` accumulators using the
    compiled pyccel/numba kernels.

    :param file: open text file positioned at the 'elements' section.
    :param dim: spatial dimension, 2 or 3.
    :returns: 0 (results are stored in the accumulator classes).
    """
    # Read the cells from the mesh..txt file
    for line in file:
        # read elements
        if line == "elements\n":
            continue
        if line == "endelements\n":
            continue
        if line == "nodes\n":
            break
        Cells.nodeid.append([int64(x) for x in line.split()])
    # Read the node coordinates from the mesh..txt file
    for line in file:
        # read Nodes
        if line == "nodes\n":
            continue
        if line == "endnodes\n":
            continue
        if line == "halosint\n":
            break
        Nodes.vertex.append([double(x) for x in line.split()])
    # The 4th column of each element row is its tag; the first 3 are vertices.
    Cells.name = zeros(len(Cells.nodeid))
    for i in range(len(Cells.nodeid)):
        Cells.name[i] = Cells.nodeid[i][3]
        Cells.nodeid[i] = Cells.nodeid[i][0:3]
    Cells.nodeid = asarray(Cells.nodeid, dtype=int64)
    Nodes.vertex = asarray(Nodes.vertex, dtype=double)
    nbelements = len(Cells.nodeid)
    nbnodes = len(Nodes.vertex)
    # The 4th vertex column carries the node tag.
    Nodes.name = zeros(nbnodes, dtype=int64)
    for i in range(nbnodes):
        Nodes.name[i] = int64(Nodes.vertex[i][3])
    # Create center and volume for each cell (pyccel kernel)
    Cells.center = zeros((nbelements, 3), dtype=double)
    Cells.volume = zeros(nbelements, dtype=double)
    if dim == 2:
        Compute_2dcentervolumeOfCell(Cells.nodeid, Nodes.vertex, nbelements, Cells.center, Cells.volume)
    elif dim == 3:
        Compute_3dcentervolumeOfCell(Cells.nodeid, Nodes.vertex, nbelements, Cells.center, Cells.volume)
    # Create the cells around each node (still a numba function)
    Nodes.cellid, Cells.cellnid = create_node_cellid(Cells.nodeid, Nodes.vertex, nbelements, nbnodes, dim=dim)
    # Create the faces (pyccel kernel): dim+1 candidate faces per cell, then
    # deduplicate; oldTonewIndex maps candidate faces to the unique ones.
    faces = zeros(((dim+1)*nbelements, dim), dtype=int64)
    cellf = zeros((nbelements, dim+1), dtype=int64)
    if dim == 2:
        create_2dfaces(Cells.nodeid, nbelements, faces, cellf)
    elif dim == 3:
        create_3dfaces(Cells.nodeid, nbelements, faces, cellf)
    Faces.nodeid, oldTonewIndex = unique(sort(faces), axis=0, return_inverse=True)
    nbfaces = len(Faces.nodeid)
    Cells.faceid = zeros((nbelements, (dim+1)), dtype=int64)
    create_cell_faceid(nbelements, oldTonewIndex, cellf, Cells.faceid, dim=dim)
    ############################################################################
    # Create the left/right cells of each face (pyccel kernel); -1 = no neighbor.
    Faces.cellid = -1*ones((nbfaces, 2), dtype=int64)
    create_cellsOfFace(Cells.faceid, nbelements, nbfaces, Faces.cellid, dim=dim)
    ############################################################################
    Cells.cellfid = create_NeighborCellByFace(Cells.faceid, Faces.cellid, nbelements, dim=dim)
    ############################################################################
    # Create the face info: normals, measures, centers, tags (pyccel kernel)
    Faces.name = zeros(nbfaces, dtype=int64)
    Faces.normal = zeros((nbfaces, 3), dtype=double)
    Faces.mesure = zeros(nbfaces, dtype=double)
    Faces.center = zeros((nbfaces, 3), dtype=double)
    if dim == 2:
        create_info_2dfaces(Faces.cellid, Faces.nodeid, Nodes.name, Nodes.vertex, Cells.center,
                            nbfaces, Faces.normal, Faces.mesure, Faces.center, Faces.name)
    elif dim == 3:
        create_info_3dfaces(Faces.cellid, Faces.nodeid, Nodes.name, Nodes.vertex, Cells.center,
                            nbfaces, Faces.normal, Faces.mesure, Faces.center, Faces.name)
    # Number of boundary faces (any face with a non-zero tag)
    Faces.bound = len(Faces.name[Faces.name !=0])
    ############################################################################
    # Create the outgoing normal vectors per cell face (pyccel kernel)
    Cells.nf = zeros((nbelements, dim+1, 3), dtype=double)
    create_NormalFacesOfCell(Cells.center, Faces.center, Cells.faceid, Faces.normal, nbelements, Cells.nf, dim=dim)
    ###########################################################################
    # Opposite-node connectivity (still a numba function)
    if dim == 2:
        Faces.oppnodeid = create_2doppNodeOfFaces(Cells.nodeid, Cells.faceid, Faces.nodeid, nbelements, nbfaces)
    elif dim == 3:
        Faces.oppnodeid = create_3doppNodeOfFaces(Cells.nodeid, Cells.faceid, Faces.nodeid, nbelements, nbfaces)
        # Only meaningful in 3d: orient the face-node ordering along the normal.
        Faces.nodeid = oriente_3dfacenodeid(Faces.nodeid, Faces.normal, Nodes.vertex)
    return 0
def create_2d_halo_structure(file):
    """Read the halo sections of the partition file and finish the 2d structure.

    Continues reading ``file`` (positioned after the 'nodes' section): the
    'halosint', 'halosext', 'centvol', global<->local index maps and 'neigh'
    sections.  Then tags halo faces/nodes, computes boundary ghost centers,
    and builds the halo-neighborhood tables.

    :returns: tuple ``(cells, faces, nodes, halos)`` of frozen containers.
    """
    # --- file parsing -------------------------------------------------------
    for line in file:
        if "endhalosint" in line:
            break
        Halo.halosint.append(int(line))
    for line in file:
        # process the line
        if "halosext" in line:
            continue
        if "centvol" in line:
            break
        Halo.halosext.append([int(x) for x in line.split()])
    for line in file:
        if "centvol" in line:
            continue
        if "globalcelltolocal" in line:
            break
        Halo.centvol.append([float(x) for x in line.split()])
    cmpt = 0
    for line in file:
        # read Global cell to local
        if line == "globalcelltomocal\n":
            continue
        if line == "endglobalcelltolocal\n":
            break
        Cells.globalindex[int(line)] = cmpt
        cmpt += 1
    cmpt = 0
    for line in file:
        # read Local Node To Global
        if line == "localnodetoglobal\n":
            continue
        if line == "endlocalnodetoglobal\n":
            break
        Nodes.globalindex[cmpt] = int(line)
        cmpt += 1
    for line in file:
        # read neighbor-rank info
        if line == "neigh\n":
            continue
        if line == "endneigh\n":
            break
        Halo.neigh.append([int64(x) for x in line.split()])

    # --- halo face tagging (only meaningful with more than one rank) --------
    Faces.halofid = zeros(len(Faces.nodeid), dtype=int)
    if SIZE > 1:
        # Register the 3 edges of every external halo triangle under 1-based
        # slots k, k+1, k+2 so that slot // 3 recovers the halo cell index.
        k = 1
        for i in range(len(Halo.halosext)):
            Halo.faces[tuple([Halo.halosext[i][0], Halo.halosext[i][1]])] = k
            Halo.faces[tuple([Halo.halosext[i][1], Halo.halosext[i][2]])] = k + 1
            Halo.faces[tuple([Halo.halosext[i][0], Halo.halosext[i][2]])] = k + 2
            k = k+3
        # A local face whose global node pair matches a halo edge (in either
        # orientation) sits on the partition interface: tag it 10, mark its
        # right cell as halo (-10) and record the matching halo cell id.
        for i in range(len(Faces.nodeid)):
            n1 = Nodes.globalindex[Faces.nodeid[i][0]]
            n2 = Nodes.globalindex[Faces.nodeid[i][1]]
            if Halo.faces.get(tuple([n1, n2])):
                Faces.cellid[i] = (Faces.cellid[i][0], -10)
                Faces.name[i] = 10
                Nodes.name[Faces.nodeid[i][0]] = 10
                Nodes.name[Faces.nodeid[i][1]] = 10
                Faces.halofid[i] = int((-1+Halo.faces.get(tuple([n1, n2])))/3)
            if Halo.faces.get(tuple([n2, n1])):
                Faces.cellid[i] = (Faces.cellid[i][0], -10)
                Faces.name[i] = 10
                Nodes.name[Faces.nodeid[i][0]] = 10
                Nodes.name[Faces.nodeid[i][1]] = 10
                Faces.halofid[i] = int((-1+Halo.faces.get(tuple([n2, n1])))/3)

    # --- halo cells around each interface node ------------------------------
    # For every node tagged 10, collect the halo cells referencing its global
    # id; rows are padded with -1 to 'longueur' and the true count is appended
    # as the last entry of each row.
    longueur = 0
    longh = []
    tmp = [[] for i in range(len(Nodes.name))]
    for i in range(len(Nodes.name)):
        if Nodes.name[i] == 10:
            Halo.nodes[i] = Nodes.globalindex[i]
            arg = where(asarray(Halo.halosext) == Nodes.globalindex[i])
            tmp[i].append(arg[0])
            longueur = max(longueur, len(arg[0]))
            longh.append(len(arg[0]))
        else:
            tmp[i].append(array([-1]))
            longh.append(0)
    Nodes.halonid = [[-1]*longueur for i in range(len(Nodes.name))]
    for i in range(len(tmp)):
        for j in range(len(tmp[i][0])):
            Nodes.halonid[i][j] = tmp[i][0][j]
    for i in range(len(Nodes.halonid)):
        Nodes.halonid[i].append(longh[i])
    # Serial run: no halo at all, use minimal placeholder arrays.
    if SIZE == 1 :
        Nodes.halonid = zeros((len(Nodes.name),2), dtype=int)
        Halo.centvol = zeros((2,2))
        Halo.halosint = zeros((2,2))

    # TODO: to be verified !!!!!!
    Faces.ghostcenter = [[] for i in range(len(Faces.name))]
    Nodes.ghostcenter = [[] for i in range(len(Nodes.name))]
    # Compute the ghost center for each face and each node: for physical
    # boundary faces (tag not 0 and not 10), mirror the left cell center
    # across the face line; gamma is the projection factor onto the face.
    for i in range(len(Faces.name)):
        nod1 = Faces.nodeid[i][1]
        nod2 = Faces.nodeid[i][0]
        if Faces.name[i] != 0 and Faces.name[i] != 10:
            x_1 = Nodes.vertex[nod1]
            x_2 = Nodes.vertex[nod2]
            c_left = Faces.cellid[i][0]
            v_1 = Cells.center[c_left]
            gamma = ((v_1[0] - x_2[0])*(x_1[0]-x_2[0]) + (v_1[1]-x_2[1])*(x_1[1]-x_2[1]))/((x_1[0]-x_2[0])**2 + (x_1[1]-x_2[1])**2)
            kk = array([gamma * x_1[0] + (1 - gamma) * x_2[0], gamma * x_1[1] + (1 - gamma) * x_2[1]])
            v_2 = array([2 * kk[0] + ( -1 * v_1[0]), 2 * kk[1] + ( -1 * v_1[1])])
            Faces.ghostcenter[i] = [v_2[0], v_2[1], gamma]
            Nodes.ghostcenter[nod1].append([v_2[0], v_2[1], i])
            Nodes.ghostcenter[nod2].append([v_2[0], v_2[1], i])
        else:
            Faces.ghostcenter[i] = [0.,0., 0.]
    # Pad every node to exactly two ghost entries (-1 marks a missing one) so
    # the list converts to a rectangular array below.
    for i in range(len(Nodes.name)):
        if len(Nodes.ghostcenter[i]) == 0 :
            Nodes.ghostcenter[i].append([0.,0.,-1.])
            Nodes.ghostcenter[i].append([0.,0.,-1.])
        elif len(Nodes.ghostcenter[i]) == 1 :
            Nodes.ghostcenter[i].append([0.,0.,-1.])
    Faces.ghostcenter = asarray(Faces.ghostcenter)
    Nodes.ghostcenter = asarray(Nodes.ghostcenter)

    # Define the halo cells neighboring each cell (union over its 3 nodes);
    # rows padded with -1 to the maximum length, true count appended last.
    maxhalonid = 0
    Cells.halonid = [[] for i in range(len(Cells.nodeid))]
    for i in range(len(Cells.nodeid)):
        for j in range(3):
            nod = Cells.nodeid[i][j]
            k = Nodes.halonid[nod][-1]
            Cells.halonid[i].extend(Nodes.halonid[nod][:k])
        Cells.halonid[i] = list(set(Cells.halonid[i]))
        maxhalonid = max(maxhalonid, len(Cells.halonid[i]))
    for i in range(len(Cells.nodeid)):
        numb = len(Cells.halonid[i])
        iterator = maxhalonid - len(Cells.halonid[i])
        for k in range(iterator):
            Cells.halonid[i].append(-1)
        Cells.halonid[i].append(numb)
    if SIZE == 1 :
        Cells.halonid = zeros((len(Cells.nodeid),2), dtype=int)

    # Freeze everything into container instances and hand them back.
    cells = Cells(Cells.nodeid, Cells.faceid, Cells.center,
                  Cells.volume, Cells.cellfid, Cells.cellnid,
                  Cells.nf, Cells.globalindex, asarray(Cells.halonid),
                  Cells.name)
    nodes = Nodes(Nodes.vertex, Nodes.name, Nodes.cellid,
                  Nodes.ghostcenter, Nodes.globalindex, asarray(Nodes.halonid))
    faces = Faces(Faces.nodeid, Faces.cellid, Faces.name,
                  Faces.normal, Faces.mesure, Faces.center, Faces.bound,
                  Faces.ghostcenter, Faces.oppnodeid, asarray(Faces.halofid))
    halos = Halo(asarray(Halo.halosint), asarray(Halo.halosext), asarray(Halo.centvol),
                 asarray(Halo.neigh), Halo.faces, Halo.nodes)
    return cells, faces, nodes, halos
def create_3d_halo_structure(file):
    """Read the halo sections of a 3-D partitioned-mesh file and populate the
    module-level Cells/Nodes/Faces/Halo containers with halo connectivity.

    The file cursor must already be positioned at the "halosint" section
    (the earlier sections are consumed by CreateStructure).

    Returns:
        (cells, faces, nodes, halos) namedtuple-style objects built from the
        populated containers.
    """
    # -- halo interior cell ids: one integer per line up to "endhalosint" --
    for line in file:
        if "endhalosint" in line:
            break
        Halo.halosint.append(int(line))

    # -- halo exterior cells: 4 global node ids per tetrahedron --
    for line in file:
        if "halosext" in line:
            continue
        if "centvol" in line:
            break
        Halo.halosext.append([int(x) for x in line.split()])

    # -- center and volume of each halo cell --
    for line in file:
        if "centvol" in line:
            continue
        if "globalcelltolocal" in line:
            break
        Halo.centvol.append([float(x) for x in line.split()])

    # -- global cell id -> local cell index --
    cmpt = 0
    for line in file:
        # bug fix: the keyword here was misspelled "globalcelltomocal".
        # The header line is normally consumed by the previous loop's break,
        # but match the correct spelling in case it appears here.
        if line == "globalcelltolocal\n":
            continue
        if line == "endglobalcelltolocal\n":
            break
        Cells.globalindex[int(line)] = cmpt
        cmpt += 1

    # -- local node index -> global node id --
    cmpt = 0
    for line in file:
        if line == "localnodetoglobal\n":
            continue
        if line == "endlocalnodetoglobal\n":
            break
        Nodes.globalindex[cmpt] = int(line)
        cmpt += 1

    # -- neighbouring partitions --
    for line in file:
        if line == "neigh\n":
            continue
        if line == "endneigh\n":
            break
        Halo.neigh.append([int(x) for x in line.split()])

    Faces.halofid = zeros(len(Faces.nodeid), dtype=int)

    if SIZE > 1:
        # Index the 4 triangular faces of every halo tetrahedron by a node
        # triple.  Ids start at 1 (0 would be falsy for dict.get) and
        # (id - 1) // 4 recovers the owning halo-cell index.
        k = 1
        for ext in Halo.halosext:
            Halo.faces[(ext[0], ext[1], ext[2])] = k
            Halo.faces[(ext[2], ext[3], ext[0])] = k + 1
            Halo.faces[(ext[0], ext[1], ext[3])] = k + 2
            Halo.faces[(ext[3], ext[1], ext[2])] = k + 3
            k = k + 4

        for i in range(len(Faces.nodeid)):
            n1 = Nodes.globalindex[Faces.nodeid[i][0]]
            n2 = Nodes.globalindex[Faces.nodeid[i][1]]
            n3 = Nodes.globalindex[Faces.nodeid[i][2]]
            # A local face lies on a partition boundary when its node triple
            # matches a halo face under any vertex ordering.  (This replaces
            # six copy-pasted blocks, one per permutation, with one loop.)
            for key in ((n1, n2, n3), (n1, n3, n2), (n2, n1, n3),
                        (n2, n3, n1), (n3, n1, n2), (n3, n2, n1)):
                fid = Halo.faces.get(key)
                if fid:
                    Faces.cellid[i] = (Faces.cellid[i][0], -10)
                    Faces.name[i] = 10
                    Nodes.name[Faces.nodeid[i][0]] = 10
                    Nodes.name[Faces.nodeid[i][1]] = 10
                    Nodes.name[Faces.nodeid[i][2]] = 10
                    Faces.halofid[i] = int((fid - 1) / 4)

    # -- halo cells neighbouring each boundary node --
    longueur = 0
    longh = []
    tmp = [[] for i in range(len(Nodes.name))]
    for i in range(len(Nodes.name)):
        if Nodes.name[i] == 10:
            Halo.nodes[i] = Nodes.globalindex[i]
            arg = where(asarray(Halo.halosext) == Nodes.globalindex[i])
            tmp[i].append(arg[0])
            longueur = max(longueur, len(arg[0]))
            longh.append(len(arg[0]))
        else:
            tmp[i].append(array([-1]))
            longh.append(0)

    # Pad every node's halo-cell list to a common width; the true count is
    # appended as the last element so consumers can slice [:count].
    Nodes.halonid = [[-1] * longueur for i in range(len(Nodes.name))]
    for i in range(len(tmp)):
        for j in range(len(tmp[i][0])):
            Nodes.halonid[i][j] = tmp[i][0][j]
    for i in range(len(Nodes.halonid)):
        Nodes.halonid[i].append(longh[i])

    if SIZE == 1:
        # Single-process run: no halo at all, install small dummy arrays.
        Nodes.halonid = zeros((len(Nodes.name), 2), dtype=int)
        Halo.centvol = zeros((2, 2))
        Halo.halosint = zeros((2, 2))

    # -- ghost centers: mirror the owning cell center across boundary faces --
    # TODO(review): ghost-center formula "à vérifier" per original author.
    Faces.ghostcenter = zeros((len(Faces.name), 4))
    Nodes.ghostcenter = [[] for i in range(len(Nodes.name))]
    kk = zeros(3)
    for i in range(len(Faces.name)):
        if Faces.name[i] != 0 and Faces.name[i] != 10:
            nod1 = Faces.nodeid[i][1]
            nod2 = Faces.nodeid[i][0]
            nod3 = Faces.nodeid[i][2]
            n = Faces.normal[i] / Faces.mesure[i]
            c_left = Faces.cellid[i][0]
            v_1 = Cells.center[c_left]
            # Project the cell center onto the face plane, then reflect it.
            u = Faces.center[i][:] - v_1[:]
            gamma = dot(u, n)
            kk[0] = v_1[0] + gamma * n[0]
            kk[1] = v_1[1] + gamma * n[1]
            kk[2] = v_1[2] + gamma * n[2]
            v_2 = array([2 * kk[0] - v_1[0],
                         2 * kk[1] - v_1[1],
                         2 * kk[2] - v_1[2]])
            Faces.ghostcenter[i] = [v_2[0], v_2[1], v_2[2], gamma]
            Nodes.ghostcenter[nod1].append([v_2[0], v_2[1], v_2[2], i])
            Nodes.ghostcenter[nod2].append([v_2[0], v_2[1], v_2[2], i])
            Nodes.ghostcenter[nod3].append([v_2[0], v_2[1], v_2[2], i])
        else:
            Faces.ghostcenter[i] = [0., 0., 0., 0.]

    # Pad the per-node ghost lists into a rectangular array.
    maxGhostCell = 0
    for i in range(len(Nodes.name)):
        maxGhostCell = max(maxGhostCell, len(Nodes.ghostcenter[i]))
    for i in range(len(Nodes.name)):
        for k in range(maxGhostCell - len(Nodes.ghostcenter[i])):
            Nodes.ghostcenter[i].append([-1., -1., -1., -1.])
    Nodes.ghostcenter = asarray(Nodes.ghostcenter)

    # -- halo cells neighbouring each cell (union over its 4 nodes) --
    maxhalonid = 0
    Cells.halonid = [[] for i in range(len(Cells.nodeid))]
    for i in range(len(Cells.nodeid)):
        for j in range(4):
            nod = Cells.nodeid[i][j]
            k = Nodes.halonid[nod][-1]
            Cells.halonid[i].extend(Nodes.halonid[nod][:k])
        Cells.halonid[i] = list(set(Cells.halonid[i]))
        maxhalonid = max(maxhalonid, len(Cells.halonid[i]))
    # Pad to a common width and append the true count, like Nodes.halonid.
    for i in range(len(Cells.nodeid)):
        numb = len(Cells.halonid[i])
        for k in range(maxhalonid - numb):
            Cells.halonid[i].append(-1)
        Cells.halonid[i].append(numb)

    if SIZE == 1:
        Cells.halonid = zeros((len(Cells.nodeid), 2), dtype=int)

    cells = Cells(Cells.nodeid, Cells.faceid, Cells.center,
                  Cells.volume, Cells.cellfid, Cells.cellnid,
                  Cells.nf, Cells.globalindex, asarray(Cells.halonid))
    nodes = Nodes(Nodes.vertex, Nodes.name, Nodes.cellid,
                  Nodes.ghostcenter, Nodes.globalindex, asarray(Nodes.halonid))
    faces = Faces(Faces.nodeid, Faces.cellid, Faces.name,
                  Faces.normal, Faces.mesure, Faces.center, Faces.bound,
                  Faces.ghostcenter, Faces.oppnodeid, asarray(Faces.halofid))
    halos = Halo(asarray(Halo.halosint), asarray(Halo.halosext), asarray(Halo.centvol),
                 asarray(Halo.neigh), Halo.faces, Halo.nodes)
    return cells, faces, nodes, halos
from mpi4py import MPI

# Process-wide MPI context shared by the structure-building helpers below.
COMM = MPI.COMM_WORLD   # communicator spanning all ranks
SIZE = COMM.Get_size()  # number of MPI processes (== number of mesh partitions)
RANK = COMM.Get_rank()  # this process' partition index
def generate_structure(dim, size):
    """Build the local grid structures for this MPI rank.

    Reads "meshes<SIZE>PROC/mesh<RANK>.txt", parses the common sections via
    CreateStructure, then builds the dimension-specific halo structures.

    Args:
        dim: mesh dimension, 2 or 3 (anything int()-convertible).
        size: unused; kept for backward interface compatibility (the number
            of partitions actually comes from the global SIZE).

    Returns:
        dict with keys "cells", "nodes", "faces", "halos".

    Raises:
        ValueError: if dim is not 2 or 3.  (Previously an unsupported dim
        fell through to an UnboundLocalError on `cells`.)
    """
    import os
    dim = int(dim)
    if dim not in (2, 3):
        raise ValueError("dim must be 2 or 3, got %d" % dim)
    MESH_DIR = "meshes" + str(SIZE) + "PROC"
    filename = os.path.join(MESH_DIR, 'mesh' + str(RANK) + '.txt')
    start = timeit.default_timer()
    # 'with' guarantees the mesh file is closed even if parsing fails
    # (the original leaked the handle on any exception).
    with open(filename) as txt_file:
        CreateStructure(txt_file, dim=dim)
        if dim == 2:
            cells, faces, nodes, halos = create_2d_halo_structure(txt_file)
        else:
            cells, faces, nodes, halos = create_3d_halo_structure(txt_file)
    stop = timeit.default_timer()
    if RANK == 0:
        print("CPU time for creating " + str(dim) + "d structure ", stop - start)
    grid = {}
    grid["cells"] = cells
    grid["nodes"] = nodes
    grid["faces"] = faces
    grid["halos"] = halos
    return grid
| 36.163418 | 131 | 0.53982 |
63b773840349f977f8ee11367abcb8cf358d893b | 6,381 | py | Python | neutron/notifiers/ironic.py | tankertyp/openstack-learning | d729672663f170d0138ecf23b3c23df225c1b1b8 | [
"Apache-2.0"
] | 4 | 2015-11-07T01:58:32.000Z | 2019-10-08T06:18:36.000Z | neutron/notifiers/ironic.py | tankertyp/openstack-learning | d729672663f170d0138ecf23b3c23df225c1b1b8 | [
"Apache-2.0"
] | 5 | 2018-05-31T13:09:00.000Z | 2022-01-13T15:23:29.000Z | neutron/notifiers/ironic.py | tankertyp/openstack-learning | d729672663f170d0138ecf23b3c23df225c1b1b8 | [
"Apache-2.0"
] | 2 | 2017-12-05T15:05:26.000Z | 2019-09-09T16:03:49.000Z | # Copyright (c) 2019 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import portbindings as portbindings_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from openstack import connection
from openstack import exceptions as os_exc
from oslo_config import cfg
from oslo_log import log as logging
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)

# Prefix for every event type posted to Ironic (e.g. 'network.bind_port').
BAREMETAL_EVENT_TYPE = 'network'
IRONIC_API_VERSION = 'latest'
# Module-level cache so repeated Notifier construction reuses one
# authenticated keystone session (see Notifier._get_ironic_client).
IRONIC_SESSION = None
# Config-file section holding the Ironic auth/session options.
IRONIC_CONF_SECTION = 'ironic'
IRONIC_CLIENT_VERSION = 1
@registry.has_registry_receivers
class Notifier(object):
    """Forwards Neutron port lifecycle events for baremetal ports to Ironic.

    Subscribes (via neutron_lib callback registry) to port AFTER_UPDATE and
    AFTER_DELETE events, translates status transitions into Ironic
    'network.*' events, and posts them in batches to Ironic's /events API.
    """

    # Lazily-created process-wide singleton, see get_instance().
    _instance = None

    @classmethod
    def get_instance(cls):
        # Construction builds the Ironic client, so defer it to first use.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        # Events are queued and flushed at most once every
        # cfg.CONF.send_events_interval seconds via send_events().
        self.batch_notifier = batch_notifier.BatchNotifier(
            cfg.CONF.send_events_interval, self.send_events)
        self.irclient = self._get_ironic_client()

    def _get_session(self, group):
        """Load an authenticated keystone session from config section *group*."""
        auth = ks_loading.load_auth_from_conf_options(cfg.CONF, group)
        session = ks_loading.load_session_from_conf_options(
            cfg.CONF, group, auth=auth)
        return session

    def _get_ironic_client(self):
        """Get Ironic client instance."""
        # The session is cached at module scope so every Notifier
        # (re)creation reuses one authenticated session.
        global IRONIC_SESSION
        if not IRONIC_SESSION:
            IRONIC_SESSION = self._get_session(IRONIC_CONF_SECTION)
        return connection.Connection(
            session=IRONIC_SESSION, oslo_conf=cfg.CONF,
            connect_retries=cfg.CONF.http_retries).baremetal

    def send_events(self, batched_events):
        """POST a batch of queued events to Ironic's /events endpoint.

        Failures are logged and swallowed so a notification problem never
        breaks the Neutron operation that triggered it.
        """
        try:
            response = self.irclient.post('/events',
                                          json={'events': batched_events},
                                          microversion='1.54')
            os_exc.raise_from_response(response)
        except Exception as e:
            LOG.exception('Error encountered posting the event to '
                          'ironic. {error}'.format(error=e))

    @registry.receives(resources.PORT, [events.AFTER_UPDATE])
    def process_port_update_event(self, resource, event, trigger,
                                  original_port=None, port=None,
                                  **kwargs):
        """Queue a bind/unbind event when a baremetal port changes status."""
        # We only want to notify about baremetal ports.
        if not (port[portbindings_def.VNIC_TYPE] ==
                portbindings_def.VNIC_BAREMETAL):
            # TODO(TheJulia): Add the smartnic flag at some point...
            return

        original_port_status = original_port['status']
        current_port_status = port['status']
        port_event = None
        # ACTIVE -> DOWN/ERROR is treated as an unbind;
        # DOWN -> ACTIVE/ERROR is treated as a bind.
        if (original_port_status == n_const.PORT_STATUS_ACTIVE and
                current_port_status in [n_const.PORT_STATUS_DOWN,
                                        n_const.PORT_STATUS_ERROR]):
            port_event = 'unbind_port'
        elif (original_port_status == n_const.PORT_STATUS_DOWN and
                current_port_status in [n_const.PORT_STATUS_ACTIVE,
                                        n_const.PORT_STATUS_ERROR]):
            port_event = 'bind_port'
        # NOTE(review): this debug line fires even when no transition
        # matched (port_event is None).
        LOG.debug('Queuing event for {event_type} for port {port} '
                  'for status {status}.'.format(event_type=port_event,
                                                port=port['id'],
                                                status=current_port_status))
        if port_event:
            notify_event = {
                'event': '.'.join([BAREMETAL_EVENT_TYPE, port_event]),
                'port_id': port['id'],
                'mac_address': port[port_def.PORT_MAC_ADDRESS],
                'status': current_port_status,
                'device_id': port['device_id'],
                'binding:host_id': port[portbindings_def.HOST_ID],
                'binding:vnic_type': port[portbindings_def.VNIC_TYPE]
            }
            # Filter keys with empty string as value. In case a type UUID field
            # or similar is not set the API won't accept empty string.
            self.batch_notifier.queue_event(
                {k: v for k, v in notify_event.items() if v != ''})

    @registry.receives(resources.PORT, [events.AFTER_DELETE])
    def process_port_delete_event(self, resource, event, trigger,
                                  original_port=None, port=None,
                                  **kwargs):
        """Queue a delete_port event when a baremetal port is deleted."""
        # We only want to notify about baremetal ports.
        if not (port[portbindings_def.VNIC_TYPE] ==
                portbindings_def.VNIC_BAREMETAL):
            # TODO(TheJulia): Add the smartnic flag at some point...
            return

        port_event = 'delete_port'
        LOG.debug('Queuing event for {event_type} for port {port} '
                  'for status {status}.'.format(event_type=port_event,
                                                port=port['id'],
                                                status='DELETED'))
        notify_event = {
            'event': '.'.join([BAREMETAL_EVENT_TYPE, port_event]),
            'port_id': port['id'],
            'mac_address': port[port_def.PORT_MAC_ADDRESS],
            'status': 'DELETED',
            'device_id': port['device_id'],
            'binding:host_id': port[portbindings_def.HOST_ID],
            'binding:vnic_type': port[portbindings_def.VNIC_TYPE]
        }
        # Filter keys with empty string as value. In case a type UUID field
        # or similar is not set the API won't accept empty string.
        self.batch_notifier.queue_event(
            {k: v for k, v in notify_event.items() if v != ''})
5fa36135ebd6bba2e7d01908f93f053c321425e1 | 10,640 | py | Python | fna_seg/furnace/engine/evaluator.py | BaiYuYuan/FNA | d86fc5feb516190cb9622e1a84dfbad341e770c5 | [
"Apache-2.0"
] | 173 | 2019-12-25T04:44:25.000Z | 2021-12-20T01:48:46.000Z | fna_seg/furnace/engine/evaluator.py | BaiYuYuan/FNA | d86fc5feb516190cb9622e1a84dfbad341e770c5 | [
"Apache-2.0"
] | 22 | 2020-03-02T13:23:10.000Z | 2022-01-21T09:38:16.000Z | fna_seg/furnace/engine/evaluator.py | BaiYuYuan/FNA | d86fc5feb516190cb9622e1a84dfbad341e770c5 | [
"Apache-2.0"
] | 26 | 2020-01-09T15:15:17.000Z | 2021-07-29T07:30:06.000Z | import os
import os.path as osp
import cv2
import numpy as np
import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from engine.logger import get_logger
from utils.pyt_utils import load_model, link_file, ensure_dir
from utils.img_utils import pad_image_to_shape, normalize
logger = get_logger()
class Evaluator(object):
    """Multi-process, multi-scale semantic-segmentation evaluator.

    Spawns one worker process per GPU; each worker evaluates a slice of the
    dataset and pushes per-image results into a shared queue, which the main
    process drains and feeds to compute_metric().  Subclasses must implement
    func_per_iteration() and compute_metric().
    """

    def __init__(self, dataset, class_num, image_mean, image_std, network,
                 multi_scales, is_flip, devices,
                 verbose=False, save_path=None, show_image=False):
        self.dataset = dataset
        self.ndata = self.dataset.get_length()
        self.class_num = class_num
        self.image_mean = image_mean
        self.image_std = image_std
        self.multi_scales = multi_scales  # scale factors for multi-scale eval
        self.is_flip = is_flip            # also average with a left-right flip
        self.network = network
        self.devices = devices            # GPU ids, one worker process each

        # 'spawn' so each worker process gets its own fresh CUDA context.
        self.context = mp.get_context('spawn')
        self.val_func = None
        # Shared queue collecting per-image results from the workers.
        self.results_queue = self.context.Queue(self.ndata)

        self.verbose = verbose
        self.save_path = save_path
        if save_path is not None:
            ensure_dir(save_path)
        self.show_image = show_image

    def run(self, model_path, model_indice, log_file, log_file_link):
        """There are four evaluation modes:
            1.only eval a .pth model: -e *.pth
            2.only eval a certain epoch: -e epoch
            3.eval all epochs in a given section: -e start_epoch-end_epoch
            4.eval all epochs from a certain started epoch: -e start_epoch-
        """
        # Resolve `model_indice` into a list of checkpoint paths.
        if '.pth' in model_indice:
            models = [model_indice, ]
        elif "-" in model_indice:
            start_epoch = int(model_indice.split("-")[0])
            end_epoch = model_indice.split("-")[1]

            models = os.listdir(model_path)
            models.remove("epoch-last.pth")
            sorted_models = [None] * len(models)
            model_idx = [0] * len(models)

            # Extract the epoch number from each "epoch-N.pth" filename.
            for idx, m in enumerate(models):
                num = m.split(".")[0].split("-")[1]
                model_idx[idx] = num
                sorted_models[idx] = m
            model_idx = np.array([int(i) for i in model_idx])

            # Keep only epochs inside [start_epoch, end_epoch] (open-ended
            # upper bound when end_epoch is empty, i.e. "-e start-").
            down_bound = model_idx >= start_epoch
            up_bound = [True] * len(sorted_models)
            if end_epoch:
                end_epoch = int(end_epoch)
                assert start_epoch < end_epoch
                up_bound = model_idx <= end_epoch
            bound = up_bound * down_bound
            model_slice = np.array(sorted_models)[bound]
            models = [os.path.join(model_path, model) for model in
                      model_slice]
        else:
            models = [os.path.join(model_path,
                                   'epoch-%s.pth' % model_indice), ]

        # results = open(log_file, 'a')
        # link_file(log_file, log_file_link)
        # print(model_indice)
        for model in models:
            logger.info("Load Model: %s" % model)
            self.val_func = load_model(self.network, model)
            result_line = self.multi_process_evaluation()

            # results.write('Model: ' + model + '\n')
            # results.write(result_line)
            # results.write('\n')
            # results.flush()
        # results.close()

    def multi_process_evaluation(self):
        """Fan the dataset out over the GPUs, gather results, compute metrics."""
        start_eval_time = time.perf_counter()
        nr_devices = len(self.devices)
        stride = int(np.ceil(self.ndata / nr_devices))

        # start multi-process on multi-gpu
        procs = []
        for d in range(nr_devices):
            e_record = min((d + 1) * stride, self.ndata)
            shred_list = list(range(d * stride, e_record))
            device = self.devices[d]
            logger.info(
                'GPU %s handle %d data.' % (device, len(shred_list)))

            p = self.context.Process(target=self.worker,
                                     args=(shred_list, device))
            procs.append(p)

        for p in procs:
            p.start()

        # Drain exactly ndata results from the shared queue.
        all_results = []
        for _ in tqdm(range(self.ndata)):
            t = self.results_queue.get()
            all_results.append(t)
            if self.verbose:
                # Running metric after every image (slow; debug aid only).
                self.compute_metric(all_results)

        for p in procs:
            p.join()

        result_line = self.compute_metric(all_results)
        logger.info(
            'Evaluation Elapsed Time: %.2fs' % (
                    time.perf_counter() - start_eval_time))
        return result_line

    def worker(self, shred_list, device):
        """Per-process loop: evaluate each assigned index and queue its result."""
        start_load_time = time.time()
        logger.info('Load Model on Device %d: %.2fs' % (
            device, time.time() - start_load_time))

        for idx in shred_list:
            # import pdb; pdb.set_trace()
            dd = self.dataset[idx]
            results_dict = self.func_per_iteration(dd, device)
            self.results_queue.put(results_dict)

    def func_per_iteration(self, data, device):
        """Evaluate a single sample; must be provided by the subclass."""
        raise NotImplementedError

    def compute_metric(self, results):
        """Reduce the per-image results into a metric; subclass-provided."""
        raise NotImplementedError

    # evaluate the whole image at once
    def whole_eval(self, img, output_size, input_size=None, device=None):
        """Single-shot inference on the full image; returns an argmax label map."""
        if input_size is not None:
            img, margin = self.process_image(img, input_size)
        else:
            img = self.process_image(img, input_size)

        pred = self.val_func_process(img, device)
        if input_size is not None:
            # Strip the padding added by process_image.
            pred = pred[:, margin[0]:(pred.shape[1] - margin[1]),
                        margin[2]:(pred.shape[2] - margin[3])]
        pred = pred.permute(1, 2, 0)
        pred = pred.cpu().numpy()
        if output_size is not None:
            pred = cv2.resize(pred,
                              (output_size[1], output_size[0]),
                              interpolation=cv2.INTER_LINEAR)

        pred = pred.argmax(2)

        return pred

    # slide the window to evaluate the image
    def sliding_eval(self, img, crop_size, stride_rate, device=None):
        """Multi-scale sliding-window inference; sums class scores per scale."""
        ori_rows, ori_cols, c = img.shape
        processed_pred = np.zeros((ori_rows, ori_cols, self.class_num))

        for s in self.multi_scales:
            img_scale = cv2.resize(img, None, fx=s, fy=s,
                                   interpolation=cv2.INTER_LINEAR)
            new_rows, new_cols, _ = img_scale.shape
            processed_pred += self.scale_process(img_scale,
                                                 (ori_rows, ori_cols),
                                                 crop_size, stride_rate, device)

        pred = processed_pred.argmax(2)

        return pred

    def scale_process(self, img, ori_shape, crop_size, stride_rate,
                      device=None):
        """Score one scaled image: whole-image if it fits in crop_size,
        otherwise overlapping crops accumulated into a full-size score map.
        Returns an (ori_rows, ori_cols, class_num) numpy score array."""
        new_rows, new_cols, c = img.shape
        long_size = new_cols if new_cols > new_rows else new_rows

        if long_size <= crop_size:
            # Image fits in one crop; pad up to crop_size and score once.
            input_data, margin = self.process_image(img, crop_size)
            score = self.val_func_process(input_data, device)
            score = score[:, margin[0]:(score.shape[1] - margin[1]),
                    margin[2]:(score.shape[2] - margin[3])]
        else:
            stride = int(np.ceil(crop_size * stride_rate))
            img_pad, margin = pad_image_to_shape(img, crop_size,
                                                 cv2.BORDER_CONSTANT, value=0)

            pad_rows = img_pad.shape[0]
            pad_cols = img_pad.shape[1]
            r_grid = int(np.ceil((pad_rows - crop_size) / stride)) + 1
            c_grid = int(np.ceil((pad_cols - crop_size) / stride)) + 1
            data_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
                device)
            count_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
                device)

            for grid_yidx in range(r_grid):
                for grid_xidx in range(c_grid):
                    s_x = grid_xidx * stride
                    s_y = grid_yidx * stride
                    e_x = min(s_x + crop_size, pad_cols)
                    e_y = min(s_y + crop_size, pad_rows)
                    # Clamp the last window flush against the border.
                    s_x = e_x - crop_size
                    s_y = e_y - crop_size
                    img_sub = img_pad[s_y:e_y, s_x: e_x, :]
                    count_scale[:, s_y: e_y, s_x: e_x] += 1

                    input_data, tmargin = self.process_image(img_sub, crop_size)
                    temp_score = self.val_func_process(input_data, device)
                    temp_score = temp_score[:,
                                 tmargin[0]:(temp_score.shape[1] - tmargin[1]),
                                 tmargin[2]:(temp_score.shape[2] - tmargin[3])]
                    data_scale[:, s_y: e_y, s_x: e_x] += temp_score
            # NOTE: overlap counts are accumulated but intentionally (?) not
            # used to normalize — the original averaging is commented out.
            # score = data_scale / count_scale
            score = data_scale
            score = score[:, margin[0]:(score.shape[1] - margin[1]),
                    margin[2]:(score.shape[2] - margin[3])]

        score = score.permute(1, 2, 0)
        data_output = cv2.resize(score.cpu().numpy(),
                                 (ori_shape[1], ori_shape[0]),
                                 interpolation=cv2.INTER_LINEAR)

        return data_output

    def val_func_process(self, input_data, device=None):
        """Run the network on one CHW image (optionally flip-averaged).
        Returns an exp()-ed score tensor — assumes the network outputs
        log-probabilities (TODO confirm against the model head)."""
        input_data = np.ascontiguousarray(input_data[None, :, :, :],
                                          dtype=np.float32)
        input_data = torch.FloatTensor(input_data).cuda(device)

        with torch.cuda.device(input_data.get_device()):
            self.val_func.eval()
            self.val_func.to(input_data.get_device())
            with torch.no_grad():
                score = self.val_func(input_data)
                score = score[0]

                if self.is_flip:
                    # Average with the horizontally flipped image.
                    input_data = input_data.flip(-1)
                    score_flip = self.val_func(input_data)
                    score_flip = score_flip[0]
                    score += score_flip.flip(-1)
                score = torch.exp(score)
                # score = score.data

        return score

    def process_image(self, img, crop_size=None):
        """Normalize an HWC image to CHW float; optionally pad to crop_size.
        Returns (img, margin) when padded, else just img."""
        p_img = img

        if img.shape[2] < 3:
            # Replicate channels to get a 3-channel image
            # (assumes single-channel input — TODO confirm).
            im_b = p_img
            im_g = p_img
            im_r = p_img
            p_img = np.concatenate((im_b, im_g, im_r), axis=2)

        p_img = normalize(p_img, self.image_mean, self.image_std)

        if crop_size is not None:
            p_img, margin = pad_image_to_shape(p_img, crop_size,
                                               cv2.BORDER_CONSTANT, value=0)
            p_img = p_img.transpose(2, 0, 1)

            return p_img, margin

        p_img = p_img.transpose(2, 0, 1)

        return p_img
| 37.464789 | 80 | 0.546805 |
f7808469984c5766ae0c484b8c03535af4f97eb2 | 55,768 | py | Python | scripts/object_tracker_generator.py | ZandroFargnoli/Vulkan-ValidationLayers | 45c73d999ed8cc46f62754649cd78c3a917575c5 | [
"Apache-2.0"
] | null | null | null | scripts/object_tracker_generator.py | ZandroFargnoli/Vulkan-ValidationLayers | 45c73d999ed8cc46f62754649cd78c3a917575c5 | [
"Apache-2.0"
] | null | null | null | scripts/object_tracker_generator.py | ZandroFargnoli/Vulkan-ValidationLayers | 45c73d999ed8cc46f62754649cd78c3a917575c5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2015-2020 The Khronos Group Inc.
# Copyright (c) 2015-2020 Valve Corporation
# Copyright (c) 2015-2020 LunarG, Inc.
# Copyright (c) 2015-2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mark Lobodzinski <mark@lunarg.com>
# Author: Dave Houlton <daveh@lunarg.com>
import os,re,sys,string,json
import xml.etree.ElementTree as etree
from generator import *
from collections import namedtuple
from common_codegen import *
# This is a workaround to use a Python 2.7 and 3.x compatible syntax.
from io import open
# ObjectTrackerGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by ObjectTrackerOutputGenerator objects during
# object_tracker layer generation.
#
# Additional members
# prefixText - list of strings to prefix generated header with
# (usually a copyright statement + calling convention macros).
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class ObjectTrackerGeneratorOptions(GeneratorOptions):
    """Options consumed by ObjectTrackerOutputGenerator while generating the
    object_tracker validation layer.

    Extends GeneratorOptions with code-formatting knobs (prefix text, file
    and feature protection, calling-convention macros, indentation/alignment
    controls) plus the path to the valid-usage JSON database.
    """
    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 genpath = None,
                 apiname = None,
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = None,
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 sortProcedure = regSortFeatures,
                 prefixText = "",
                 genFuncPointers = True,
                 protectFile = True,
                 protectFeature = True,
                 apicall = '',
                 apientry = '',
                 apientryp = '',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 0,
                 expandEnumerants = True,
                 valid_usage_path = ''):
        # Registry-level options are forwarded straight to the base class.
        super(ObjectTrackerGeneratorOptions, self).__init__(
            conventions = conventions,
            filename = filename,
            directory = directory,
            genpath = genpath,
            apiname = apiname,
            profile = profile,
            versions = versions,
            emitversions = emitversions,
            defaultExtensions = defaultExtensions,
            addExtensions = addExtensions,
            removeExtensions = removeExtensions,
            emitExtensions = emitExtensions,
            sortProcedure = sortProcedure)
        # Layer-generator-specific options, stored verbatim for the
        # output generator to consult.
        self.prefixText = prefixText
        self.genFuncPointers = genFuncPointers
        self.protectFile = protectFile
        self.protectFeature = protectFeature
        self.apicall = apicall
        self.apientry = apientry
        self.apientryp = apientryp
        self.indentFuncProto = indentFuncProto
        self.indentFuncPointer = indentFuncPointer
        self.alignFuncParam = alignFuncParam
        self.expandEnumerants = expandEnumerants
        self.valid_usage_path = valid_usage_path
# ObjectTrackerOutputGenerator - subclass of OutputGenerator.
# Generates object_tracker layer object validation code
#
# ---- methods ----
# ObjectTrackerOutputGenerator(errFile, warnFile, diagFile) - args as for OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genCmd(cmdinfo)
# genStruct()
# genType()
class ObjectTrackerOutputGenerator(OutputGenerator):
"""Generate ObjectTracker code based on XML element attributes"""
# This is an ordered list of sections in the header file.
ALL_SECTIONS = ['command']
    def __init__(self,
                 errFile = sys.stderr,
                 warnFile = sys.stderr,
                 diagFile = sys.stdout):
        OutputGenerator.__init__(self, errFile, warnFile, diagFile)
        # Number of spaces added per indent level in generated code.
        self.INDENT_SPACES = 4
        self.prototypes = []
        self.instance_extensions = []
        self.device_extensions = []
        # Commands which are not autogenerated but still intercepted
        # (hand-written implementations live in the layer source).
        self.no_autogen_list = [
            'vkDestroyInstance',
            'vkCreateInstance',
            'vkCreateDevice',
            'vkEnumeratePhysicalDevices',
            'vkGetPhysicalDeviceQueueFamilyProperties',
            'vkGetPhysicalDeviceQueueFamilyProperties2',
            'vkGetPhysicalDeviceQueueFamilyProperties2KHR',
            'vkGetDeviceQueue',
            'vkGetDeviceQueue2',
            'vkCreateDescriptorSetLayout',
            'vkDestroyDescriptorPool',
            'vkDestroyCommandPool',
            'vkAllocateCommandBuffers',
            'vkAllocateDescriptorSets',
            'vkFreeDescriptorSets',
            'vkFreeCommandBuffers',
            'vkUpdateDescriptorSets',
            'vkBeginCommandBuffer',
            'vkGetDescriptorSetLayoutSupport',
            'vkGetDescriptorSetLayoutSupportKHR',
            'vkDestroySwapchainKHR',
            'vkGetSwapchainImagesKHR',
            'vkCmdPushDescriptorSetKHR',
            'vkDestroyDevice',
            'vkResetDescriptorPool',
            'vkGetPhysicalDeviceDisplayPropertiesKHR',
            'vkGetPhysicalDeviceDisplayProperties2KHR',
            'vkGetDisplayModePropertiesKHR',
            'vkGetDisplayModeProperties2KHR',
            'vkAcquirePerformanceConfigurationINTEL',
            'vkReleasePerformanceConfigurationINTEL',
            'vkQueueSetPerformanceConfigurationINTEL',
            'vkCreateFramebuffer',
            'vkSetDebugUtilsObjectNameEXT',
            'vkSetDebugUtilsObjectTagEXT',
            'vkCreateDescriptorUpdateTemplate',
            'vkCreateDescriptorUpdateTemplateKHR',
            ]
        # These VUIDS are not implicit, but are best handled in this layer. Codegen for vkDestroy calls will generate a key
        # which is translated here into a good VU.  Saves ~40 checks.
        self.manual_vuids = dict()
        self.manual_vuids = {
            "fence-compatalloc": "\"VUID-vkDestroyFence-fence-01121\"",
            "fence-nullalloc": "\"VUID-vkDestroyFence-fence-01122\"",
            "event-compatalloc": "\"VUID-vkDestroyEvent-event-01146\"",
            "event-nullalloc": "\"VUID-vkDestroyEvent-event-01147\"",
            "buffer-compatalloc": "\"VUID-vkDestroyBuffer-buffer-00923\"",
            "buffer-nullalloc": "\"VUID-vkDestroyBuffer-buffer-00924\"",
            "image-compatalloc": "\"VUID-vkDestroyImage-image-01001\"",
            "image-nullalloc": "\"VUID-vkDestroyImage-image-01002\"",
            "shaderModule-compatalloc": "\"VUID-vkDestroyShaderModule-shaderModule-01092\"",
            "shaderModule-nullalloc": "\"VUID-vkDestroyShaderModule-shaderModule-01093\"",
            "pipeline-compatalloc": "\"VUID-vkDestroyPipeline-pipeline-00766\"",
            "pipeline-nullalloc": "\"VUID-vkDestroyPipeline-pipeline-00767\"",
            "sampler-compatalloc": "\"VUID-vkDestroySampler-sampler-01083\"",
            "sampler-nullalloc": "\"VUID-vkDestroySampler-sampler-01084\"",
            "renderPass-compatalloc": "\"VUID-vkDestroyRenderPass-renderPass-00874\"",
            "renderPass-nullalloc": "\"VUID-vkDestroyRenderPass-renderPass-00875\"",
            "descriptorUpdateTemplate-compatalloc": "\"VUID-vkDestroyDescriptorUpdateTemplate-descriptorSetLayout-00356\"",
            "descriptorUpdateTemplate-nullalloc": "\"VUID-vkDestroyDescriptorUpdateTemplate-descriptorSetLayout-00357\"",
            "imageView-compatalloc": "\"VUID-vkDestroyImageView-imageView-01027\"",
            "imageView-nullalloc": "\"VUID-vkDestroyImageView-imageView-01028\"",
            "pipelineCache-compatalloc": "\"VUID-vkDestroyPipelineCache-pipelineCache-00771\"",
            "pipelineCache-nullalloc": "\"VUID-vkDestroyPipelineCache-pipelineCache-00772\"",
            "pipelineLayout-compatalloc": "\"VUID-vkDestroyPipelineLayout-pipelineLayout-00299\"",
            "pipelineLayout-nullalloc": "\"VUID-vkDestroyPipelineLayout-pipelineLayout-00300\"",
            "descriptorSetLayout-compatalloc": "\"VUID-vkDestroyDescriptorSetLayout-descriptorSetLayout-00284\"",
            "descriptorSetLayout-nullalloc": "\"VUID-vkDestroyDescriptorSetLayout-descriptorSetLayout-00285\"",
            "semaphore-compatalloc": "\"VUID-vkDestroySemaphore-semaphore-01138\"",
            "semaphore-nullalloc": "\"VUID-vkDestroySemaphore-semaphore-01139\"",
            "queryPool-compatalloc": "\"VUID-vkDestroyQueryPool-queryPool-00794\"",
            "queryPool-nullalloc": "\"VUID-vkDestroyQueryPool-queryPool-00795\"",
            "bufferView-compatalloc": "\"VUID-vkDestroyBufferView-bufferView-00937\"",
            "bufferView-nullalloc": "\"VUID-vkDestroyBufferView-bufferView-00938\"",
            "surface-compatalloc": "\"VUID-vkDestroySurfaceKHR-surface-01267\"",
            "surface-nullalloc": "\"VUID-vkDestroySurfaceKHR-surface-01268\"",
            "framebuffer-compatalloc": "\"VUID-vkDestroyFramebuffer-framebuffer-00893\"",
            "framebuffer-nullalloc": "\"VUID-vkDestroyFramebuffer-framebuffer-00894\"",
            "VkGraphicsPipelineCreateInfo-basePipelineHandle": "\"VUID-VkGraphicsPipelineCreateInfo-flags-00722\"",
            "VkComputePipelineCreateInfo-basePipelineHandle": "\"VUID-VkComputePipelineCreateInfo-flags-00697\"",
            "VkRayTracingPipelineCreateInfoNV-basePipelineHandle": "\"VUID-VkRayTracingPipelineCreateInfoNV-flags-03421\"",
            "VkRayTracingPipelineCreateInfoKHR-basePipelineHandle": "\"VUID-VkRayTracingPipelineCreateInfoKHR-flags-03421\"",
           }

        # Commands shadowed by interface functions and are not implemented
        self.interface_functions = [
            ]
        self.headerVersion = None
        # Internal state - accumulators for different inner block text
        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
        self.cmd_list = []                # list of commands processed to maintain ordering
        self.cmd_info_dict = {}           # Per entry-point data for code generation and validation
        self.structMembers = []           # List of StructMemberData records for all Vulkan structs
        self.extension_structs = []       # List of all structs or sister-structs containing handles
                                          # A sister-struct may contain no handles but shares <validextensionstructs> with one that does
        self.structTypes = dict()         # Map of Vulkan struct typename to required VkStructureType
        self.struct_member_dict = dict()
        # Named tuples to store struct and command data
        self.StructType = namedtuple('StructType', ['name', 'value'])
        self.CmdInfoData = namedtuple('CmdInfoData', ['name', 'cmdinfo', 'members', 'extra_protect', 'alias', 'iscreate', 'isdestroy', 'allocator'])
        self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'isconst', 'isoptional', 'iscount', 'iscreate', 'len', 'extstructs', 'cdecl', 'islocal'])
        self.StructMemberData = namedtuple('StructMemberData', ['name', 'members'])
        self.object_types = []            # List of all handle types
        self.valid_vuids = set()          # Set of all valid VUIDs
        self.vuid_dict = dict()           # VUID dictionary (from JSON)
#
# Check if the parameter passed in is optional
def paramIsOptional(self, param):
# See if the handle is optional
isoptional = False
# Simple, if it's optional, return true
optString = param.attrib.get('optional')
if optString:
if optString == 'true':
isoptional = True
elif ',' in optString:
opts = []
for opt in optString.split(','):
val = opt.strip()
if val == 'true':
opts.append(True)
elif val == 'false':
opts.append(False)
else:
print('Unrecognized len attribute value',val)
isoptional = opts
if not isoptional:
# Matching logic in parameter validation and ValidityOutputGenerator.isHandleOptional
optString = param.attrib.get('noautovalidity')
if optString and optString == 'true':
if param.attrib.get('len'):
isoptional = [True, True]
else:
isoptional = True
return isoptional
#
# Get VUID identifier from implicit VUID tag
def GetVuid(self, parent, suffix):
vuid_string = 'VUID-%s-%s' % (parent, suffix)
vuid = "kVUIDUndefined"
if '->' in vuid_string:
return vuid
if vuid_string in self.valid_vuids:
vuid = "\"%s\"" % vuid_string
else:
alias = self.cmd_info_dict[parent].alias if parent in self.cmd_info_dict else None
if alias:
alias_string = 'VUID-%s-%s' % (alias, suffix)
if alias_string in self.valid_vuids:
vuid = "\"%s\"" % alias_string
return vuid
#
# Increases indent by 4 spaces and tracks it globally
def incIndent(self, indent):
inc = ' ' * self.INDENT_SPACES
if indent:
return indent + inc
return inc
#
# Decreases indent by 4 spaces and tracks it globally
def decIndent(self, indent):
if indent and (len(indent) > self.INDENT_SPACES):
return indent[:-self.INDENT_SPACES]
return ''
#
# Override makeProtoName to drop the "vk" prefix
def makeProtoName(self, name, tail):
return self.genOpts.apientry + name[2:] + tail
#
# Check if the parameter passed in is a pointer to an array
def paramIsArray(self, param):
return param.attrib.get('len') is not None
#
# Generate the object tracker undestroyed object validation function
def GenReportFunc(self):
output_func = ''
for objtype in ['instance', 'device']:
upper_objtype = objtype.capitalize();
output_func += 'bool ObjectLifetimes::ReportUndestroyed%sObjects(Vk%s %s, const std::string& error_code) const {\n' % (upper_objtype, upper_objtype, objtype)
output_func += ' bool skip = false;\n'
if objtype == 'device':
output_func += ' skip |= ReportLeaked%sObjects(%s, kVulkanObjectTypeCommandBuffer, error_code);\n' % (upper_objtype, objtype)
for handle in self.object_types:
if self.handle_types.IsNonDispatchable(handle) and not self.is_aliased_type[handle]:
if (objtype == 'device' and self.handle_parents.IsParentDevice(handle)) or (objtype == 'instance' and not self.handle_parents.IsParentDevice(handle)):
output_func += ' skip |= ReportLeaked%sObjects(%s, %s, error_code);\n' % (upper_objtype, objtype, self.GetVulkanObjType(handle))
output_func += ' return skip;\n'
output_func += '}\n'
return output_func
#
# Generate the object tracker undestroyed object destruction function
def GenDestroyFunc(self):
output_func = ''
for objtype in ['instance', 'device']:
upper_objtype = objtype.capitalize();
output_func += 'void ObjectLifetimes::DestroyLeaked%sObjects() {\n' % upper_objtype
if objtype == 'device':
output_func += ' DestroyUndestroyedObjects(kVulkanObjectTypeCommandBuffer);\n'
for handle in self.object_types:
if self.handle_types.IsNonDispatchable(handle) and not self.is_aliased_type[handle]:
if (objtype == 'device' and self.handle_parents.IsParentDevice(handle)) or (objtype == 'instance' and not self.handle_parents.IsParentDevice(handle)):
output_func += ' DestroyUndestroyedObjects(%s);\n' % self.GetVulkanObjType(handle)
output_func += '}\n'
return output_func
#
# Walk the JSON-derived dict and find all "vuid" key values
def ExtractVUIDs(self, d):
if hasattr(d, 'items'):
for k, v in d.items():
if k == "vuid":
yield v
elif isinstance(v, dict):
for s in self.ExtractVUIDs(v):
yield s
elif isinstance (v, list):
for l in v:
for s in self.ExtractVUIDs(l):
yield s
#
# Separate content for validation source and header files
def otwrite(self, dest, formatstring):
if 'object_tracker.h' in self.genOpts.filename and (dest == 'hdr' or dest == 'both'):
write(formatstring, file=self.outFile)
elif 'object_tracker.cpp' in self.genOpts.filename and (dest == 'cpp' or dest == 'both'):
write(formatstring, file=self.outFile)
#
# Called at beginning of processing as file is opened
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
# Initialize members that require the tree
self.handle_types = GetHandleTypes(self.registry.tree)
self.handle_parents = GetHandleParents(self.registry.tree)
self.type_categories = GetTypeCategories(self.registry.tree)
self.is_aliased_type = GetHandleAliased(self.registry.tree)
header_file = (genOpts.filename == 'object_tracker.h')
source_file = (genOpts.filename == 'object_tracker.cpp')
if not header_file and not source_file:
print("Error: Output Filenames have changed, update generator source.\n")
sys.exit(1)
self.valid_usage_path = genOpts.valid_usage_path
vu_json_filename = os.path.join(self.valid_usage_path + os.sep, 'validusage.json')
if os.path.isfile(vu_json_filename):
json_file = open(vu_json_filename, 'r', encoding='utf-8')
self.vuid_dict = json.load(json_file)
json_file.close()
if len(self.vuid_dict) == 0:
print("Error: Could not find, or error loading %s/validusage.json\n", vu_json_filename)
sys.exit(1)
# Build a set of all vuid text strings found in validusage.json
for json_vuid_string in self.ExtractVUIDs(self.vuid_dict):
self.valid_vuids.add(json_vuid_string)
# File Comment
file_comment = '// *** THIS FILE IS GENERATED - DO NOT EDIT ***\n'
file_comment += '// See object_tracker_generator.py for modifications\n'
self.otwrite('both', file_comment)
# Copyright Statement
copyright = ''
copyright += '\n'
copyright += '/***************************************************************************\n'
copyright += ' *\n'
copyright += ' * Copyright (c) 2015-2020 The Khronos Group Inc.\n'
copyright += ' * Copyright (c) 2015-2020 Valve Corporation\n'
copyright += ' * Copyright (c) 2015-2020 LunarG, Inc.\n'
copyright += ' * Copyright (c) 2015-2020 Google Inc.\n'
copyright += ' *\n'
copyright += ' * Licensed under the Apache License, Version 2.0 (the "License");\n'
copyright += ' * you may not use this file except in compliance with the License.\n'
copyright += ' * You may obtain a copy of the License at\n'
copyright += ' *\n'
copyright += ' * http://www.apache.org/licenses/LICENSE-2.0\n'
copyright += ' *\n'
copyright += ' * Unless required by applicable law or agreed to in writing, software\n'
copyright += ' * distributed under the License is distributed on an "AS IS" BASIS,\n'
copyright += ' * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
copyright += ' * See the License for the specific language governing permissions and\n'
copyright += ' * limitations under the License.\n'
copyright += ' *\n'
copyright += ' * Author: Mark Lobodzinski <mark@lunarg.com>\n'
copyright += ' * Author: Dave Houlton <daveh@lunarg.com>\n'
copyright += ' *\n'
copyright += ' ****************************************************************************/\n'
self.otwrite('both', copyright)
self.newline()
self.otwrite('cpp', '#include "chassis.h"')
self.otwrite('cpp', '#include "object_lifetime_validation.h"')
#
# Now that the data is all collected and complete, generate and output the object validation routines
def endFile(self):
self.struct_member_dict = dict(self.structMembers)
# Generate the list of APIs that might need to handle wrapped extension structs
# self.GenerateCommandWrapExtensionList()
self.WrapCommands()
# Build undestroyed objects reporting function
report_func = self.GenReportFunc()
self.newline()
# Build undestroyed objects destruction function
destroy_func = self.GenDestroyFunc()
self.otwrite('cpp', '\n')
self.otwrite('cpp', '// ObjectTracker undestroyed objects validation function')
self.otwrite('cpp', '%s' % report_func)
self.otwrite('cpp', '%s' % destroy_func)
# Actually write the interface to the output file.
if (self.emit):
self.newline()
if self.featureExtraProtect is not None:
prot = '#ifdef %s' % self.featureExtraProtect
self.otwrite('both', '%s' % prot)
# Write the object_tracker code to the file
if self.sections['command']:
source = ('\n'.join(self.sections['command']))
self.otwrite('both', '%s' % source)
if (self.featureExtraProtect is not None):
prot = '\n#endif // %s', self.featureExtraProtect
self.otwrite('both', prot)
else:
self.otwrite('both', '\n')
self.otwrite('hdr', 'void PostCallRecordDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator);')
self.otwrite('hdr', 'void PreCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);')
self.otwrite('hdr', 'void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties *pQueueFamilyProperties);')
self.otwrite('hdr', 'void PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers);')
self.otwrite('hdr', 'void PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets);')
self.otwrite('hdr', 'void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties);')
self.otwrite('hdr', 'void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties);')
self.otwrite('hdr', 'void PostCallRecordGetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayPropertiesKHR *pProperties, VkResult result);')
self.otwrite('hdr', 'void PostCallRecordGetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties, VkResult result);')
self.otwrite('hdr', 'void PostCallRecordGetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayProperties2KHR *pProperties, VkResult result);')
self.otwrite('hdr', 'void PostCallRecordGetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t *pPropertyCount, VkDisplayModeProperties2KHR *pProperties, VkResult result);')
OutputGenerator.endFile(self)
#
# Processing point at beginning of each extension definition
def beginFeature(self, interface, emit):
# Start processing in superclass
OutputGenerator.beginFeature(self, interface, emit)
self.headerVersion = None
self.featureExtraProtect = GetFeatureProtect(interface)
if interface.tag == 'extension':
white_list_entry = []
if (self.featureExtraProtect is not None):
white_list_entry += [ '#ifdef %s' % self.featureExtraProtect ]
white_list_entry += [ '"%s"' % self.featureName ]
if (self.featureExtraProtect is not None):
white_list_entry += [ '#endif' ]
featureType = interface.get('type')
if featureType == 'instance':
self.instance_extensions += white_list_entry
elif featureType == 'device':
self.device_extensions += white_list_entry
#
# Processing point at end of each extension definition
    def endFeature(self):
        """Per-feature teardown -- no per-feature state to flush here, so simply
        finish processing in the superclass."""
        # Finish processing in superclass
        OutputGenerator.endFeature(self)
#
# Process enums, structs, etc.
def genType(self, typeinfo, name, alias):
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
# If the type is a struct type, traverse the imbedded <member> tags generating a structure.
# Otherwise, emit the tag text.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name, alias)
if category == 'handle':
self.object_types.append(name)
#
# Append a definition to the specified section
def appendSection(self, section, text):
# self.sections[section].append('SECTION: ' + section + '\n')
self.sections[section].append(text)
#
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = False
for elem in param:
if elem.tag == 'type' and elem.tail is not None and '*' in elem.tail:
ispointer = True
return ispointer
#
# Retrieve the type and name for a parameter
def getTypeNameTuple(self, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
# Retrieve the value of the len tag
def getLen(self, param):
result = None
len = param.attrib.get('len')
if len and len != 'null-terminated':
# For string arrays, 'len' can look like 'count,null-terminated', indicating that we
# have a null terminated array of strings. We strip the null-terminated from the
# 'len' field and only return the parameter specifying the string count
if 'null-terminated' in len:
result = len.split(',')[0]
else:
result = len
# Spec has now notation for len attributes, using :: instead of platform specific pointer symbol
result = str(result).replace('::', '->')
return result
#
# Generate a VkStructureType based on a structure typename
def genVkStructureType(self, typename):
# Add underscore between lowercase then uppercase
value = re.sub('([a-z0-9])([A-Z])', r'\1_\2', typename)
# Change to uppercase
value = value.upper()
# Add STRUCTURE_TYPE_
return re.sub('VK_', 'VK_STRUCTURE_TYPE_', value)
#
# Struct parameter check generation.
# This is a special case of the <type> tag where the contents are interpreted as a set of
# <member> tags instead of freeform C type declarations. The <member> tags are just like
# <param> tags - they are a declaration of a struct or union member. Only simple member
# declarations are supported (no nested structs etc.)
def genStruct(self, typeinfo, typeName, alias):
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
members = typeinfo.elem.findall('.//member')
# Iterate over members once to get length parameters for arrays
lens = set()
for member in members:
len = self.getLen(member)
if len:
lens.add(len)
# Generate member info
membersInfo = []
for member in members:
# Get the member's type and name
info = self.getTypeNameTuple(member)
type = info[0]
name = info[1]
cdecl = self.makeCParamDecl(member, 0)
# Process VkStructureType
if type == 'VkStructureType':
# Extract the required struct type value from the comments
# embedded in the original text defining the 'typeinfo' element
rawXml = etree.tostring(typeinfo.elem).decode('ascii')
result = re.search(r'VK_STRUCTURE_TYPE_\w+', rawXml)
if result:
value = result.group(0)
else:
value = self.genVkStructureType(typeName)
# Store the required type value
self.structTypes[typeName] = self.StructType(name=name, value=value)
# Store pointer/array/string info
extstructs = member.attrib.get('validextensionstructs') if name == 'pNext' else None
membersInfo.append(self.CommandParam(type=type,
name=name,
isconst=True if 'const' in cdecl else False,
isoptional=self.paramIsOptional(member),
iscount=True if name in lens else False,
len=self.getLen(member),
extstructs=extstructs,
cdecl=cdecl,
islocal=False,
iscreate=False))
self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo))
#
# Insert a lock_guard line
def lock_guard(self, indent):
return '%sstd::lock_guard<std::mutex> lock(global_lock);\n' % indent
#
# Determine if a struct has an object as a member or an embedded member
def struct_contains_object(self, struct_item):
struct_member_dict = dict(self.structMembers)
struct_members = struct_member_dict[struct_item]
for member in struct_members:
if member.type in self.handle_types:
return True
# recurse for member structs, guard against infinite recursion
elif member.type in struct_member_dict and member.type != struct_item:
if self.struct_contains_object(member.type):
return True
return False
#
# Return list of struct members which contain, or whose sub-structures contain an obj in a given list of parameters or members
def getParmeterStructsWithObjects(self, item_list):
struct_list = set()
for item in item_list:
paramtype = item.find('type')
typecategory = self.type_categories[paramtype.text]
if typecategory == 'struct':
if self.struct_contains_object(paramtype.text) == True:
struct_list.add(item)
return struct_list
#
# Return list of objects from a given list of parameters or members
def getObjectsInParameterList(self, item_list, create_func):
object_list = set()
if create_func == True:
member_list = item_list[0:-1]
else:
member_list = item_list
for item in member_list:
if paramtype.text in self.handle_types:
object_list.add(item)
return object_list
#
# Construct list of extension structs containing handles, or extension structs that share a <validextensionstructs>
# tag WITH an extension struct containing handles.
def GenerateCommandWrapExtensionList(self):
for struct in self.structMembers:
if (len(struct.members) > 1) and struct.members[1].extstructs is not None:
found = False;
for item in struct.members[1].extstructs.split(','):
if item != '' and self.struct_contains_object(item) == True:
found = True
if found == True:
for item in struct.members[1].extstructs.split(','):
if item != '' and item not in self.extension_structs:
self.extension_structs.append(item)
#
# Returns True if a struct may have a pNext chain containing an object
def StructWithExtensions(self, struct_type):
if struct_type in self.struct_member_dict:
param_info = self.struct_member_dict[struct_type]
if (len(param_info) > 1) and param_info[1].extstructs is not None:
for item in param_info[1].extstructs.split(','):
if item in self.extension_structs:
return True
return False
#
# Generate VulkanObjectType from object type
def GetVulkanObjType(self, type):
return 'kVulkanObjectType%s' % type[2:]
#
# Return correct dispatch table type -- instance or device
def GetDispType(self, type):
return 'instance' if type in ['VkInstance', 'VkPhysicalDevice'] else 'device'
#
# Generate source for creating a Vulkan object
    def generate_create_object_code(self, indent, proto, params, cmd_info, allocator):
        """Emit the PostCallRecord body that registers newly created handle(s).

        proto/params come from the command's XML; cmd_info is the list of
        CommandParam records; allocator is the name of the VkAllocationCallbacks
        parameter ('nullptr' when the command has none).  Returns '' when the
        final parameter is not a handle type.
        """
        create_obj_code = ''
        handle_type = params[-1].find('type')
        is_create_pipelines = False
        if handle_type.text in self.handle_types:
            # Check for special case where multiple handles are returned
            object_array = False
            if cmd_info[-1].len is not None:
                object_array = True;
            handle_name = params[-1].find('name')
            object_dest = '*%s' % handle_name.text
            if object_array == True:
                if 'CreateGraphicsPipelines' in proto.text or 'CreateComputePipelines' in proto.text or 'CreateRayTracingPipelines' in proto.text:
                    is_create_pipelines = True
                    # Pipelines may be partially created on other failures; only a
                    # validation failure means nothing was created
                    create_obj_code += '%sif (VK_ERROR_VALIDATION_FAILED_EXT == result) return;\n' % indent
                create_obj_code += '%sif (%s) {\n' % (indent, handle_name.text)
                indent = self.incIndent(indent)
                countispointer = ''
                if 'uint32_t*' in cmd_info[-2].cdecl:
                    # The count parameter is itself a pointer -- dereference it in the loop bound
                    countispointer = '*'
                create_obj_code += '%sfor (uint32_t index = 0; index < %s%s; index++) {\n' % (indent, countispointer, cmd_info[-1].len)
                indent = self.incIndent(indent)
                object_dest = '%s[index]' % cmd_info[-1].name
            dispobj = params[0].find('type').text
            if is_create_pipelines:
                # A pipeline slot that failed creation is VK_NULL_HANDLE -- skip it
                create_obj_code += '%sif (!pPipelines[index]) continue;\n' % indent
            create_obj_code += '%sCreateObject(%s, %s, %s);\n' % (indent, object_dest, self.GetVulkanObjType(cmd_info[-1].type), allocator)
            if object_array == True:
                # Close the for loop and the enclosing null-check brace
                indent = self.decIndent(indent)
                create_obj_code += '%s}\n' % indent
                indent = self.decIndent(indent)
                create_obj_code += '%s}\n' % indent
                indent = self.decIndent(indent)
        return create_obj_code
#
# Generate source for destroying a non-dispatchable object
    def generate_destroy_object_code(self, indent, proto, cmd_info):
        """Emit validation and record code for a Destroy/Free entry point.

        Returns (object_array, validate_code, record_code), where object_array
        is True when the API frees an array of handles (the array case emits a
        sentinel string; apparently unreached for current APIs since command
        buffers / descriptor sets are handled manually -- TODO confirm).
        """
        validate_code = ''
        record_code = ''
        object_array = False
        if True in [destroy_txt in proto.text for destroy_txt in ['Destroy', 'Free']]:
            # Check for special case where multiple handles are returned
            if cmd_info[-1].len is not None:
                object_array = True;
                # Array-free APIs pass the handle array last
                param = -1
            else:
                # Single-handle destroy APIs: the handle precedes pAllocator
                param = -2
            # Hand-maintained VUIDs covering allocator compatibility on destroy
            compatalloc_vuid_string = '%s-compatalloc' % cmd_info[param].name
            nullalloc_vuid_string = '%s-nullalloc' % cmd_info[param].name
            compatalloc_vuid = self.manual_vuids.get(compatalloc_vuid_string, "kVUIDUndefined")
            nullalloc_vuid = self.manual_vuids.get(nullalloc_vuid_string, "kVUIDUndefined")
            if cmd_info[param].type in self.handle_types:
                if object_array == True:
                    # This API is freeing an array of handles -- add loop control
                    validate_code += 'HEY, NEED TO DESTROY AN ARRAY\n'
                else:
                    dispobj = cmd_info[0].type
                    # Call Destroy a single time
                    validate_code += '%sskip |= ValidateDestroyObject(%s, %s, pAllocator, %s, %s);\n' % (indent, cmd_info[param].name, self.GetVulkanObjType(cmd_info[param].type), compatalloc_vuid, nullalloc_vuid)
                    record_code += '%sRecordDestroyObject(%s, %s);\n' % (indent, cmd_info[param].name, self.GetVulkanObjType(cmd_info[param].type))
        return object_array, validate_code, record_code
#
# Output validation for a single object (obj_count is NULL) or a counted list of objects
    def outputObjects(self, obj_type, obj_name, obj_count, prefix, index, indent, disp_name, parent_name, null_allowed, top_level):
        """Emit ValidateObject() call(s) for a single object (obj_count is None)
        or a counted array of objects.

        prefix is the accumulated member-access prefix (e.g. 'pCreateInfo->'),
        index is the loop-variable name to use for arrays, and null_allowed is
        the C++ boolean text ('true'/'false') for whether VK_NULL_HANDLE is legal.
        """
        pre_call_code = ''
        param_suffix = '%s-parameter' % (obj_name)
        parent_suffix = '%s-parent' % (obj_name)
        param_vuid = self.GetVuid(parent_name, param_suffix)
        parent_vuid = self.GetVuid(parent_name, parent_suffix)
        # If no parent VUID for this member, look for a commonparent VUID
        if parent_vuid == 'kVUIDUndefined':
            parent_vuid = self.GetVuid(parent_name, 'commonparent')
        if obj_count is not None:
            # Array case: guard against a null array and loop over the elements
            pre_call_code += '%sif (%s%s) {\n' % (indent, prefix, obj_name)
            indent = self.incIndent(indent)
            pre_call_code += '%sfor (uint32_t %s = 0; %s < %s; ++%s) {\n' % (indent, index, index, obj_count, index)
            indent = self.incIndent(indent)
            pre_call_code += '%sskip |= ValidateObject(%s%s[%s], %s, %s, %s, %s);\n' % (indent, prefix, obj_name, index, self.GetVulkanObjType(obj_type), null_allowed, param_vuid, parent_vuid)
            indent = self.decIndent(indent)
            pre_call_code += '%s}\n' % indent
            indent = self.decIndent(indent)
            pre_call_code += '%s}\n' % indent
        else:
            bonus_indent = ''
            if 'basePipelineHandle' in obj_name:
                # basePipelineHandle is only required (and only validated) when a
                # derivative pipeline is requested without a basePipelineIndex
                pre_call_code += '%sif ((%sflags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) && (%sbasePipelineIndex == -1))\n' % (indent, prefix, prefix)
                bonus_indent = '    '
                null_allowed = 'false'
                manual_vuid_index = parent_name + '-' + obj_name
                param_vuid = self.manual_vuids.get(manual_vuid_index, "kVUIDUndefined")
            pre_call_code += '%s%sskip |= ValidateObject(%s%s, %s, %s, %s, %s);\n' % (bonus_indent, indent, prefix, obj_name, self.GetVulkanObjType(obj_type), null_allowed, param_vuid, parent_vuid)
        return pre_call_code
#
# first_level_param indicates if elements are passed directly into the function else they're below a ptr/struct
    def validate_objects(self, members, indent, prefix, array_index, disp_name, parent_name, first_level_param):
        """Recursively emit ValidateObject() code for every handle reachable
        from *members* (a list of CommandParam records).

        first_level_param indicates the elements are passed directly into the
        function (else they're below a pointer/struct); prefix carries the
        accumulated C++ member-access path, and array_index numbers the loop
        variables so nested loops do not collide.
        """
        pre_code = ''
        index = 'index%s' % str(array_index)
        array_index += 1
        # Process any objects in this structure and recurse for any sub-structs in this struct
        for member in members:
            # Handle objects
            if member.iscreate and first_level_param and member == members[-1]:
                # The last parameter of a create command is the output handle --
                # it does not exist yet, so it cannot be validated here
                continue
            if member.type in self.handle_types:
                if member.len:
                    count_name = '%s%s' % (prefix, member.len)
                    # isoptional may be a list for array types: [the array, the array elements]
                    if type(member.isoptional) == list:
                        null_allowed = member.isoptional[1]
                    else:
                        # Default to false if a value is not provided for the array elements
                        null_allowed = False
                else:
                    count_name = None
                    null_allowed = member.isoptional
                tmp_pre = self.outputObjects(member.type, member.name, count_name, prefix, index, indent, disp_name, parent_name, str(null_allowed).lower(), first_level_param)
                pre_code += tmp_pre
            # Handle Structs that contain objects at some level
            elif member.type in self.struct_member_dict:
                # Structs at first level will have an object
                if self.struct_contains_object(member.type) == True:
                    struct_info = self.struct_member_dict[member.type]
                    # TODO (jbolz): Can this use paramIsPointer?
                    ispointer = '*' in member.cdecl;
                    # Struct Array
                    if member.len is not None:
                        # Update struct prefix
                        new_prefix = '%s%s' % (prefix, member.name)
                        pre_code += '%sif (%s%s) {\n' % (indent, prefix, member.name)
                        indent = self.incIndent(indent)
                        pre_code += '%sfor (uint32_t %s = 0; %s < %s%s; ++%s) {\n' % (indent, index, index, prefix, member.len, index)
                        indent = self.incIndent(indent)
                        local_prefix = '%s[%s].' % (new_prefix, index)
                        # Process sub-structs in this struct
                        tmp_pre = self.validate_objects(struct_info, indent, local_prefix, array_index, disp_name, member.type, False)
                        pre_code += tmp_pre
                        indent = self.decIndent(indent)
                        pre_code += '%s}\n' % indent
                        indent = self.decIndent(indent)
                        pre_code += '%s}\n' % indent
                    # Single Struct Pointer
                    elif ispointer:
                        # Update struct prefix
                        new_prefix = '%s%s->' % (prefix, member.name)
                        # Declare safe_VarType for struct
                        pre_code += '%sif (%s%s) {\n' % (indent, prefix, member.name)
                        indent = self.incIndent(indent)
                        # Process sub-structs in this struct
                        tmp_pre = self.validate_objects(struct_info, indent, new_prefix, array_index, disp_name, member.type, False)
                        pre_code += tmp_pre
                        indent = self.decIndent(indent)
                        pre_code += '%s}\n' % indent
                    # Single Nested Struct
                    else:
                        # Update struct prefix
                        new_prefix = '%s%s.' % (prefix, member.name)
                        # Process sub-structs
                        tmp_pre = self.validate_objects(struct_info, indent, new_prefix, array_index, disp_name, member.type, False)
                        pre_code += tmp_pre
        return pre_code
#
# For a particular API, generate the object handling code
def generate_wrapping_code(self, cmd):
indent = ' '
pre_call_validate = ''
pre_call_record = ''
post_call_record = ''
destroy_array = False
validate_destroy_code = ''
record_destroy_code = ''
proto = cmd.find('proto/name')
params = cmd.findall('param')
if proto.text is not None:
cmddata = self.cmd_info_dict[proto.text]
cmd_info = cmddata.members
disp_name = cmd_info[0].name
# Handle object create operations if last parameter is created by this call
if cmddata.iscreate:
post_call_record += self.generate_create_object_code(indent, proto, params, cmd_info, cmddata.allocator)
# Handle object destroy operations
if cmddata.isdestroy:
(destroy_array, validate_destroy_code, record_destroy_code) = self.generate_destroy_object_code(indent, proto, cmd_info)
pre_call_record += record_destroy_code
pre_call_validate += self.validate_objects(cmd_info, indent, '', 0, disp_name, proto.text, True)
pre_call_validate += validate_destroy_code
return pre_call_validate, pre_call_record, post_call_record
#
# Capture command parameter info needed to create, destroy, and validate objects
    def genCmd(self, cmdinfo, cmdname, alias):
        """Capture per-command parameter info needed to create, destroy, and
        validate objects; records a CmdInfoData entry in self.cmd_info_dict
        and appends the command name to self.cmd_list."""
        # Add struct-member type information to command parameter information
        OutputGenerator.genCmd(self, cmdinfo, cmdname, alias)
        members = cmdinfo.elem.findall('.//param')
        # Iterate over members once to get length parameters for arrays
        lens = set()
        for member in members:
            length = self.getLen(member)
            if length:
                lens.add(length)
        struct_member_dict = dict(self.structMembers)
        # Set command invariant information needed at a per member level in validate...
        is_create_command = any(filter(lambda pat: pat in cmdname, ('Create', 'Allocate', 'Enumerate', 'RegisterDeviceEvent', 'RegisterDisplayEvent')))
        last_member_is_pointer = len(members) and self.paramIsPointer(members[-1])
        # vkGet* commands whose last parameter is a pointer also return handles
        iscreate = is_create_command or ('vkGet' in cmdname and last_member_is_pointer)
        isdestroy = any([destroy_txt in cmdname for destroy_txt in ['Destroy', 'Free']])
        # Generate member info
        membersInfo = []
        allocator = 'nullptr'
        for member in members:
            # Get type and name of member
            info = self.getTypeNameTuple(member)
            type = info[0]
            name = info[1]
            # Skip fake parameters
            if type == '' or name == '':
                continue
            cdecl = self.makeCParamDecl(member, 0)
            # Check for parameter name in lens set
            iscount = True if name in lens else False
            length = self.getLen(member)
            isconst = True if 'const' in cdecl else False
            # Mark param as local if it is an array of objects
            islocal = False;
            if type in self.handle_types:
                if (length is not None) and (isconst == True):
                    islocal = True
            # Or if it's a struct that contains an object
            elif type in struct_member_dict:
                if self.struct_contains_object(type) == True:
                    islocal = True
            if type == 'VkAllocationCallbacks':
                # Remember the allocator parameter's name for create-object code
                allocator = name
            extstructs = member.attrib.get('validextensionstructs') if name == 'pNext' else None
            membersInfo.append(self.CommandParam(type=type,
                                                 name=name,
                                                 isconst=isconst,
                                                 isoptional=self.paramIsOptional(member),
                                                 iscount=iscount,
                                                 len=length,
                                                 extstructs=extstructs,
                                                 cdecl=cdecl,
                                                 islocal=islocal,
                                                 iscreate=iscreate))
        self.cmd_list.append(cmdname)
        self.cmd_info_dict[cmdname] =self.CmdInfoData(name=cmdname, cmdinfo=cmdinfo, members=membersInfo, iscreate=iscreate, isdestroy=isdestroy, allocator=allocator, extra_protect=self.featureExtraProtect, alias=alias)
#
# Create code Create, Destroy, and validate Vulkan objects
    def WrapCommands(self):
        """Generate the PreCallValidate/PreCallRecord/PostCallRecord prototypes
        (header) and function bodies (source) for every recorded command,
        skipping interface functions and emitting only prototypes for commands
        in the manual (no_autogen) list."""
        for cmdname in self.cmd_list:
            cmddata = self.cmd_info_dict[cmdname]
            cmdinfo = cmddata.cmdinfo
            if cmdname in self.interface_functions:
                continue
            manual = False
            if cmdname in self.no_autogen_list:
                # Manually implemented: emit prototypes only, no bodies
                manual = True
            # Generate object handling code
            (pre_call_validate, pre_call_record, post_call_record) = self.generate_wrapping_code(cmdinfo.elem)
            feature_extra_protect = cmddata.extra_protect
            if (feature_extra_protect is not None):
                self.appendSection('command', '')
                self.appendSection('command', '#ifdef '+ feature_extra_protect)
                self.prototypes += [ '#ifdef %s' % feature_extra_protect ]
            # Add intercept to procmap
            self.prototypes += [ '    {"%s", (void*)%s},' % (cmdname,cmdname[2:]) ]
            decls = self.makeCDecls(cmdinfo.elem)
            # Gather the parameter items
            params = cmdinfo.elem.findall('param/name')
            # Pull out the text for each of the parameters, separate them by commas in a list
            paramstext = ', '.join([str(param.text) for param in params])
            # Generate the API call template
            fcn_call = cmdinfo.elem.attrib.get('name').replace('vk', 'TOKEN', 1) + '(' + paramstext + ');'
            # Everything after 'VKAPI_CALL ' is the name-plus-parameter-list template
            func_decl_template = decls[0][:-1].split('VKAPI_CALL ')
            func_decl_template = func_decl_template[1]
            result_type = cmdinfo.elem.find('proto/type')
            if 'object_tracker.h' in self.genOpts.filename:
                # Output PreCallValidateAPI prototype if necessary
                if pre_call_validate:
                    pre_cv_func_decl = 'bool PreCallValidate' + func_decl_template + ' const;'
                    self.appendSection('command', pre_cv_func_decl)
                # Output PreCallRecordAPI prototype if necessary
                if pre_call_record:
                    pre_cr_func_decl = 'void PreCallRecord' + func_decl_template + ';'
                    self.appendSection('command', pre_cr_func_decl)
                # Output PosCallRecordAPI prototype if necessary
                if post_call_record:
                    post_cr_func_decl = 'void PostCallRecord' + func_decl_template + ';'
                    if result_type.text == 'VkResult':
                        post_cr_func_decl = post_cr_func_decl.replace(')', ',\n    VkResult result)')
                    elif result_type.text == 'VkDeviceAddress':
                        post_cr_func_decl = post_cr_func_decl.replace(')', ',\n    VkDeviceAddress result)')
                    self.appendSection('command', post_cr_func_decl)
            if 'object_tracker.cpp' in self.genOpts.filename:
                # Output PreCallValidateAPI function if necessary
                if pre_call_validate and not manual:
                    pre_cv_func_decl = 'bool ObjectLifetimes::PreCallValidate' + func_decl_template + ' const {'
                    self.appendSection('command', '')
                    self.appendSection('command', pre_cv_func_decl)
                    self.appendSection('command', '    bool skip = false;')
                    self.appendSection('command', pre_call_validate)
                    self.appendSection('command', '    return skip;')
                    self.appendSection('command', '}')
                # Output PreCallRecordAPI function if necessary
                if pre_call_record and not manual:
                    pre_cr_func_decl = 'void ObjectLifetimes::PreCallRecord' + func_decl_template + ' {'
                    self.appendSection('command', '')
                    self.appendSection('command', pre_cr_func_decl)
                    self.appendSection('command', pre_call_record)
                    self.appendSection('command', '}')
                # Output PosCallRecordAPI function if necessary
                if post_call_record and not manual:
                    post_cr_func_decl = 'void ObjectLifetimes::PostCallRecord' + func_decl_template + ' {'
                    self.appendSection('command', '')
                    if result_type.text == 'VkResult':
                        post_cr_func_decl = post_cr_func_decl.replace(')', ',\n    VkResult result)')
                        # The two createpipelines APIs may create on failure -- skip the success result check
                        if 'CreateGraphicsPipelines' not in cmdname and 'CreateComputePipelines' not in cmdname and 'CreateRayTracingPipelines' not in cmdname:
                            post_cr_func_decl = post_cr_func_decl.replace('{', '{\n    if (result != VK_SUCCESS) return;')
                    elif result_type.text == 'VkDeviceAddress':
                        post_cr_func_decl = post_cr_func_decl.replace(')', ',\n    VkDeviceAddress result)')
                    self.appendSection('command', post_cr_func_decl)
                    self.appendSection('command', post_call_record)
                    self.appendSection('command', '}')
            if (feature_extra_protect is not None):
                self.appendSection('command', '#endif // '+ feature_extra_protect)
                self.prototypes += [ '#endif' ]
| 52.961064 | 219 | 0.604289 |
4a81a78194953478d3d410d6e13f6e215a0c029f | 4,430 | py | Python | _depreciated/Analyser and Visualisers/iris set reader.py | albert118/Data-Analytics | 31245940d08cf5bd07059f02242441d86d2a2ddc | [
"MIT"
] | 1 | 2020-02-10T10:21:24.000Z | 2020-02-10T10:21:24.000Z | _depreciated/Analyser and Visualisers/iris set reader.py | albert118/Data-Analytics | 31245940d08cf5bd07059f02242441d86d2a2ddc | [
"MIT"
] | 5 | 2021-06-05T00:11:16.000Z | 2022-03-12T00:57:54.000Z | _depreciated/Analyser and Visualisers/iris set reader.py | albert118/Data-Analytics | 31245940d08cf5bd07059f02242441d86d2a2ddc | [
"MIT"
] | null | null | null | # IRIS DATA SET TESTING
# Three copies of the same data set, one doesnt work due to line-delimiter error
# \r instead of \r\n
import csv
import matplotlib.pyplot as plt
import numpy as np
import math
################################################################################
# class def's
################################################################################
class Iris:
    """A single iris specimen record with an auto-assigned numeric id."""

    # Class-wide counter: each new instance takes the current value as its id.
    ID = 0

    def __init__(self, sep_len, sep_wid, pet_len, pet_wid, species):
        self.sepal_length = sep_len
        self.sepal_width = sep_wid
        self.petal_length = pet_len
        self.petal_width = pet_wid
        self.species = species
        self.id = Iris.ID
        Iris.ID += 1

    def print_iris(self):
        """Print a human-readable summary of this specimen."""
        divider = '----------------------'
        spec = "ID: [{id}] SPECIES: [{species}]".format(species=self.species, id=self.id)
        length = "LENGTH:\n\tPETAL: [{pet}]\n\tSEPAL: [{sep}]".format(pet=self.petal_length, sep=self.sepal_length)
        width = "WIDTH:\n\tPETAL: [{pet}]\n\tSEPAL: [{sep}]".format(pet=self.petal_width, sep=self.sepal_width)
        print('\n'.join([divider, spec, length, width, divider]))
        return
################################################################################
# method def's
################################################################################
def get_data(filepath=None):
    """Read an iris CSV file and return a list of Iris objects.

    The first row is treated as the header; every following row becomes one
    Iris instance (columns 1-5: sepal length/width, petal length/width, species).

    parameters:
        filepath - path of the CSV file to read. Defaults to the original
                   hard-coded desktop location of ``file1`` so existing
                   callers keep working unchanged.
    returns:
        list of Iris objects
    """
    if filepath is None:
        # Backward-compatible default: original hard-coded location.
        print("Filename:\t", file1)
        filepath = 'C:\\Users\\Albert\\Desktop\\' + file1
    with open(filepath, newline='') as csvfile:
        line_count = 0
        delim = ','
        iris_set = []
        # NOTE(review): rows are split on spaces by the reader and then
        # re-joined with commas before splitting again -- presumably a
        # workaround for mixed delimiters in the source files; verify
        # against the actual data.
        rdr = csv.reader(csvfile, delimiter=' ', quotechar='|', dialect='excel')
        for row in rdr:
            if line_count == 0:
                # Header row: report the field names only.
                fields = delim.join(row).split(delim)
                print('FIELDS: ', fields)
            else:
                atts = delim.join(row).split(delim)
                # Column 0 is the record index; columns 1-5 carry the data.
                iris_set.append(Iris(atts[1], atts[2], atts[3], atts[4], atts[5]))
            line_count += 1
    return iris_set
def gen_histogram(inputs, **kwargs):
    """
    Generate a histogram of the input list data.

    Possible kwargs:
        axis  - dict with keys 'axis' (the subplot array from plt.subplots),
                'row' and 'col' (index of the target subplot)
        title - title string for the plot
        plus any matplotlib hist() styling kwarg already in the defaults
        (facecolor, alpha)
    """
    # Styling defaults, overridable by matching caller kwargs.
    arg_defaults = {
        'facecolor': 'g',
        'alpha': 0.5,
    }
    for key, val in kwargs.items():
        if key in arg_defaults:
            arg_defaults[key] = val
    # BUG FIX: the original tested "'axis' in kwargs.items()", which compares
    # a string against (key, value) tuples and is always False, and the branch
    # body then read an undefined name "axis" -- so the subplot path was both
    # unreachable and broken. Test membership on kwargs and read the dict out.
    if 'axis' in kwargs:
        axis = kwargs['axis']
        # The subplot array and the target cell index come from the axis dict.
        plots = axis['axis']
        plots[axis['row'], axis['col']].hist(inputs, **arg_defaults)
        if 'title' in kwargs:
            plots[axis['row'], axis['col']].set_title(kwargs['title'])
        else:
            plots[axis['row'], axis['col']].set_title("Probability Density Histogram of Data")
    else:
        plt.hist(inputs, **arg_defaults)
        if 'title' in kwargs:
            plt.title(kwargs['title'])
        else:
            plt.title("Probability Density Histogram of Data")
    # Data-smoothing ideas from the original notes (equi-depth binning,
    # min-max / z-score / sigmoidal normalisation) are left as future work.
    plt.show()
    return
# ---------------------------------------------------------------- data files
file1 = 'iris-d12.csv'
file2 = 'iris-m1.csv'
file3 = 'iris-u2.csv'
# ----------------------------------------------------------------------------

fig, axs = plt.subplots(2, 2)
# fig.suptitle("Iris Test Visualisation")

# Load the data set and collect each measurement into its own column list.
data = get_data()
seplen = [iris.sepal_length for iris in data]
sepwid = [iris.sepal_width for iris in data]
petwid = [iris.petal_width for iris in data]
petlen = [iris.petal_length for iris in data]

# Earlier scatter/histogram experiments are kept out of the run; only the
# sepal-length histogram is drawn into the top-left subplot.
gen_histogram(seplen, title="Histogram of Sepal Lengths", axis={'axis': axs, 'row': 0, 'col': 0})

# TODO: add further graphing and visualisation (scatters, frequency curves,
# data cubes?).
| 29.144737 | 109 | 0.610609 |
3606e2b9f838bd2f29c68a5f9254efb9c33020c5 | 822 | py | Python | autocorrect/__init__.py | brandonmpace/autocorrect | 4bef4a1607e7a54cf50cbe4047ab3dd58ebdc943 | [
"MIT"
] | null | null | null | autocorrect/__init__.py | brandonmpace/autocorrect | 4bef4a1607e7a54cf50cbe4047ab3dd58ebdc943 | [
"MIT"
] | null | null | null | autocorrect/__init__.py | brandonmpace/autocorrect | 4bef4a1607e7a54cf50cbe4047ab3dd58ebdc943 | [
"MIT"
] | null | null | null | # Python 3 Spelling Corrector
#
# Copyright 2014 Jonas McCallum.
# Updated for Python 3, based on Peter Norvig's
# 2007 version: http://norvig.com/spell-correct.html
#
# Open source, MIT license
# http://www.opensource.org/licenses/mit-license.php
"""
Spell function
Author: Jonas McCallum
https://github.com/foobarmus/autocorrect
"""
from autocorrect.nlp_parser import NLP_COUNTS
from autocorrect.word import Word, common, exact, known, get_case
def spell(word):
    """Return the most likely correction for anything up to a double typo."""
    w = Word(word)
    # Candidate tiers, cheapest first; the word itself is the last resort.
    candidates = (
        common([word])
        or exact([word])
        or known([word])
        or known(w.typos())
        or common(w.double_typos())
        or [word]
    )
    best = max(candidates, key=lambda candidate: NLP_COUNTS[candidate])
    # Re-apply the casing of the original word to the chosen correction.
    return get_case(word, best)
| 30.444444 | 69 | 0.70073 |
f93ea58f1777fcc1d07a6eccd8a349350de3ed8d | 1,202 | py | Python | tests/parsers/test_parser_create_user.py | tableau/tabcmd | a6a44795b2568933505dfc8c443ee16542c9e4c0 | [
"MIT"
] | 3 | 2022-02-15T03:07:51.000Z | 2022-03-09T13:14:52.000Z | tests/parsers/test_parser_create_user.py | tableau/tabcmd | a6a44795b2568933505dfc8c443ee16542c9e4c0 | [
"MIT"
] | 57 | 2022-01-31T22:33:17.000Z | 2022-03-28T22:05:53.000Z | tests/parsers/test_parser_create_user.py | tableau/tabcmd | a6a44795b2568933505dfc8c443ee16542c9e4c0 | [
"MIT"
] | 2 | 2022-02-23T23:05:35.000Z | 2022-03-03T21:32:53.000Z | import unittest
from unittest import mock
from tabcmd.commands.user.create_users_command import CreateUsersCommand
from .common_setup import *
commandname = "createusers"
class CreateUsersTest(unittest.TestCase):
    """Argument-parser tests for the ``createusers`` command."""

    @classmethod
    def setUpClass(cls):
        # One shared parser wired up for every test in this class.
        cls.parser_under_test = initialize_test_pieces(commandname, CreateUsersCommand)

    def test_create_users_parser_users_file(self):
        # The CSV file named on the command line is opened as UTF-8 text.
        with mock.patch("builtins.open", mock.mock_open(read_data="test")) as open_file:
            args = self.parser_under_test.parse_args([commandname, "users.csv"])
            open_file.assert_called_with("users.csv", "r", -1, "UTF-8", None)

    def test_create_user_parser_missing_arguments(self):
        # Omitting the required csv argument makes argparse exit.
        with self.assertRaises(SystemExit):
            args = self.parser_under_test.parse_args([commandname])

    def test_create_user_parser_role(self):
        # The -r option value is stored verbatim on the parsed namespace.
        with mock.patch("builtins.open", mock.mock_open(read_data="test")):
            args = self.parser_under_test.parse_args(
                [commandname, "users.csv", "-r", "SiteAdministrator"])
            assert args.role == "SiteAdministrator", args
| 38.774194 | 88 | 0.710483 |
2f04de359c9dbc0b194227f136cb0d625b7c9101 | 4,222 | py | Python | conans/errors.py | datalogics-kam/conan | 7bf230cd5f8ef68eb804908777ebaad75e951b16 | [
"MIT"
] | 1 | 2021-06-14T01:39:27.000Z | 2021-06-14T01:39:27.000Z | conans/errors.py | datalogics-kam/conan | 7bf230cd5f8ef68eb804908777ebaad75e951b16 | [
"MIT"
] | 2 | 2018-02-22T21:28:04.000Z | 2018-09-28T13:51:47.000Z | conans/errors.py | datalogics-kam/conan | 7bf230cd5f8ef68eb804908777ebaad75e951b16 | [
"MIT"
] | 1 | 2021-06-03T23:08:43.000Z | 2021-06-03T23:08:43.000Z | """
Exceptions raised and handled in Conan server.
These exceptions are mapped between server (as an HTTP response) and client
through the REST API. When an error happens in the server, it is translated to an
HTTP error code that is sent to the client. The client reads the server code and
raises the matching exception.
see return_plugin.py
"""
from contextlib import contextmanager
from conans.util.env_reader import get_env
@contextmanager
def conanfile_exception_formatter(conanfile_name, func_name):
    """
    Context manager that re-raises errors from user conanfile methods with a
    message pointing at the offending conanfile line.

    :param conanfile_name: name/reference of the conanfile being executed
    :param func_name: name of the conanfile method being executed
    """
    try:
        yield
    except ConanInvalidConfiguration as err:
        # Keep the invalid-configuration type, but prefix the conanfile name.
        # TODO: Move from here?
        raise ConanInvalidConfiguration("{}: Invalid configuration: {}".format(conanfile_name, err))
    except Exception as err:
        raise ConanExceptionInUserConanfileMethod(
            _format_conanfile_exception(conanfile_name, func_name, err))
def _format_conanfile_exception(scope, method, exception):
    """
    Build an error message pointing at the user's conanfile source lines.

    Walks the current traceback and records every frame that lives inside the
    user's ``conanfile.py``, stopping as soon as the trace leaves it again
    (i.e. re-enters internal conan code). With CONAN_VERBOSE_TRACEBACK set,
    the full traceback is returned instead.

    :param scope: conanfile name/reference, used in the first message line
    :param method: conanfile method where the error happened
    :param exception: the exception being reported
    :return: formatted multi-line message ending with the exception repr
    """
    import sys
    import traceback
    if get_env("CONAN_VERBOSE_TRACEBACK", False):
        return traceback.format_exc()
    try:
        conanfile_reached = False
        tb = sys.exc_info()[2]
        index = 0
        content_lines = []
        while True:  # Walking past the last frame raises IndexError, captured below
            # 40 levels of nested functions max, get the latest
            filepath, line, name, contents = traceback.extract_tb(tb, 40)[index]
            if "conanfile.py" not in filepath:  # Avoid show trace from internal conan source code
                if conanfile_reached:  # The error goes to internal code, exit print
                    break
            else:
                if not conanfile_reached:  # First line
                    msg = "%s: Error in %s() method" % (scope, method)
                    msg += ", line %d\n\t%s" % (line, contents)
                else:
                    msg = "while calling '%s', line %d\n\t%s" % (name, line, contents) if line else "\n\t%s" % contents
                content_lines.append(msg)
                conanfile_reached = True
            index += 1
    except Exception:
        # Best effort: any failure while inspecting the traceback (including
        # the IndexError that ends the walk) simply stops the collection.
        # BUG FIX: this was a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit.
        pass
    ret = "\n".join(content_lines)
    ret += "\n\t%s: %s" % (exception.__class__.__name__, str(exception))
    return ret
class ConanException(Exception):
    """
    Generic conans exception; every other conan error derives from it.
    """
    def __init__(self, *args, **kwargs):
        # Optional extra payload callers may attach after construction.
        self.info = None
        super().__init__(*args, **kwargs)


class NoRemoteAvailable(ConanException):
    """No default remote configured, or the specified remote does not exist."""


class InvalidNameException(ConanException):
    pass


class ConanConnectionError(ConanException):
    pass


class ConanOutdatedClient(ConanException):
    pass


class ConanExceptionInUserConanfileMethod(ConanException):
    pass


class ConanInvalidConfiguration(ConanExceptionInUserConanfileMethod):
    pass


# Remote exceptions: each of the following maps to an HTTP status code
# (see EXCEPTION_CODE_MAPPING below).

class InternalErrorException(ConanException):
    """Generic 500 error."""


class RequestErrorException(ConanException):
    """Generic 400 error."""


class AuthenticationException(ConanException):
    """401 error."""


class ForbiddenException(ConanException):
    """403 error."""


class NotFoundException(ConanException):
    """404 error."""


class UserInterfaceErrorException(RequestErrorException):
    """420 error."""


# Exception class -> HTTP status code used by the REST layer.
EXCEPTION_CODE_MAPPING = {
    InternalErrorException: 500,
    RequestErrorException: 400,
    AuthenticationException: 401,
    ForbiddenException: 403,
    NotFoundException: 404,
    UserInterfaceErrorException: 420,
}
| 27.415584 | 133 | 0.638797 |
57d4679381357016d3c76ed5d9a20b52adc41ab2 | 15,754 | py | Python | bels/id_utils.py | VertNet/bels | 5d3a2424e14d54e620fb45e05b4a9733996510cf | [
"Apache-2.0"
] | 6 | 2020-12-19T06:19:25.000Z | 2022-02-20T18:44:47.000Z | bels/id_utils.py | VertNet/bels | 5d3a2424e14d54e620fb45e05b4a9733996510cf | [
"Apache-2.0"
] | 26 | 2021-01-07T15:48:10.000Z | 2021-09-27T15:47:24.000Z | bels/id_utils.py | VertNet/bels | 5d3a2424e14d54e620fb45e05b4a9733996510cf | [
"Apache-2.0"
] | 1 | 2020-12-29T23:12:40.000Z | 2020-12-29T23:12:40.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "John Wieczorek"
__copyright__ = "Copyright 2021 Rauthiflor LLC"
__filename__ = "id_utils.py"
__version__ = __filename__ + ' ' + "2021-07-19T23:36-03:00"
from .dwca_terms import locationmatchwithcoordstermlist
from .dwca_terms import locationkeytermlist
from .dwca_vocab_utils import darwinize_dict
from .dwca_utils import lower_dict_keys
from decimal import *
import hashlib
import base64
import re
import unicodedata
def location_match_str(termlist, inputdict):
    '''Build the string used to match Darwin Core Locations.

    Terms missing from inputdict (or with unusable values) contribute only the
    separating space, so Darwinize any field names before calling this.
    parameters:
        termlist - terms to concatenate, in order
        inputdict - dict of field values keyed by Darwin Core term name
    returns:
        str - the Location-matching string (one trailing space per term)
    '''
    # Round halves away from zero so 1.235 -> 1.24 and -1.235 -> -1.24,
    # matching the behavior of ROUND() in BigQuery.
    getcontext().rounding = ROUND_HALF_UP
    matchstr = ''
    for term in termlist:
        try:
            if term in ('decimallatitude', 'decimallongitude'):
                # Coordinates are rounded to 7 decimals, then trimmed of
                # trailing zeros and a dangling decimal point, mirroring the
                # truncation applied on the BigQuery side.
                quantized = Decimal(inputdict[term]).quantize(Decimal('1.0000000'))
                matchstr += str(quantized).strip('0').strip('.')
            else:
                matchstr += inputdict[term]
        except Exception:
            # Missing or malformed values are silently skipped.
            pass
        matchstr += ' '
    return matchstr
def location_str(inputdict):
    '''Concatenate the Darwin Core Location key terms and their values into one
    string, for use when constructing a Location identifier.
    parameters:
        inputdict - dict of Location fields keyed by Darwin Core term name
    returns:
        str - the Location identifier string
    '''
    keystr = ''
    for term in locationkeytermlist:
        # The term name is always included; its value only when present.
        keystr += term
        try:
            value = inputdict[term]
            if value is not None:
                keystr += value
        except Exception:
            # Missing terms (or non-string values) contribute the name only.
            pass
    return keystr
def dwc_location_hash(inputdict, darwincloudfile):
    '''Compute a base64-encoded sha256 hash identifying a Darwin Core Location.
    parameters:
        inputdict - dict of fields from which to construct the Location identifier
        darwincloudfile - the Darwin Cloud vocabulary file (required)
    returns:
        str - the base64 representation of the Location hash
    '''
    # Standardize field names to Darwin Core before building the identifier.
    darwinized = darwinize_dict(inputdict, darwincloudfile, namespace=True)
    identifier = location_str(lower_dict_keys(darwinized))
    digest = hashlib.sha256(identifier.encode('utf-8')).digest()
    return base64.b64encode(digest).decode('utf-8')
def super_simplify(idstr):
    '''Reduce an input location string to its canonical matching form.

    Mirrors the BigQuery expression
    REGEXP_REPLACE(saveNumbers(NORMALIZE_AND_CASEFOLD(removeSymbols(
        simplifyDiacritics(for_match)),NFKC)),r"[\s]+",'')
    parameters:
        idstr - the location string to create a match string for
    returns:
        str - the location match string
    '''
    # Pipeline: strip diacritics -> drop symbols -> casefold + NFKC ->
    # space out non-numeric punctuation -> remove all whitespace.
    result = simplify_diacritics(idstr)
    result = remove_symbols(result)
    result = casefold_and_normalize(result)
    result = save_numbers(result)
    return remove_whitespace(result)
def remove_symbols(inputstr):
    '''Strip punctuation and symbol characters from a string.

    The characters . , / - and + are deliberately preserved because they can
    contribute to the semantics of a locality description.
    NOTE(review): the unescaped '-' between » and ‐ inside the character class
    forms a codepoint range (U+00BB..U+2010) -- confirm this is intended.
    parameters:
        inputstr - the string to clean
    returns:
        str - the cleaned string
    '''
    symbol_pattern = r'[’<>:‒–—―…!«»-‐?‘’“”;⁄␠·&@*•^¤¢$€£¥₩₪†‡°¡¿¬#№%‰‱¶′§~¨_|¦⁂☞∴‽※}{\\\]\[\"\)\(]+'
    return re.sub(symbol_pattern, '', inputstr)
def remove_whitespace(inputstr):
    '''Delete every whitespace character (spaces, tabs, newlines, ...).
    parameters:
        inputstr - the string to clean
    returns:
        str - the cleaned string
    '''
    return re.sub(r'[\s]+', '', inputstr)
def save_numbers(inputstr):
    '''Replace . , / - and + with a space, except when a digit sits immediately
    before or after the character -- that keeps decimal numbers, signed values
    and numeric dates intact.
    parameters:
        inputstr - the string to clean
    returns:
        str - the cleaned string
    '''
    # Lookarounds: substitute only when neither neighbor is a digit.
    return re.sub(r'(?<!\d)[.,\-\/\+](?!\d)', ' ', inputstr)
def simplify_diacritics(inputstr):
    # TODO - amend interpretations of ß (see https://github.com/VertNet/bels/issues/2)
    ''' Changes unicode diacritics to ASCII "equivalents"
    parameters:
        inputstr - the string to simplify
    returns:
        str - a simplified string
    '''
    functionname = 'simplify_diacritics()'
    # NOTE(review): the local name "str" shadows the builtin for the rest of
    # this function.
    str=inputstr
    # Each entry maps a regex character class of accented/precomposed
    # codepoints (upper and lower case handled separately, plus digraphs such
    # as AE/DZ/OI) to its plain-ASCII base form.
    defaultDiacriticsRemovalMap = [
        {'base':'A', 'letters':r'[\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F]'},
        {'base':'AA','letters':r'[\uA732]'},
        {'base':'AE','letters':r'[\u00C6\u01FC\u01E2]'},
        {'base':'AO','letters':r'[\uA734]'},
        {'base':'AU','letters':r'[\uA736]'},
        {'base':'AV','letters':r'[\uA738\uA73A]'},
        {'base':'AY','letters':r'[\uA73C]'},
        {'base':'B', 'letters':r'[\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181]'},
        {'base':'C', 'letters':r'[\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E]'},
        {'base':'D', 'letters':r'[\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779]'},
        {'base':'DZ','letters':r'[\u01F1\u01C4]'},
        {'base':'Dz','letters':r'[\u01F2\u01C5]'},
        {'base':'E', 'letters':r'[\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E]'},
        {'base':'F', 'letters':r'[\u0046\u24BB\uFF26\u1E1E\u0191\uA77B]'},
        {'base':'G', 'letters':r'[\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E]'},
        {'base':'H', 'letters':r'[\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D]'},
        {'base':'I', 'letters':r'[\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197]'},
        {'base':'J', 'letters':r'[\u004A\u24BF\uFF2A\u0134\u0248]'},
        {'base':'K', 'letters':r'[\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2]'},
        {'base':'L', 'letters':r'[\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780]'},
        {'base':'LJ','letters':r'[\u01C7]'},
        {'base':'Lj','letters':r'[\u01C8]'},
        {'base':'M', 'letters':r'[\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C]'},
        {'base':'N', 'letters':r'[\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4]'},
        {'base':'NJ','letters':r'[\u01CA]'},
        {'base':'Nj','letters':r'[\u01CB]'},
        {'base':'O', 'letters':r'[\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C]'},
        {'base':'OI','letters':r'[\u01A2]'},
        {'base':'OO','letters':r'[\uA74E]'},
        {'base':'OU','letters':r'[\u0222]'},
        {'base':'P', 'letters':r'[\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754]'},
        {'base':'Q', 'letters':r'[\u0051\u24C6\uFF31\uA756\uA758\u024A]'},
        {'base':'R', 'letters':r'[\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782]'},
        {'base':'S', 'letters':r'[\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784]'},
        {'base':'T', 'letters':r'[\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786]'},
        {'base':'TZ','letters':r'[\uA728]'},
        {'base':'U', 'letters':r'[\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244]'},
        {'base':'V', 'letters':r'[\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245]'},
        {'base':'VY','letters':r'[\uA760]'},
        {'base':'W', 'letters':r'[\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72]'},
        {'base':'X', 'letters':r'[\u0058\u24CD\uFF38\u1E8A\u1E8C]'},
        {'base':'Y', 'letters':r'[\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE]'},
        {'base':'Z', 'letters':r'[\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762]'},
        {'base':'a', 'letters':r'[\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250]'},
        {'base':'aa','letters':r'[\uA733]'},
        {'base':'ae','letters':r'[\u00E6\u01FD\u01E3]'},
        {'base':'ao','letters':r'[\uA735]'},
        {'base':'au','letters':r'[\uA737]'},
        {'base':'av','letters':r'[\uA739\uA73B]'},
        {'base':'ay','letters':r'[\uA73D]'},
        {'base':'b', 'letters':r'[\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253]'},
        {'base':'c', 'letters':r'[\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184]'},
        {'base':'d', 'letters':r'[\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A]'},
        {'base':'dz','letters':r'[\u01F3\u01C6]'},
        {'base':'e', 'letters':r'[\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD]'},
        {'base':'f', 'letters':r'[\u0066\u24D5\uFF46\u1E1F\u0192\uA77C]'},
        {'base':'g', 'letters':r'[\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F]'},
        {'base':'h', 'letters':r'[\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265]'},
        {'base':'hv','letters':r'[\u0195]'},
        {'base':'i', 'letters':r'[\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131]'},
        {'base':'j', 'letters':r'[\u006A\u24D9\uFF4A\u0135\u01F0\u0249]'},
        {'base':'k', 'letters':r'[\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3]'},
        {'base':'l', 'letters':r'[\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747]'},
        {'base':'lj','letters':r'[\u01C9]'},
        {'base':'m', 'letters':r'[\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F]'},
        {'base':'n', 'letters':r'[\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5]'},
        {'base':'nj','letters':r'[\u01CC]'},
        {'base':'o', 'letters':r'[\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275]'},
        {'base':'oi','letters':r'[\u01A3]'},
        {'base':'ou','letters':r'[\u0223]'},
        {'base':'oo','letters':r'[\uA74F]'},
        {'base':'p','letters':r'[\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755]'},
        {'base':'q','letters':r'[\u0071\u24E0\uFF51\u024B\uA757\uA759]'},
        {'base':'r','letters':r'[\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783]'},
        {'base':'s','letters':r'[\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B]'},
        {'base':'t','letters':r'[\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787]'},
        {'base':'tz','letters':r'[\uA729]'},
        {'base':'u','letters':r'[\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289]'},
        {'base':'v','letters':r'[\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C]'},
        {'base':'vy','letters':r'[\uA761]'},
        {'base':'w','letters':r'[\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73]'},
        {'base':'x','letters':r'[\u0078\u24E7\uFF58\u1E8B\u1E8D]'},
        {'base':'y','letters':r'[\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF]'},
        {'base':'z','letters':r'[\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763]'}
    ]
    # Apply every substitution class in turn over the whole string.
    i = 0
    while i<len(defaultDiacriticsRemovalMap):
        str = re.sub(defaultDiacriticsRemovalMap[i]['letters'], defaultDiacriticsRemovalMap[i]['base'], str)
        i += 1
    return str
def casefold_and_normalize(inputstr):
    '''Casefold inputstr, then apply Unicode NFKC normalization.
    parameters:
        inputstr - the string to normalize and casefold
    returns:
        str - a normalized and casefolded string
    '''
    folded = inputstr.casefold()
    return unicodedata.normalize('NFKC', folded)
# def normalize_and_casefold(inputstr):
# ''' Normalizes unicode, then casefolds inputstr"
# parameters:
# inputstr - the string to casefold and normalize
# returns:
# str - a casefolded and normalized string
# '''
# functionname = 'normalize_and_casefold()'
#
# str=inputstr
# ns = unicodedata.normalize('NFKC',str)
# cf = str.casefold()
# return cf
# def location_match_id_hex(inputstr):
# ''' Prepares an input location string for matching
# parameters:
# inputstr - the location string to create a match string for
# returns:
# id - the location match id
# '''
# functionname = 'location_match_id_hex()'
#
# id = hashlib.sha256(inputstr.encode('utf-8')).hexdigest()
# return id
| 53.40339 | 295 | 0.6545 |
353cc7a2d918ba9529487ab0fba23a29602edb4a | 1,390 | py | Python | dicom_wsi/compare_imgs.py | Steven-N-Hart/dicom_wsi | 254fc404193594e9b321fdefa2aeefb56a42e99a | [
"MIT"
] | 6 | 2020-12-23T01:20:38.000Z | 2022-01-13T09:06:20.000Z | dicom_wsi/compare_imgs.py | Steven-N-Hart/dicom_wsi | 254fc404193594e9b321fdefa2aeefb56a42e99a | [
"MIT"
] | 5 | 2020-01-10T21:24:52.000Z | 2020-08-18T20:04:39.000Z | dicom_wsi/compare_imgs.py | Steven-N-Hart/dicom_wsi | 254fc404193594e9b321fdefa2aeefb56a42e99a | [
"MIT"
] | 6 | 2019-11-25T16:03:58.000Z | 2021-09-25T11:42:16.000Z | from PIL import Image
import pyvips
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error
import pydicom
import numpy as np
from os.path import basename as bn
# Compare one tile of a converted DICOM WSI against the same region of the
# original SVS slide, report MSE and SSIM, and save both tiles as PNGs.
svs_file = '../tests/CMU-1.svs'
dcm_file = 'svs_jp2.0-1.dcm'
compression = 'jp2'
# Tile origin, tile edge length, and the frame index inside the DICOM series.
x, y = 24001, 2501
tile_size = 500
array_number = 378
ds = pydicom.dcmread(dcm_file)
dcm_img = ds.pixel_array[array_number]
wsi = pyvips.Image.new_from_file(svs_file)
resize_level = 1
# resize(1) keeps the full-resolution level.
img = wsi.resize(resize_level)
# Map pyvips band formats to the matching numpy dtypes.
format_to_dtype = {
    'uchar': np.uint8,
    'char': np.int8,
    'ushort': np.uint16,
    'short': np.int16,
    'uint': np.uint32,
    'int': np.int32,
    'float': np.float32,
    'double': np.float64,
    'complex': np.complex64,
    'dpcomplex': np.complex128,
}
# Wrap the raw pyvips memory buffer as a (height, width, bands) numpy array.
np_3d = np.ndarray(buffer=img.write_to_memory(),
                   dtype=format_to_dtype[img.format],
                   shape=[img.height, img.width, img.bands])
# Drop any alpha band, keeping RGB only.
np_3d = np_3d[:, :, :3]
raw_img = np_3d[x:x+tile_size, y:y+tile_size, :]
# NOTE(review): multichannel= is deprecated in newer scikit-image releases
# (channel_axis replaces it) -- confirm the pinned skimage version.
mse_result = mean_squared_error(raw_img, dcm_img)
ssim_result = ssim(dcm_img, raw_img, multichannel=True)
Image.fromarray(dcm_img).save(f"OUT/{compression}_{bn(svs_file.split('.')[2])}_{x}-{y}-{array_number}.dcm.png")
Image.fromarray(raw_img).save(f"OUT/{compression}_{bn(svs_file.split('.')[2])}_{x}-{y}-{array_number}.raw.png")
print(f'mse_result: {mse_result:.4f}, ssim_result:{ssim_result:.4f}')
| 28.958333 | 111 | 0.711511 |
050b9526f1c952f6d9589c38b5d7007b04f7d8c9 | 42,303 | py | Python | torch/onnx/symbolic_opset11.py | jeongukjae/pytorch | d168eae114f95c2a8893104353a5566fd6fb4655 | [
"Intel"
] | null | null | null | torch/onnx/symbolic_opset11.py | jeongukjae/pytorch | d168eae114f95c2a8893104353a5566fd6fb4655 | [
"Intel"
] | 1 | 2021-04-12T19:49:08.000Z | 2021-04-12T19:49:08.000Z | torch/onnx/symbolic_opset11.py | shmsong/pytorch | 90e532f3ef17a9611e9e7a9f1f6189d4168bf084 | [
"Intel"
] | 1 | 2022-02-23T02:34:50.000Z | 2022-02-23T02:34:50.000Z |
from sys import maxsize
import torch
import torch.onnx.symbolic_helper as sym_help
import warnings
import numpy
from torch.onnx.symbolic_helper import parse_args, _unimplemented, _is_tensor_list
from torch.onnx.symbolic_opset9 import expand, unused
from torch.nn.modules.utils import _single, _pair, _triple
from torch.onnx.utils import _add_block, _add_input_to_block, _add_output_to_block
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 11
@parse_args('v', 'f', 'f')
def hardtanh(g, self, min_val, max_val):
    """Export aten::hardtanh as an ONNX Clip with constant scalar bounds."""
    dtype = self.type().scalarType()
    if dtype is None:
        dtype = 6  # float (default when the input dtype is unknown)
    else:
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    torch_dtype = sym_help.scalar_type_to_pytorch_type[dtype]
    lower = g.op("Constant", value_t=torch.tensor(min_val, dtype=torch_dtype))
    upper = g.op("Constant", value_t=torch.tensor(max_val, dtype=torch_dtype))
    return g.op("Clip", self, lower, upper)
def clamp(g, self, min, max):
    """Export aten::clamp as ONNX Clip, casting the bounds to the input dtype."""
    dtype = self.type().scalarType()

    def _to_input_dtype(bound):
        # None / prim::Constant-none bounds are passed through untouched.
        if bound is None or sym_help._is_none(bound):
            return bound
        return g.op("Cast", bound, to_i=sym_help.cast_pytorch_to_onnx[dtype])

    if dtype is not None:
        min = _to_input_dtype(min)
        max = _to_input_dtype(max)
    return g.op("Clip", self, min, max)
def clamp_min(g, self, min):
    """Clamp from below only; the upper bound is left unset."""
    return clamp(g, self, min, unused(g))
def clamp_max(g, self, max):
    """Clamp from above only; the lower bound is left unset."""
    return clamp(g, self, unused(g), max)
# Opset 11 gather accepts negative indices
@parse_args('v', 'i', 'v')
def select(g, self, dim, index):
    """Export aten::select via ONNX Gather (opset 11 accepts negative indices)."""
    return g.op("Gather", self, index, axis_i=dim)
def index_put(g, self, indices_list_value, values, accumulate=False):
    """Export aten::index_put as ONNX: ScatterND for tensor indices,
    masked_fill/masked_scatter for a single boolean mask, or an ATen
    fallback op when that export mode is selected."""
    if sym_help._is_packed_list(indices_list_value):
        indices_list = sym_help._unpack_list(indices_list_value)
    else:
        indices_list = [indices_list_value]
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        args = [self] + indices_list + [values, accumulate]
        return g.op("ATen", *args, operator_s='index_put')
    from torch.onnx.symbolic_opset9 import add, expand
    accumulate = sym_help._parse_arg(accumulate, 'b')
    # No indices at all: the op degenerates to the values tensor.
    if len(indices_list) == 0:
        return values
    index = indices_list[0]
    if len(indices_list) > 1:
        # Advanced indexing over several dims: broadcast all index tensors to
        # a common shape, then stack them into one (..., ndims) index tensor.
        for ind in indices_list[1:]:
            index = add(g, index, ind)
        broadcast_index_shape = g.op("Shape", index)
        indices_list = [
            sym_help._unsqueeze_helper(g, expand(g, ind, broadcast_index_shape, None), [-1]) for ind in indices_list
        ]
        index = g.op("Concat", *indices_list, axis_i=-1)
    else:
        # Replace index_put node with masked_scatter or masked_fill
        # when inputs to the index_put node contains boolean inputs
        #
        # index_put -> masked_fill
        #   * input index contains single tensor of Bool type (e.g.: %24 <- %23).
        #   * input value contains single element (e.g.: %18).
        #
        # Torch IR
        #   %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
        #   %16 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
        #               aten::to(%8, %26, %27, %11, %12, %28, %29, %15)
        #   %18 : Float(requires_grad=0, device=cpu) = prim::Constant[value={1}]()
        #   %23 : Bool(8, strides=[1], device=cpu) = aten::view(%16, %22)
        #   %24 : Tensor?[] = prim::ListConstruct(%23)
        #   %25 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
        #                aten::index_put(%mask, %24, %18, %30)
        #   return (%25)
        #
        #
        # index_put -> masked_scatter
        #   * input index contains single tensor of Bool type (e.g.: %32 <- %31).
        #   * input value contains multiple elements (e.g.: %28).
        #
        # Torch IR
        #   %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
        #   %28 : Float(8, strides=[1], requires_grad=0, device=cpu)
        #                = prim::Constant[value= 1 1 1 1 1 1 1 1 [ CPUFloatType{8} ]]()
        #   %15 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = aten::ne(%mask, %some_const)
        #   %23 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = aten::to(%15, %34, %35, %18, %19, %36, %37, %22)
        #   %38 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %30 : int[] = prim::Constant[value=[-1]]()
        #   %31 : Bool(8, strides=[1], device=cpu) = aten::view(%23, %30)
        #   %32 : Tensor?[] = prim::ListConstruct(%31)
        #   %33 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = aten::index_put(%mask, %32, %28, %38)
        #   return (%33)
        bool_inp = index
        if bool_inp.type() is not None and bool_inp.type().scalarType() == 'Bool':
            rank = sym_help._get_tensor_rank(values)
            if rank is not None and rank == 0:
                from torch.onnx.symbolic_opset9 import masked_fill
                return masked_fill(g, self, bool_inp, values)
            return masked_scatter(g, self, bool_inp, values)
        broadcast_index_shape = g.op("Shape", index)
        index = sym_help._unsqueeze_helper(g, index, [-1])
    sub_data_shape = sym_help._slice_helper(
        g, g.op("Shape", self), axes=[0], starts=[len(indices_list)], ends=[maxsize])
    values_shape = g.op("Concat", broadcast_index_shape, sub_data_shape, axis_i=0)
    # Check if values is a singular value and expand accordingly
    rank = sym_help._get_tensor_rank(values)
    if rank is not None and rank == 0:
        values = expand(g, values, values_shape, None)
    values = g.op("Reshape", values, values_shape)
    # Align the values dtype with the data tensor before scattering.
    dtype = self.type().scalarType()
    if dtype is not None and dtype != values.type().scalarType():
        values = g.op("Cast", values, to_i=sym_help.cast_pytorch_to_onnx[dtype])
    dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
    dtype = sym_help.scalar_type_to_pytorch_type[dtype]
    if accumulate:
        # accumulate=True: scatter into zeros, then add to the original data.
        zeros = g.op("ConstantOfShape", g.op("Shape", self), value_t=torch.tensor([0], dtype=dtype))
        result = g.op("ScatterND", zeros, index, values)
        result = add(g, self, result)
    else:
        result = g.op("ScatterND", self, index, values)
    return result
@parse_args('v', 'i')
def pixel_shuffle(g, self, upscale_factor):
    """Export aten::pixel_shuffle as ONNX DepthToSpace in CRD mode."""
    input_rank = sym_help._get_tensor_rank(self)
    # Only a statically-known 4-D input is supported; unknown rank is allowed through.
    if input_rank not in (None, 4):
        return _unimplemented("pixel_shuffle", "only support 4d input")
    return g.op("DepthToSpace", self, blocksize_i=upscale_factor, mode_s="CRD")
def _interpolate(name, dim, interpolate_mode):
    # Factory: builds a symbolic function for one upsample variant by
    # delegating to the shared opset-11 interpolate helper.
    return sym_help._interpolate_helper(name, dim, interpolate_mode)
# Concrete upsample symbolics, one per spatial rank (3/4/5-D input) and mode.
upsample_nearest1d = _interpolate('upsample_nearest1d', 3, "nearest")
upsample_nearest2d = _interpolate('upsample_nearest2d', 4, "nearest")
upsample_nearest3d = _interpolate('upsample_nearest3d', 5, "nearest")
upsample_linear1d = _interpolate('upsample_linear1d', 3, "linear")
upsample_bilinear2d = _interpolate('upsample_bilinear2d', 4, "linear")
upsample_trilinear3d = _interpolate('upsample_trilinear3d', 5, "linear")
upsample_bicubic2d = _interpolate('upsample_bicubic2d', 4, "cubic")
def __interpolate(g, input, size, scale_factor, mode, align_corners, recompute_scale_factor):
    # Thin wrapper over the shared helper for the generic aten::__interpolate op.
    return sym_help.__interpolate_helper(g, input, size, scale_factor, mode, align_corners, recompute_scale_factor)
@parse_args('v', 'i', 'v', 'v')
def gather(g, self, dim, index, sparse_grad=False):
    """Export aten::gather as ONNX GatherElements (or ATen fallback)."""
    if sym_help._maybe_get_const(sparse_grad, 'i'):
        return _unimplemented("gather", "sparse_grad == True")
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, dim, index, sparse_grad, operator_s="gather")
    return g.op("GatherElements", self, index, axis_i=dim)
@parse_args('v', 'i', 'v', 'v')
def scatter(g, self, dim, index, src):
    """Export aten::scatter as ONNX ScatterElements, broadcasting scalar src."""
    from torch.onnx.symbolic_opset9 import expand_as
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, dim, index, src, operator_s="scatter")
    src_type = src.type().scalarType()
    src = sym_help._maybe_get_scalar(src)
    if sym_help._is_value(src):
        # src is a tensor value: scatter it directly.
        return g.op("ScatterElements", self, index, src, axis_i=dim)
    else:
        # Check if scalar 'src' has same type as self (PyTorch allows different
        # type for scalar src (but not when src is tensor)). If not, insert Cast node.
        if self.type().scalarType() != src_type:
            src = g.op("Cast", src, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
        # Broadcast the scalar to index's shape before scattering.
        return g.op("ScatterElements", self, index, expand_as(g, src, index), axis_i=dim)
@parse_args('v', 'i', 'none')
def cumsum(g, self, dim, dtype=None):
    """Export aten::cumsum as ONNX CumSum, casting first when dtype is given."""
    dim_tensor = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int))
    if dtype and dtype.node().kind() != 'prim::Constant':
        parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
        cast = g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
    else:
        cast = self
    csum = g.op("CumSum", cast, dim_tensor)
    return csum
def masked_select(g, self, mask):
    """Export aten::masked_select: NonZero of the broadcast mask, then GatherND."""
    from torch.onnx.symbolic_opset9 import expand_as, nonzero
    gather_indices = nonzero(g, expand_as(g, mask, self))
    return g.op('GatherND', self, gather_indices)
def masked_scatter(g, self, mask, source):
    """Export aten::masked_scatter as ScatterND over the mask's nonzero indices."""
    from torch.onnx.symbolic_opset9 import nonzero, expand_as, view, size
    index = nonzero(g, expand_as(g, mask, self))
    # NOTE: source can have more elements than needed.
    # It could also have arbitrary shape.
    # This is not supported by ONNX::ScatterND, so we need to flatten and slice source tensor.
    source = view(g, source, torch.LongTensor([-1]))
    source = sym_help._slice_helper(g, source,
                                    axes=torch.LongTensor([0]),
                                    starts=torch.LongTensor([0]),
                                    ends=size(g, index, torch.LongTensor([0])),
                                    dynamic_slice=True)
    return g.op('ScatterND', self, index, source)
def _len(g, self):
    """Export aten::len: SequenceLength for lists, else size of dim 0."""
    # NOTE(review): `_is_tensor_list` is used unqualified here while the rest of
    # this file calls `sym_help._is_tensor_list` — verify it is imported at file top.
    if _is_tensor_list(self) or self.node().kind() == "onnx::SplitToSequence":
        return g.op("SequenceLength", self)
    sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
    # Squeeze the 1-element shape tensor down to a scalar length.
    return sym_help._squeeze_helper(g, sz_0, [0])
def __getitem_(g, self, i):
    """Index a value: SequenceAt for tensor lists, opset-9 __getitem_ otherwise."""
    if not sym_help._is_tensor_list(self):
        from torch.onnx.symbolic_opset9 import __getitem_ as getitem
        return getitem(g, self, i)
    # SequenceAt requires that the input be a List of Tensors
    return g.op("SequenceAt", self, i)
def append(g, self, tensor):
    # aten::append on a tensor list -> ONNX SequenceInsert (appends at the end
    # when no position input is given).
    return g.op("SequenceInsert", self, tensor)
def add(g, self, other, alpha=None):
    """Export aten::add; tensor-list + tensor-list becomes repeated SequenceInsert."""
    if sym_help._is_value(self) and sym_help._is_tensor_list(self):
        tensor_list_node = other.node()
        if tensor_list_node.kind() != "prim::ListConstruct":
            return _unimplemented("add", "does not support adding dynamic tensor list to another")
        tensors = sym_help._unpack_list(other)
        l = self
        # Append each statically-known element of `other` onto `self`.
        for t in tensors:
            l = g.op("SequenceInsert", l, t)
        return l
    # Plain tensor addition is unchanged from opset 9.
    return torch.onnx.symbolic_opset9.add(g, self, other, alpha)
def insert(g, self, pos, tensor):
    # aten::insert -> SequenceInsert with an explicit position input.
    return g.op("SequenceInsert", self, tensor, pos)
def pop(g, tensor_list, dim):
    # aten::pop -> SequenceErase (removes the element at the given position).
    return g.op("SequenceErase", tensor_list, dim)
def Delete(g, tensor_list, dim):
    # aten::Delete -> SequenceErase; identical lowering to pop() above.
    return g.op("SequenceErase", tensor_list, dim)
def cat(g, tensor_list, dim):
    """Concatenate tensors: opset-9 Concat for static lists, ConcatFromSequence otherwise."""
    if not sym_help._is_packed_list(tensor_list):
        axis = sym_help._get_const(dim, 'i', 'dim')
        return g.op("ConcatFromSequence", tensor_list, axis_i=axis)
    from torch.onnx.symbolic_opset9 import cat as cat_opset9
    return cat_opset9(g, tensor_list, dim)
def stack(g, tensor_list, dim):
    """Stack tensors along a new axis: opset-9 for static lists, ConcatFromSequence(new_axis) otherwise."""
    if not sym_help._is_packed_list(tensor_list):
        axis = sym_help._get_const(dim, 'i', 'dim')
        return g.op("ConcatFromSequence", tensor_list, axis_i=axis, new_axis_i=1)
    from torch.onnx.symbolic_opset9 import stack as stack_opset9
    return stack_opset9(g, tensor_list, dim)
@parse_args('v', 'i', 'i', 'i')
def _unique2(g, self, sorted, return_inverse, return_counts):
    """Export aten::_unique2 as ONNX Unique (flattened input)."""
    # Unique must be declared with all 4 outputs; the first-occurrence
    # `indices` output has no aten counterpart here and is dropped.
    u, indices, inverse_indices, counts = g.op("Unique", self, sorted_i=sorted, outputs=4)
    return u, inverse_indices, counts
def _avg_pool(name, tuple_fn):
    # Factory producing the 1d/2d/3d average-pool symbolics; `tuple_fn`
    # normalizes scalar kernel/stride arguments to the right tuple length.
    @parse_args('v', 'is', 'is', 'is', 'i', 'i', 'none')
    def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):
        padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)
        if not stride:
            # PyTorch defaults stride to kernel_size when not given.
            stride = kernel_size
        if count_include_pad:
            # ONNX AveragePool excludes padding from the divisor, so emit an
            # explicit zero Pad first and pool with no padding.
            input = g.op("Pad", input,
                         g.op("Constant", value_t=torch.tensor(((0,) * 2 + padding) * 2)), mode_s='constant')
            padding = (0,) * len(padding)
        output = g.op("AveragePool", input,
                      kernel_shape_i=tuple_fn(kernel_size),
                      strides_i=tuple_fn(stride),
                      pads_i=padding * 2,
                      ceil_mode_i=ceil_mode)
        return output
    return symbolic_fn
# Concrete average-pool symbolics for 1/2/3 spatial dimensions.
avg_pool1d = _avg_pool('avg_pool1d', _single)
avg_pool2d = _avg_pool('avg_pool2d', _pair)
avg_pool3d = _avg_pool('avg_pool3d', _triple)
@parse_args('v', 'i', 'i', 'i')
def unique_dim(g, self, dim, sorted, return_inverse, return_counts):
    """Export aten::unique_dim as ONNX Unique along a given axis."""
    # The first-occurrence `indices` output is required by the op schema but unused.
    u, indices, inverse_indices, counts = g.op("Unique", self, axis_i=dim, sorted_i=sorted, outputs=4)
    return u, inverse_indices, counts
@parse_args('v', 'v', 'i', 'i', 'i', 'none')
def topk(g, self, k, dim, largest, sorted, out=None):
    # Delegates to the shared TopK helper (k may be a dynamic tensor in opset 11).
    return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)
@parse_args('v', 'i', 'i', 'none')
def sort(g, self, dim, decending, out=None):
    # NOTE: "decending" (sic) matches the keyword expected by _sort_helper,
    # so the misspelling must be kept for compatibility.
    return sym_help._sort_helper(g, self, dim, decending=decending, out=out)
def round(g, self):
    # aten::round maps directly onto the ONNX Round op (new in opset 11).
    return g.op("Round", self)
@parse_args('v', 'v', 'i', 'i')
def split(g, self, split_size_or_sizes, dim, _outputs=None):
    """Export aten::split; dynamic split sizes use SplitToSequence/Slice."""
    if not sym_help._is_split_static(split_size_or_sizes, _outputs):
        split_out = g.op("SplitToSequence", self, split_size_or_sizes, axis_i=dim)
        if _outputs is None:
            return split_out
        # Convert to multiple slice nodes iff number of splits and number of outputs are statically known.
        if sym_help._is_packed_list(split_size_or_sizes) and len(sym_help._unpack_list(split_size_or_sizes)) == _outputs:
            split_sizes = [sym_help._unsqueeze_helper(g, v, [0]) for v in sym_help._unpack_list(split_size_or_sizes)]
            start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
            axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
            res = []
            # Walk the axis, emitting one Slice per output chunk.
            for i in range(_outputs):
                end = g.op("Add", start, split_sizes[i])  # split_sizes is a list of same length as _outputs
                res.append(g.op("Slice", self, start, end, axis))
                start = end
            return res
        # Otherwise pull each element out of the sequence individually.
        return [g.op("SequenceAt", split_out, g.op("Constant", value_t=torch.tensor([i], dtype=torch.long)))
                for i in range(_outputs)]
    else:
        return torch.onnx.symbolic_opset9.split(g, self, split_size_or_sizes, dim, _outputs)
@parse_args('v', 'v', 'i', 'i')
def split_with_sizes(g, self, split_sizes, dim, _outputs=None):
    # aten::split_with_sizes shares the split() lowering above.
    return split(g, self, split_sizes, dim, _outputs)
@parse_args('v', 'i', 'i')
def unbind(g, self, dim=0, _outputs=None):
    """Export aten::unbind; unknown output count becomes SplitToSequence."""
    if _outputs is None:
        # keepdims=0 drops the split axis, matching unbind semantics.
        return g.op("SplitToSequence", self, g.op("Constant", value_t=torch.tensor(1, dtype=torch.long)), axis_i=dim, keepdims_i=0)
    else:
        return torch.onnx.symbolic_opset9.unbind(g, self, dim, _outputs)
# Generate paddings in ONNX order based on pad in pytorch.
# Args:
# dim: the dimension of the tensor.
# pad: the paddings in pytorch.
# The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ..., dim_m_begin, dim_m_end,
# where m is in range [0, n].
def _prepare_onnx_paddings(g, dim, pad):
    """Convert PyTorch pad order to ONNX Pad order as a 1-D int64 tensor.

    PyTorch lists pads innermost-dim first as (begin, end) pairs; ONNX wants
    all begins for dims 0..n-1 followed by all ends.
    """
    # The desired order of paddings is
    # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.
    # n is the dimension of input.
    # Assume zero-dimensions in the beginning, pad the "pad" sequence with zeros in the beginning
    pad_len = torch.onnx.symbolic_opset9.size(g, pad, g.op("Constant", value_t=torch.tensor([0])))
    # Set extension = [0] * (dim * 2 - len(pad))
    extension = g.op("Sub", g.op("Mul", g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int64)),
                                g.op("Constant", value_t=torch.tensor(2, dtype=torch.int64))), pad_len)
    # Concat pad with extension: paddings = [dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, 0, 0, ... ]
    # Currently ONNX only supports int64 type for Pad
    pad = g.op("Cast", pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    paddings = g.op("Concat", pad, g.op("ConstantOfShape", extension, value_t=torch.tensor([0], dtype=torch.int64)), axis_i=0)
    # Reshape and reverse order and collate first beginnings and then ends
    # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],
    #             [..., 0, dim_n-1_end, dim_n_end]]
    # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin, ..., 0, dim_n - 1_end, dim_n_end]
    paddings = g.op("Reshape", paddings, g.op("Constant", value_t=torch.tensor([-1, 2])))
    paddings = g.op("Transpose", torch.onnx.symbolic_opset10.flip(g, paddings, [0]), perm_i=[1, 0])
    paddings = g.op("Reshape", paddings, g.op("Constant", value_t=torch.tensor([-1])))
    padding_c = g.op("Cast", paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])
    return padding_c
def constant_pad_nd(g, input, padding, value=None):
    """Export aten::constant_pad_nd as ONNX Pad with a constant fill value."""
    fill = sym_help._maybe_get_scalar(value)
    fill = sym_help._if_scalar_type_as(g, fill, input)
    onnx_pads = _prepare_onnx_paddings(g, sym_help._get_tensor_rank(input), padding)
    return g.op("Pad", input, onnx_pads, fill, mode_s="constant")
def reflection_pad(g, input, padding):
    """Export reflection padding as ONNX Pad in "reflect" mode."""
    onnx_pads = _prepare_onnx_paddings(g, sym_help._get_tensor_rank(input), padding)
    return g.op("Pad", input, onnx_pads, mode_s="reflect")
def replication_pad(g, input, padding):
    """Export replication padding as ONNX Pad in "edge" mode."""
    onnx_pads = _prepare_onnx_paddings(g, sym_help._get_tensor_rank(input), padding)
    return g.op("Pad", input, onnx_pads, mode_s="edge")
# Per-rank pad entry points all share the rank-agnostic implementations above.
reflection_pad1d = reflection_pad
reflection_pad2d = reflection_pad
reflection_pad3d = reflection_pad
replication_pad1d = replication_pad
replication_pad2d = replication_pad
replication_pad3d = replication_pad
def linalg_det(g, self):
    # aten::linalg_det maps directly onto ONNX Det (opset 11+).
    return g.op("Det", self)
def logdet(g, input):
    # aten::logdet is exported as log(det(input)).
    from torch.onnx.symbolic_opset9 import log
    return log(g, linalg_det(g, input))
def arange(g, *args):
    """Export aten::arange as ONNX Range, dispatching on the overload arity.

    Supported signatures (by len(args)): 2/5 -> (end[, dtype, ...]),
    4/7 -> (start, end, step[, dtype, ...]), 6 -> (start, end, dtype, ...).
    """
    def _get_arange_dtype(dtype):
        dtype = sym_help._maybe_get_const(dtype, 'i')
        return dtype
    if len(args) == 2 or len(args) == 5:
        if len(args) == 2:
            # aten::arange(Scalar end, Tensor out)
            dtype = None
        else:
            # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
            dtype = _get_arange_dtype(args[1])
        type, end, start, step = sym_help._arange_cast_helper(g, end=args[0], dtype=dtype)
        # start defaults to 0 and step to 1 for the end-only overloads.
        start_default = g.op("Constant", value_t=torch.tensor(0, dtype=sym_help.scalar_type_to_pytorch_type[type]))
        delta_default = g.op("Constant", value_t=torch.tensor(1, dtype=sym_help.scalar_type_to_pytorch_type[type]))
        arange_tensor = g.op("Range", start_default, end, delta_default)
    elif len(args) == 4 or len(args) == 7:
        if len(args) == 4:
            # aten::arange(Scalar start, Scalar end, Scalar step, Tensor out)
            dtype = None
        else:
            # aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory)
            dtype = _get_arange_dtype(args[3])
        type, end, start, step = sym_help._arange_cast_helper(g, start=args[0], end=args[1], step=args[2], dtype=dtype)
        arange_tensor = g.op("Range", start, end, step)
    elif len(args) == 6:
        # aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
        dtype = _get_arange_dtype(args[2])
        type, end, start, step = sym_help._arange_cast_helper(g, start=args[0], end=args[1], dtype=dtype)
        # step defaults to 1 for this overload.
        delta_default = g.op("Constant", value_t=torch.tensor(1, dtype=sym_help.scalar_type_to_pytorch_type[type]))
        arange_tensor = g.op("Range", start, end, delta_default)
    else:
        raise NotImplementedError("Unknown aten::arange signature taking " + str(len(args)) + " arguments.")
    return arange_tensor
@parse_args('v', 'i')
def _dim_arange(g, like, dim):
    """Export aten::_dim_arange: arange(0, like.shape[dim]) as int64."""
    like_shape = g.op('Shape', like)
    stop = g.op("Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0)
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("_caffe2::Range", stop)
    # dtype argument 4 selects int64 in the scalar-type table.
    return arange(g, stop, 4, None, None, None)
def size(g, self, dim=None):
    """Export aten::size: the full shape when dim is None, otherwise one dimension."""
    if dim is not None:
        return sym_help._size_helper(g, self, dim)
    return g.op("Shape", self)
def squeeze(g, self, dim=None):
    """Export aten::squeeze; emits a runtime If when the dim size is unknown."""
    if dim is None:
        return g.op("Squeeze", self)
    dim = sym_help._get_const(dim, 'i', 'dim')
    input_rank = sym_help._get_tensor_rank(self)
    adjusted_dim = dim
    # Normalize a negative dim when the rank is statically known.
    if input_rank is not None and dim < 0:
        adjusted_dim += input_rank
    dim_size = sym_help._get_tensor_dim_size(self, adjusted_dim)
    if (dim < 0 and input_rank is None) or dim_size is None:
        # If onnx shape inference is not on, export always as dynamic.
        # Because we cannot tell if observed static shape is also static at runtime.
        # create 'cond' node (condition is shape[i]==1)
        dim_constant = g.op("Constant", value_t=torch.tensor([dim]))
        size = sym_help._size_helper(g, self, dim_constant)
        const_one = g.op("Constant", value_t=torch.ones(1, dtype=torch.int64))
        cond = g.op("Equal", size, const_one)
        # create the 'If' node and add the 'then' and 'else' blocks to it.
        if_node_outputs = g.op("If", cond)
        if_node = if_node_outputs.node()
        # then-branch: dim is 1 at runtime, squeeze it.
        if_block = torch.onnx.utils._add_block(if_node)
        squeeze_ = sym_help._squeeze_helper(if_block, self, [dim])
        torch.onnx.utils._add_output_to_block(if_block, squeeze_)
        # else-branch: dim is not 1, pass the input through unchanged.
        else_block = torch.onnx.utils._add_block(if_node)
        identity_ = else_block.op("Identity", self)
        torch.onnx.utils._add_output_to_block(else_block, identity_)
        return if_node_outputs
    # For static input shape
    dim = adjusted_dim
    if dim_size > 1:
        # PyTorch squeeze is a no-op here, so drop the node but warn about
        # the dynamic-shape hazard.
        warnings.warn("This model contains a squeeze operation on dimension " + str(dim) + ". The size of " +
                      "this dimension in the given input is " + str(dim_size) + ". The model will " +
                      "be exported without the squeeze node. If the model is intended to be used with dynamic " +
                      "input shapes, please export with dynamic_axes argument.")
        return self
    return sym_help._squeeze_helper(g, self, [dim])
@parse_args('v', 'i')
def unsqueeze(g, self, dim):
    # aten::unsqueeze delegates to the shared Unsqueeze helper.
    return sym_help._unsqueeze_helper(g, self, [dim])
def mm(g, self, other):
    # aten::mm -> Gemm with beta=0 (no bias term contributes).
    return g.op("Gemm", self, other, beta_f=0.0, alpha_f=1.0)
def index(g, self, index):
    """Export aten::index; a single bool/byte mask becomes NonZero + GatherND."""
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, index, operator_s="index")
    if sym_help._is_packed_list(index):
        indices = sym_help._unpack_list(index)
    else:
        indices = [index]
    # Handle single mask index.
    if len(indices) == 1:
        index = indices[0]
        if not sym_help._is_none(index) and (index.type().scalarType() == "Bool" or index.type().scalarType() == "Byte"):
            from torch.onnx.symbolic_opset9 import nonzero
            index = nonzero(g, index)
            return g.op('GatherND', self, index)
    # Everything else reuses the opset-9 advanced-indexing lowering.
    from torch.onnx.symbolic_opset9 import index as index_opset9
    return index_opset9(g, self, index)
def index_fill(g, self, dim, index, value):
    """Export aten::index_fill by scattering an expanded scalar value."""
    dim_value = sym_help._parse_arg(dim, 'i')
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, index, value, dim_i=dim_value, operator_s="index_fill")
    expanded_index_shape, expanded_index = sym_help._index_fill_reshape_helper(g, self, dim, index)
    value = sym_help._maybe_get_scalar(value)
    value = sym_help._if_scalar_type_as(g, value, self)
    # Broadcast the fill value to the expanded index shape, then scatter.
    expanded_value = expand(g, value, expanded_index_shape, None)
    return scatter(g, self, dim, expanded_index, expanded_value)
def index_copy(g, self, dim, index, source):
    """Export aten::index_copy by scattering source along the expanded index."""
    dim_value = sym_help._parse_arg(dim, 'i')
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, index, source, dim_i=dim_value, operator_s="index_copy")
    expanded_index_shape, expanded_index = sym_help._index_fill_reshape_helper(g, self, dim, index)
    return scatter(g, self, dim, expanded_index, source)
def __rshift_(g, self, other):
    """Export aten::__rshift__: BitShift for Byte tensors, otherwise division by 2**other."""
    self_dtype = self.type().scalarType()
    # make sure to cast other to self's type
    # (when self is long, make sure that other is not float)
    if other.type().scalarType() != self_dtype:
        other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx[self_dtype])
    if self_dtype == 'Byte':
        return g.op('BitShift', self, other, direction_s="RIGHT")
    # exponent (same type as self) has to be float or double in onnx::Pow
    if not sym_help._is_fp(self):
        other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx['Float'])
    two = g.op('Constant', value_t=torch.tensor(2, dtype=torch.float32))
    two_pow = g.op('Cast', g.op('Pow', two, other), to_i=sym_help.cast_pytorch_to_onnx[self_dtype])
    return g.op('Div', self, two_pow)
def __lshift_(g, self, other):
    """Export aten::__lshift__: BitShift for Byte tensors, otherwise multiplication by 2**other."""
    self_dtype = self.type().scalarType()
    # make sure to cast other to self's type
    # (when self is long, make sure that other is not float)
    if other.type().scalarType() != self_dtype:
        other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx[self_dtype])
    if self_dtype == 'Byte':
        return g.op('BitShift', self, other, direction_s="LEFT")
    # exponent (same type as self) has to be float or double in onnx::Pow
    if not sym_help._is_fp(self):
        other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx['Float'])
    two = g.op('Constant', value_t=torch.tensor(2, dtype=torch.float32))
    two_pow = g.op('Cast', g.op('Pow', two, other), to_i=sym_help.cast_pytorch_to_onnx[self_dtype])
    return g.op('Mul', self, two_pow)
def _get_im2col_indices_along_dim(g, input_d, kernel_size_d, dilation_d, padding_d, stride_d):
    """Build the gather indices of sliding kernel windows along one spatial dim."""
    # Input is always 4-D (N, C, H, W)
    # Calculate indices of sliding blocks along spatial dimension
    # Slide kernel over input each dim d:
    # each dimension d ranges from 0 to input[d]+2xpadding[d]-dilation[d]x(kernel_size[d]-1)
    # with steps = stride
    blocks_d = g.op("Add", input_d, g.op("Constant", value_t=torch.tensor(padding_d * 2)))
    blocks_d = g.op("Sub", blocks_d, g.op("Constant", value_t=torch.tensor(dilation_d * (kernel_size_d - 1))))
    # Stride kernel over input and find starting indices along dim d
    blocks_d_indices = g.op("Range", g.op("Constant", value_t=torch.tensor(0)),
                            blocks_d, g.op("Constant", value_t=torch.tensor(stride_d)))
    # Apply dilation on kernel and find its indices along dim d
    kernel_grid = numpy.arange(0, kernel_size_d * dilation_d, dilation_d)
    kernel_grid = g.op("Constant", value_t=torch.tensor([kernel_grid]))
    # Broadcast and add kernel staring positions (indices) with
    # kernel_grid along dim d, to get block indices along dim d
    blocks_d_indices = sym_help._unsqueeze_helper(g, blocks_d_indices, [0])  # Reshape to [1, -1]
    kernel_mask = g.op('Reshape', kernel_grid, g.op('Constant', value_t=torch.tensor([-1, 1])))
    block_mask = g.op("Add", blocks_d_indices, kernel_mask)
    return block_mask
def _get_im2col_padded_input(g, input, padding_h, padding_w):
    """Zero-pad the H and W dims of a 4-D input symmetrically for im2col."""
    # Input is always 4-D tensor (N, C, H, W)
    # Padding tensor has the following format: (padding_h, padding_w)
    # Reshape the padding to follow ONNX format: (dim1_begin, dim2_begin,...,dim1_end, dim2_end,...)
    pad = g.op("Constant", value_t=torch.LongTensor([0, 0, padding_h, padding_w] * 2))
    return g.op("Pad", input, pad)
def _get_im2col_output_shape(g, input, kernel_h, kernel_w):
    """Compute the im2col output shape tensor: (N, C*kh*kw, -1)."""
    batch_dim = size(g, input, g.op("Constant", value_t=torch.tensor(0)))
    channel_dim = size(g, input, g.op("Constant", value_t=torch.tensor(1)))
    channel_unfolded = g.op("Mul", channel_dim,
                            g.op("Constant", value_t=torch.tensor(kernel_h * kernel_w)))
    # -1 lets Reshape infer the number of sliding-window positions.
    return g.op("Concat",
                sym_help._unsqueeze_helper(g, batch_dim, [0]),
                sym_help._unsqueeze_helper(g, channel_unfolded, [0]),
                g.op("Constant", value_t=torch.tensor([-1])), axis_i=0)
@parse_args('v', 'is', 'is', 'is', 'is')
def im2col(g, input, kernel_size, dilation, padding, stride):
    """Export aten::im2col (unfold) via two Gathers, a Transpose and a Reshape."""
    # Input is always 4-D tensor (N, C, H, W)
    # All other args are int[2]
    input_h = size(g, input, g.op("Constant", value_t=torch.tensor(2)))
    input_w = size(g, input, g.op("Constant", value_t=torch.tensor(3)))
    stride_h, stride_w = stride[0], stride[1]
    padding_h, padding_w = padding[0], padding[1]
    dilation_h, dilation_w = dilation[0], dilation[1]
    kernel_h, kernel_w = kernel_size[0], kernel_size[1]
    blocks_row_indices = _get_im2col_indices_along_dim(g, input_h, kernel_h, dilation_h, padding_h, stride_h)
    blocks_col_indices = _get_im2col_indices_along_dim(g, input_w, kernel_w, dilation_w, padding_w, stride_w)
    output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)
    padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w)
    # For a 4D matrix of size (1, 1, 3, 3) as below with kernel_size=2, stride=1, and dilation=1
    # [[[[1., 2., 3.,],
    #    [4., 5., 6.,],
    #    [7., 8., 9.,]]]]
    # First gather indices along rows (dim=2) with blocks_row_indices = [[0,1], [1,2]] to get:
    # [[[[[1., 2., 3.],
    #     [4., 5., 6.]],
    #    [[4., 5., 6.],
    #     [7., 8., 9.]]]]]
    # And then gather along cols (dim=4) with blocks_row_indices = [[0,1], [1,2]] to get:
    # [[[[[[1., 2.],
    #      [4., 5.]],
    #     [[2., 3.],
    #      [5., 6]]],
    #    [[[4., 5.],
    #      [7., 8.]],
    #     [[5., 6.],
    #      [8., 9.]]]]]]
    # Transpose dims 3 (depth) and 4 (rows), and then reshape to output shape (1, 1, 4, 4) to get:
    #  [[[1., 2., 4., 5.],
    #    [2., 3., 5., 6.],
    #    [4., 5., 7., 8.],
    #    [5., 6., 8., 9.]]]
    output = g.op("Gather", padded_input, blocks_row_indices, axis_i=2)
    output = g.op("Gather", output, blocks_col_indices, axis_i=4)
    output = g.op("Transpose", output, perm_i=[0, 1, 2, 4, 3, 5])
    return g.op("Reshape", output, output_shape)
def narrow(g, input, dim, start, length):
    """Export aten::narrow as a dynamic Slice over [start, start + length)."""
    from torch.onnx.symbolic_helper import _slice_helper
    stop = g.op("Add", start, length)
    return _slice_helper(g, input, axes=dim, starts=start, ends=stop, dynamic_slice=True)
@parse_args('v', 'i', 'i')
def flatten(g, input, start_dim, end_dim):
    """Export aten::flatten; uses ONNX Flatten when the result is 2-D."""
    dim = sym_help._get_tensor_rank(input)
    # use ONNX's Flatten operator for cases where the output shape is 2D
    if start_dim == 1:
        if (end_dim == -1 or (dim is not None and end_dim == dim - 1)):
            return g.op("Flatten", input, axis_i=start_dim)
    elif start_dim == 0:
        if (end_dim == -2 or (dim is not None and end_dim == dim - 2)):
            return g.op("Flatten", input, axis_i=end_dim + 1)
    if dim is None:
        return _unimplemented("dim",
                              "ONNX and PyTorch use different strategies to split the input. "
                              "Input rank must be known at export time.")
    # if end_dim is negative add dim
    if end_dim < 0 :
        end_dim = dim + end_dim
    return sym_help._flatten_helper(g, input, start_dim, end_dim, dim)
@parse_args('v', 'v', 'v', 'i', 'i', 'i', 'v', 'i', 'i')
def embedding_bag(g,
                  embedding_matrix,
                  indices,
                  offsets,
                  scale_grad_by_freq,
                  mode,
                  sparse,
                  per_sample_weights,
                  include_last_offset,
                  padding_idx):
    """Export aten::embedding_bag as an ONNX Loop that reduces one bag per iteration.

    mode selects the reduction: 0 -> sum, 1 -> mean, otherwise max.
    Raises RuntimeError for padding_idx, which has no ONNX lowering here.
    """
    if scale_grad_by_freq and sym_help._training_mode:
        return sym_help._onnx_unsupported('embedding_bag with scale_grad_by_freq for training mode')
    if padding_idx is not None and padding_idx >= 0:
        raise RuntimeError('embedding_bag with padding_idx')
    # Loop condition is a constant true; to_i=9 casts to ONNX bool.
    loop_condition = g.op("Constant", value_t=torch.tensor(1))
    loop_condition = g.op("Cast", loop_condition, to_i=9)
    zero = g.op("Constant", value_t=torch.tensor([0]))
    indices_len = sym_help._unsqueeze_helper(g,
                                             sym_help._size_helper(g, indices, g.op("Constant", value_t=torch.tensor(0))),
                                             [0])
    if not include_last_offset:
        # Append len(indices) so the last bag has an explicit end offset.
        offsets = [offsets, indices_len]
        offsets = g.op("Concat", *offsets, axis_i=0)
    # Offsets holds the starting index position of each bag. So we create a list of the indices slices (determined by
    # offsets) and gather those indices in indices_row. Then we use this subset of indices to gather from embeddings.
    # The embeddings output is a loop scan output, so we can avoid creating a sequence and inserting elements in.
    offsets_starts = sym_help._slice_helper(g, offsets, axes=[0], starts=[0], ends=[maxsize], steps=[1])
    offsets_ends = sym_help._slice_helper(g, offsets, axes=[0], starts=[1], ends=[maxsize], steps=[1])
    loop_len = sym_help._size_helper(g, offsets_ends, g.op("Constant", value_t=torch.tensor(0)))
    loop = g.op("Loop", loop_len, loop_condition)
    loop_block = _add_block(loop.node())
    block_input_iter = _add_input_to_block(loop_block)
    cond = _add_input_to_block(loop_block)
    # Slice out this bag's indices, then gather its embedding rows.
    indices_start = loop_block.op("Gather", offsets_starts, block_input_iter, axis_i=0)
    indices_end = loop_block.op("Gather", offsets_ends, block_input_iter, axis_i=0)
    indices_start = sym_help._unsqueeze_helper(loop_block, indices_start, [0])
    indices_end = sym_help._unsqueeze_helper(loop_block, indices_end, [0])
    indices_row = loop_block.op("Slice", indices, indices_start, indices_end, zero)
    embeddings = loop_block.op("Gather", embedding_matrix, indices_row, axis_i=0)
    if not sym_help._is_none(per_sample_weights):
        per_sample_weights_row = loop_block.op("Slice", per_sample_weights,
                                               indices_start,
                                               indices_end,
                                               zero)
        per_sample_weights_row = sym_help._unsqueeze_helper(loop_block, per_sample_weights_row, [1])
        embeddings = loop_block.op("Mul", embeddings, per_sample_weights_row)
    if mode == 0:
        embeddings = sym_help._reducesum_helper(loop_block, embeddings, axes_i=[0], keepdims_i=0)
    elif mode == 1:
        embeddings = loop_block.op("ReduceMean", embeddings, axes_i=[0], keepdims_i=0)
    else:
        embeddings = loop_block.op("ReduceMax", embeddings, axes_i=[0], keepdims_i=0)
    cond_out = loop_block.op("Cast", loop_condition, to_i=9)
    _add_output_to_block(loop_block, cond_out)
    _add_output_to_block(loop_block, embeddings)
    # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.
    # But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.
    return loop.node().output(), None, None, None
def prim_ConstantChunk(g, self, chunks, dim):
    """Export prim::ConstantChunk as `chunks` Slice nodes along `dim`.

    Chunk size is ceil(dim_size / chunks), computed via (size + chunks - 1) // chunks.
    """
    input_shape = g.op("Shape", self)
    axis = g.op("Constant", value_t=torch.tensor([dim], dtype=torch.long))
    input_shape_dim = g.op("Gather", input_shape, axis, axis_i=0)
    start = g.op("Constant", value_t=torch.tensor([0], dtype=torch.long))
    chunk_size = g.op("Constant", value_t=torch.tensor([chunks], dtype=torch.long))
    chunk_size_minus_1 = g.op("Constant", value_t=torch.tensor([chunks - 1], dtype=torch.long))
    input_shape_dim_shift = g.op("Add", input_shape_dim, chunk_size_minus_1)
    chunk_dim = g.op("Div", input_shape_dim_shift, chunk_size)
    res = []
    for i in range(chunks):
        # Each slice runs from the previous end to (i + 1) * chunk_dim;
        # Slice clamps ends past the axis size, handling a short last chunk.
        index = g.op("Constant", value_t=torch.tensor([i + 1], dtype=torch.long))
        end = g.op("Mul", chunk_dim, index)
        res.append(g.op("Slice", self, start, end, axis))
        start = end
    return res
def repeat_interleave(g, self, repeats, dim=None):
    """Export aten::repeat_interleave using an ONNX Loop over the split input.

    Handles the dynamic-shape cases (scalar repeats on an unknown dim size, or a
    1-D repeats tensor along a dynamic axis); other cases fall back to opset 9.
    """
    from torch.onnx.symbolic_opset9 import reshape
    input = self
    final_dim = dim
    # if dim is None flatten
    # By default, use the flattened input array, and return a flat output array
    if sym_help._is_none(dim):
        input = reshape(g, self, g.op("Constant", value_t=torch.tensor([-1])))
        dim = 0
    else:
        dim = sym_help._maybe_get_scalar(dim)
    repeats_dim = sym_help._get_tensor_rank(repeats)
    repeats_sizes = sym_help._get_tensor_sizes(repeats)
    input_sizes = sym_help._get_tensor_sizes(input)
    if repeats_dim is None:
        raise RuntimeError('Unsupported: ONNX export of repeat_interleave for unknown '
                           'repeats rank.')
    if repeats_sizes is None:
        raise RuntimeError('Unsupported: ONNX export of repeat_interleave for unknown '
                           'repeats size.')
    if input_sizes is None:
        raise RuntimeError('Unsupported: ONNX export of repeat_interleave for unknown '
                           'input size.')
    # Handle cases where dim is negative
    if dim < 0:
        dim += len(input_sizes)
    output_sizes = input_sizes.copy()
    # perm_i will move the interleaved axis back into place after the Loop;
    # dynamic (None) dims are marked with 0/-1 sentinels for the reshapes below.
    perm_i = [0]
    for idx, input_size in enumerate(input_sizes):
        perm_i.append(idx + 1)
        if input_size is None:
            output_sizes[idx], input_sizes[idx] = 0, -1
    perm_i[0], perm_i[dim] = perm_i[dim], perm_i[0]
    # Cases when repeats is a single value tensor and dim has unknown input size
    if (repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1)) and output_sizes[dim] == 0:
        if not sym_help._is_tensor(repeats):
            repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
        reps = sym_help._size_helper(g, input, dim)
        reps = unsqueeze(g, reps, 0)
        # Broadcast the scalar repeat count across the whole axis.
        repeats = g.op("Expand", repeats, reps)
    # There are cases when the repeats are 1-d tensor with multiple repeats, but dim
    # provided along one of the dynamic axes provided. A simple example would be
    # input.shape -> [1, 1, *] where * represents the dynamic axes, and dim = 2
    # Now, repeat interleaving can be performed in pytorch when the value of * matches
    # with the number of elements in repeat, for example if * -> 2, number of repeats
    # should be 2 as well.
    else:
        return torch.onnx.symbolic_opset9.repeat_interleave(g, self, repeats, final_dim)
    # Split repeats and the input into per-element sequences of length 1.
    reps_like = g.op("ConstantOfShape", g.op("Shape", repeats),
                     value_t=torch.tensor([1], dtype=torch.long))
    r_splits = split(g, repeats, reps_like, 0)
    i_splits = split(g, input, reps_like, dim)
    output_sizes[dim], input_sizes[dim] = -1, 1
    # Create a loop to iterate over each value along the dimension
    # and perform individual interleaving using the repeats tensor
    # Loop is of the following pattern
    # input (trip_count, cond)
    # int trip_count = ...;
    # bool cond = ...;
    # for (int i=0; i < trip_count && cond; ++i) {
    #     cond = ...;
    # }
    # Loop conditions
    loop_condition = g.op("Constant", value_t=torch.tensor(1))
    loop_condition = g.op("Cast", loop_condition, to_i=9)
    loop_len = reps
    loop = g.op("Loop", loop_len, loop_condition)
    # Loop inputs
    loop_block = _add_block(loop.node())
    block_input_iter = _add_input_to_block(loop_block)
    cond = _add_input_to_block(loop_block)
    r_split = loop_block.op("SequenceAt", r_splits, block_input_iter)
    i_split = loop_block.op("SequenceAt", i_splits, block_input_iter)
    # Expand the current slice r_split times along a fresh axis, then collapse it.
    i_split = unsqueeze(loop_block, i_split, dim + 1)
    r_concat = [loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[:dim + 1])),
                r_split,
                loop_block.op("Constant", value_t=torch.LongTensor(input_sizes[dim + 1:]))]
    r_concat = loop_block.op("Concat", *r_concat, axis_i=0)
    i_split = expand(loop_block, i_split, r_concat, None)
    i_split = reshape(loop_block, i_split, g.op("Constant", value_t=torch.LongTensor(output_sizes)))
    # Loop outputs
    cond_out = loop_block.op("Cast", loop_condition, to_i=9)
    _add_output_to_block(loop_block, cond_out)
    _add_output_to_block(loop_block, i_split)
    loop_out = loop.node().output()
    # In this loop, the outputs are scan outputs and are concatenated along
    # the zero'th dimension (by default). In order to avoid this and concatenate
    # along the dimension provided, some post-processing is required
    loop_out = g.op("Transpose", loop_out, perm_i=perm_i)
    return reshape(g, loop_out, g.op("Constant", value_t=torch.LongTensor(output_sizes)))
| 44.717759 | 131 | 0.650379 |
8e06ecc37fe3c7245408dde4b7b391c97cc6c815 | 1,474 | py | Python | my_utils/log_wrapper.py | ashishbaghudana/san_mrc | 03ed7d94c735f1fe2854bb9c208385b5fde44905 | [
"BSD-3-Clause"
] | 1 | 2019-08-23T13:33:37.000Z | 2019-08-23T13:33:37.000Z | my_utils/log_wrapper.py | ashishbaghudana/san_mrc | 03ed7d94c735f1fe2854bb9c208385b5fde44905 | [
"BSD-3-Clause"
] | null | null | null | my_utils/log_wrapper.py | ashishbaghudana/san_mrc | 03ed7d94c735f1fe2854bb9c208385b5fde44905 | [
"BSD-3-Clause"
] | null | null | null | import logging
import sys
from time import gmtime, strftime
from colorlog import ColoredFormatter
def create_logger(name, silent=False, to_disk=False, log_file=None, prefix=None):
    """Build and return a DEBUG-level logger.

    Unless ``silent``, an INFO-level colored console handler (stdout) is
    attached; when ``to_disk``, a DEBUG-level file handler is added as well,
    defaulting to a timestamped '<prefix>-....log' file name.  Propagation to
    ancestor loggers is disabled.
    """
    logger_obj = logging.getLogger(name)
    logger_obj.setLevel(logging.DEBUG)
    console_fmt = ColoredFormatter(
        "%(asctime)s %(log_color)s%(levelname)-8s%(reset)s [%(blue)s%(message)s%(reset)s]",
        datefmt='%Y-%m-%d %I:%M:%S',
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red,bg_white',
        },
        secondary_log_colors={},
        style='%'
    )
    file_fmt = logging.Formatter(
        "%(asctime)s [%(funcName)-12s] %(levelname)-8s [%(message)s]",
        datefmt='%Y-%m-%d %I:%M:%S',
        style='%'
    )
    if not silent:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(console_fmt)
        logger_obj.addHandler(console_handler)
    if to_disk:
        prefix = 'my_log' if prefix is None else prefix
        if log_file is None:
            log_file = strftime('{}-%Y-%m-%d-%H-%M-%S.log'.format(prefix), gmtime())
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(file_fmt)
        logger_obj.addHandler(file_handler)
    # do not propagate records to ancestor loggers (avoids duplicate output)
    logger_obj.propagate = False
    return logger_obj
| 30.081633 | 118 | 0.580733 |
ae2cbe4d69c97473f67868c3bd3018493f6120f8 | 5,553 | py | Python | telegram_bot/decorators.py | torbencarstens/bildbot | 69dd04cab8cf5ff4e9bdbf1773983ecf024cfe70 | [
"MIT"
] | 1 | 2022-03-29T08:27:09.000Z | 2022-03-29T08:27:09.000Z | telegram_bot/decorators.py | torbencarstens/bildbot | 69dd04cab8cf5ff4e9bdbf1773983ecf024cfe70 | [
"MIT"
] | 1 | 2020-03-15T16:12:22.000Z | 2020-03-15T16:46:55.000Z | telegram_bot/decorators.py | preparingforexams/bildbot | 8e50f3099044344d77033b63ebcf5db721cc72b9 | [
"MIT"
] | 1 | 2021-03-28T05:10:15.000Z | 2021-03-28T05:10:15.000Z | from __future__ import annotations
import inspect
from datetime import timedelta
from telegram import Update
from telegram.ext import CallbackContext
from . import bot
from . import chat
from . import logger
from . import user
class Command:
    """Decorator factory for telegram bot command handlers.

    Before the wrapped handler runs, the chat and user from the incoming
    ``Update`` are registered on the bot, permission checks are applied
    (``main_admin``: only the configured main chat may run the command;
    ``chat_admin``: only chat administrators may run it), and the bot state
    is saved afterwards in a ``finally`` block.
    """

    def __init__(self, chat_admin: bool = False, main_admin: bool = False):
        self.chat_admin = chat_admin
        self.main_admin = main_admin

    @staticmethod
    def _add_chat(clazz, update: Update, context: CallbackContext) -> chat.Chat:
        """Fetch the update's chat from the bot registry, creating it if missing."""
        new_chat = clazz.chats.get(update.effective_chat.id)
        if not new_chat:
            new_chat = chat.Chat(update.effective_chat.id, clazz.updater.bot)
            clazz.chats[new_chat.id] = new_chat
        context.chat_data["chat"] = new_chat
        # keep title/type in sync with what telegram reports now
        new_chat.title = update.effective_chat.title
        new_chat.type = update.effective_chat.type
        return new_chat

    @staticmethod
    def _add_user(update: Update, context: CallbackContext) -> user.User:
        """Build a User object from the update's effective telegram user."""
        return user.User.from_tuser(update.effective_user)

    def __call__(self, func):
        def wrapped_f(*args, **kwargs):
            exception = None
            log = logger.create_logger(f"command_{func.__name__}")
            log.debug("Start")
            log.debug(f"args: {args} | kwargs: {kwargs}")
            # Recover self/update/context from the handler's bound arguments.
            signature = inspect.signature(func)
            arguments = signature.bind(*args, **kwargs).arguments
            clazz: bot.Bot = arguments.get("self")
            update: Update = arguments.get("update")
            context: CallbackContext = arguments.get("context")
            execution_message: str = f"Executing {func.__name__}"
            finished_execution_message: str = f"Finished executing {func.__name__}"
            if not update:
                # Called internally by the bot (no telegram update): skip all checks.
                log.debug("Execute function due to coming directly from the bot.")
                log.debug(execution_message)
                result = func(*args, **kwargs)
                log.debug(finished_execution_message)
                return result
            log.debug(f"message from user: {update.effective_user.first_name}")
            current_chat = context.chat_data.get("chat")
            if not current_chat:
                current_chat = self._add_chat(clazz, update, context)
            current_chat.type = update.effective_chat.type
            if not clazz.chats.get(current_chat.id):
                clazz.chats[current_chat.id] = current_chat
            current_user = current_chat.get_user_by_id(update.effective_user.id)
            if not current_user:
                current_user = self._add_user(update, context)
                current_chat.add_user(current_user)
            context.user_data["user"] = current_user
            if self.main_admin:
                if current_chat.id == clazz.state.get("main_id"):
                    log.debug("Execute function due to coming from the main_chat")
                else:
                    # BUGFIX: interpolate the actual chat object; the original
                    # f-string referenced the imported `chat` module instead.
                    message = f"Chat {current_chat} is not allowed to perform this action."
                    log.warning(message)
                    clazz.mute_user(chat_id=current_chat.id, user=current_user, until_date=timedelta(minutes=15),
                                    reason=message)
                    exception = PermissionError()
            if self.chat_admin:
                if current_chat.type == chat.ChatType.PRIVATE:
                    log.debug("Execute function due to coming from a private chat")
                elif current_user in current_chat.administrators():
                    log.debug(
                        f"User ({current_user.name}) is a chat admin and therefore allowed to perform this action, executing")
                else:
                    log.error(
                        f"User ({current_user.name}) isn't a chat_admin and is not allowed to perform this action.")
                    exception = PermissionError()
            if update.effective_message:
                log.debug(f"Message: {update.effective_message.text}")
                current_chat.add_message(update)  # Needs user in chat
            log.debug(execution_message)
            try:
                if exception:
                    raise exception
                result = func(*args, **kwargs)
                log.debug(finished_execution_message)
                return result
            except PermissionError:
                if update.effective_message:
                    update.effective_message.reply_text(
                        f"You ({current_user.name}) are not allowed to perform this action.")
            except Exception as e:
                # Log for debugging purposes
                log.error(str(e), exc_info=True)
                raise e
            finally:
                clazz.save_state()
                log.debug("End")
        return wrapped_f
def group(function):
    """Decorator: run the wrapped chat method only for group chats.

    Returns False (without calling ``function``) when the chat is private;
    raises TypeError when the decorated object's ``type`` attribute is
    missing or not a ``str``/``chat.ChatType``.
    """
    def wrapper(clz: chat.Chat, *args, **kwargs):
        log = logger.create_logger(f"group_wrapper_{function.__name__}")
        log.debug("Start")
        type_ok = hasattr(clz, "type") and isinstance(clz.type, (str, chat.ChatType))
        if not type_ok:
            message = "group decorator can only be used on a class which has a `type` attribute of type `str` or `chat.ChatType`."
            log.error(message)
            raise TypeError(message)
        if clz.type == chat.ChatType.PRIVATE:
            log.debug("Not executing group function in private chat.")
            return False
        log.debug("Execute function")
        return function(clz, *args, **kwargs)
    return wrapper
| 38.296552 | 130 | 0.593913 |
238502c9d37dce4d01ecd710ebc042803c28bfb1 | 5,692 | py | Python | vnpy/api/ctp/vnctpmd/test/mdtest.py | riverdarda/vnpy.-msincense | 4f39ef3269082581171f3d0d6f046224266a8d21 | [
"MIT"
] | 3 | 2020-08-14T00:06:32.000Z | 2021-11-22T00:50:02.000Z | vnpy/api/ctp/vnctpmd/test/mdtest.py | riverdarda/vnpy.-msincense | 4f39ef3269082581171f3d0d6f046224266a8d21 | [
"MIT"
] | null | null | null | vnpy/api/ctp/vnctpmd/test/mdtest.py | riverdarda/vnpy.-msincense | 4f39ef3269082581171f3d0d6f046224266a8d21 | [
"MIT"
] | 3 | 2020-03-07T12:45:00.000Z | 2021-02-14T03:10:38.000Z | # encoding: UTF-8
import sys,os
from time import sleep
#from qtpy import QtGui
# Make the repository root importable when this test script is run from its source tree.
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','..','..','..'))
if vnpy_root not in sys.path:
    print(u'append {}'.format(vnpy_root))
    sys.path.append(vnpy_root)
from vnpy.api.ctp.vnctpmd import MdApi
from threading import Thread
#----------------------------------------------------------------------
def print_dict(d):
    """Print every key/value pair of *d*, one per line, as ``key:value``."""
    for k, v in d.items():
        print('{}:{}'.format(k, str(v)))
#----------------------------------------------------------------------
def simple_log(func):
    """Decorator: print a blank line and the wrapped callable's name before delegating."""
    def _logged(*args, **kwargs):
        print("")
        print(str(func.__name__))
        return func(*args, **kwargs)
    return _logged
########################################################################
class TestMdApi(MdApi):
    """Test instance: every market-data API callback just prints what it received."""
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(TestMdApi, self).__init__()
        # flipped to True by onFrontConnected; main() polls this flag
        self.is_connected = False
    #----------------------------------------------------------------------
    @simple_log
    def onFrontConnected(self):
        """Front server connected."""
        # NOTE(review): message says 'tdtest.py' but this file is mdtest.py — confirm intended
        print('tdtest.py: onFrontConnected')
        self.is_connected = True
    #----------------------------------------------------------------------
    @simple_log
    def onFrontDisconnected(self, n):
        """Front server disconnected."""
        print (n)
        self.is_connected = False
    #----------------------------------------------------------------------
    @simple_log
    def onHeartBeatWarning(self, n):
        """Heartbeat warning."""
        print (n)
    #----------------------------------------------------------------------
    @simple_log
    def onRspError(self, error, n, last):
        """Error response."""
        print_dict(error)
    @simple_log
    #----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Login response."""
        print_dict(data)
        print_dict(error)
        print('onRspUserLogin')
    #----------------------------------------------------------------------
    @simple_log
    def onRspUserLogout(self, data, error, n, last):
        """Logout response."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspSubMarketData(self, data, error, n, last):
        """Market-data subscription response."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspUnSubMarketData(self, data, error, n, last):
        """Market-data unsubscription response."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRtnDepthMarketData(self, data):
        """Depth market-data push."""
        print_dict(data)
    #----------------------------------------------------------------------
    @simple_log
    def onRspSubForQuoteRsp(self, data, error, n, last):
        """Quote-request subscription response."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRspUnSubForQuoteRsp(self, data, error, n, last):
        """Quote-request unsubscription response."""
        print_dict(data)
        print_dict(error)
    #----------------------------------------------------------------------
    @simple_log
    def onRtnForQuoteRsp(self, data):
        """Quote-request push."""
        print_dict(data)
# Changjiang (active front addresses)
md_addr = "tcp://124.74.10.62:47213"
td_addr = "tcp://124.74.10.62:43205"
# Galaxy — China Unicom:
#md_addr = "tcp://114.255.82.175:31213"
#td_addr = "tcp://114.255.82.175:31205"
# Galaxy — China Telecom
#md_addr = "tcp://106.39.36.72:31213"
#td_addr = "tcp://106.39.36.72:31205"
# SECURITY NOTE(review): account credentials are hard-coded in source control;
# move them to a config file or environment variables.
user_id = "70000989"
user_pass = "cjqh@123"
app_id = "client_huafu_2.0.0"
auth_code = "T14ZHEJ5X7EH6VAM"
broker_id = '4300'
#----------------------------------------------------------------------
def main():
    """Main test routine; if it blocks, consider inserting sleep() calls."""
    reqid = 0
    # Create the Qt application object for the event loop
    #app = QtGui.QApplication(sys.argv)
    # Create the API object
    api = TestMdApi()
    # Create the MdApi object on the C++ side; the argument is the directory used to store .con files
    print('create mdapi')
    api.createFtdcMdApi('')
    # Register the market-data front server address
    print('mdtest:registerFront:{}'.format(md_addr))
    api.registerFront(md_addr)
    # Initialize the API and connect to the front server
    api.init()
    sleep(0.5)
    print('mdtest: login')
    # Log in
    loginReq = {} # create an empty dict
    loginReq['UserID'] = user_id # parameters are passed as dictionary key/values
    loginReq['Password'] = user_pass # key names match the C++ struct member names
    loginReq['BrokerID'] = broker_id
    # NOTE(review): reqid is incremented but the literal 1 is passed to reqUserLogin — confirm intended
    reqid = reqid + 1 # request ids must stay unique
    i = api.reqUserLogin(loginReq, 1)
    # Poll (up to ~10 s) until onFrontConnected flips is_connected, else bail out
    counter = 0
    while (True):
        if api.is_connected:
            break
        sleep(1)
        counter += 1
        print('waiting {}'.format(counter))
        if counter > 10:
            print('time expired, connect fail, auth fail')
            exit(0)
    ## Log out — test failed (feature not available)
    #reqid = reqid + 1
    #i = api.reqUserLogout({}, 1)
    #sleep(0.5)
    ## Safe exit — test passed
    #i = api.exit()
    ## Get trading day — currently returns empty
    #day = api.getTradingDay()
    #print 'Trading Day is:' + str(day)
    #sleep(0.5)
    ## Subscribe to an instrument — test passed
    print('subscribe')
    i = api.subscribeMarketData('sc1906')
    ## Unsubscribe from an instrument — test passed
    #i = api.unSubscribeMarketData('IF1505')
    # Subscribe to quote requests — test passed
    #i = api.subscribeForQuoteRsp('IO1504-C-3900')
    # Unsubscribe from quote requests — test passed
    #i = api.unSubscribeForQuoteRsp('IO1504-C-3900')
if __name__ == '__main__':
    # Main program: run the test in a background thread.
    # NOTE(review): the thread is non-daemon, so the process stays alive until main() returns.
    thread = Thread(target=main, args=())
    thread.start()
| 26.351852 | 93 | 0.448349 |
0919569616587935efdf7974805bd00251126de0 | 675 | py | Python | PyMOTW/source/math/math_distance_2_points.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | PyMOTW/source/math/math_distance_2_points.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2020-07-18T03:52:03.000Z | 2020-07-18T04:18:01.000Z | PyMOTW/source/math/math_distance_2_points.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 2 | 2021-03-06T04:28:32.000Z | 2021-03-06T04:59:17.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Calculate the distance to a point.
"""
#end_pymotw_header
import math
# Print a table of Euclidean distances between pairs of 2-D points.
print('{:^8} {:^8} {:^8} {:^8} {:^8}'.format('X1', 'Y1', 'X2', 'Y2', 'Distance'))
print('{:-^8} {:-^8} {:-^8} {:-^8} {:-^8}'.format('', '', '', '', ''))
POINTS = [
    ((5, 5), (6, 6)),
    ((-6, -6), (-5, -5)),
    ((0, 0), (3, 4)),    # 3-4-5 triangle
    ((-1, -1), (2, 3)),  # 3-4-5 triangle
]
row_fmt = '{:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f}'
for (x1, y1), (x2, y2) in POINTS:
    # hypot(dx, dy) is the Euclidean distance between the two points
    h = math.hypot(x1 - x2, y1 - y2)
    print(row_fmt.format(x1, y1, x2, y2, h))
| 20.454545 | 59 | 0.435556 |
b863b347758a25b2054e8bf7725c3eaad7bf89bb | 8,291 | py | Python | salt/states/zabbix_valuemap.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 19 | 2016-01-29T14:37:52.000Z | 2022-03-30T18:08:01.000Z | salt/states/zabbix_valuemap.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 223 | 2016-03-02T16:39:41.000Z | 2022-03-03T12:26:35.000Z | salt/states/zabbix_valuemap.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 64 | 2016-02-04T19:45:26.000Z | 2021-12-15T02:02:31.000Z | # -*- coding: utf-8 -*-
"""
.. versionadded:: 2017.7
Management of Zabbix Valuemap object over Zabbix API.
:codeauthor: Jakub Sliva <jakub.sliva@ultimum.io>
"""
from __future__ import absolute_import, unicode_literals
import json
import logging
try:
from salt.ext import six
from salt.exceptions import SaltException
IMPORTS_OK = True
except ImportError:
IMPORTS_OK = False
log = logging.getLogger(__name__)
def __virtual__():
    """
    Expose these states only when the zabbix execution module is loadable and
    all third-party imports succeeded.
    """
    zabbix_available = "zabbix.run_query" in __salt__
    if zabbix_available and IMPORTS_OK:
        return True
    return False, "Import zabbix or other needed modules failed."
def present(name, params, **kwargs):
    """
    Creates Zabbix Value map object or if differs update it according defined parameters
    :param name: Zabbix Value map name
    :param params: Definition of the Zabbix Value map
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
    .. code-block:: yaml
        zabbix-valuemap-present:
            zabbix_valuemap.present:
                - name: Number mapping
                - params:
                      mappings:
                          - value: 1
                            newvalue: one
                          - value: 2
                            newvalue: two
    """
    zabbix_id_mapper = __salt__["zabbix.get_zabbix_id_mapper"]()
    # dry_run True means salt was invoked with test=True: report, don't change
    dry_run = __opts__["test"]
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Create input params substituting functions with their results
    # NOTE(review): mutates the caller's `params` dict in place
    params["name"] = name
    input_params = __salt__["zabbix.substitute_params"](params, **kwargs)
    log.info(
        "Zabbix Value map: input params: %s",
        six.text_type(json.dumps(input_params, indent=4)),
    )
    search = {"output": "extend", "selectMappings": "extend", "filter": {"name": name}}
    # GET Value map object if exists
    valuemap_get = __salt__["zabbix.run_query"]("valuemap.get", search, **kwargs)
    log.info(
        "Zabbix Value map: valuemap.get result: %s",
        six.text_type(json.dumps(valuemap_get, indent=4)),
    )
    # Exactly one match means the object exists; anything else is treated as absent
    existing_obj = (
        __salt__["zabbix.substitute_params"](valuemap_get[0], **kwargs)
        if valuemap_get and len(valuemap_get) == 1
        else False
    )
    if existing_obj:
        diff_params = __salt__["zabbix.compare_params"](input_params, existing_obj)
        # NOTE(review): stray '{' in this log format string — looks like a typo
        log.info(
            "Zabbix Value map: input params: {%s",
            six.text_type(json.dumps(input_params, indent=4)),
        )
        log.info(
            "Zabbix Value map: Object comparison result. Differences: %s",
            six.text_type(diff_params),
        )
        if diff_params:
            # Carry the existing object's id into the update payload
            diff_params[zabbix_id_mapper["valuemap"]] = existing_obj[
                zabbix_id_mapper["valuemap"]
            ]
            log.info(
                "Zabbix Value map: update params: %s",
                six.text_type(json.dumps(diff_params, indent=4)),
            )
            if dry_run:
                ret["result"] = True
                ret["comment"] = 'Zabbix Value map "{0}" would be fixed.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": 'Zabbix Value map "{0}" differs '
                        "in following parameters: {1}".format(name, diff_params),
                        "new": 'Zabbix Value map "{0}" would correspond to definition.'.format(
                            name
                        ),
                    }
                }
            else:
                valuemap_update = __salt__["zabbix.run_query"](
                    "valuemap.update", diff_params, **kwargs
                )
                log.info(
                    "Zabbix Value map: valuemap.update result: %s",
                    six.text_type(valuemap_update),
                )
                if valuemap_update:
                    ret["result"] = True
                    ret["comment"] = 'Zabbix Value map "{0}" updated.'.format(name)
                    ret["changes"] = {
                        name: {
                            "old": 'Zabbix Value map "{0}" differed '
                            "in following parameters: {1}".format(name, diff_params),
                            "new": 'Zabbix Value map "{0}" fixed.'.format(name),
                        }
                    }
        else:
            # Object already matches the requested definition — nothing to do
            ret["result"] = True
            ret[
                "comment"
            ] = 'Zabbix Value map "{0}" already exists and corresponds to a definition.'.format(
                name
            )
    else:
        if dry_run:
            ret["result"] = True
            ret["comment"] = 'Zabbix Value map "{0}" would be created.'.format(name)
            ret["changes"] = {
                name: {
                    "old": 'Zabbix Value map "{0}" does not exist.'.format(name),
                    "new": 'Zabbix Value map "{0}" would be created '
                    "according definition.".format(name),
                }
            }
        else:
            # ACTION.CREATE
            valuemap_create = __salt__["zabbix.run_query"](
                "valuemap.create", input_params, **kwargs
            )
            log.info(
                "Zabbix Value map: valuemap.create result: %s",
                six.text_type(valuemap_create),
            )
            if valuemap_create:
                ret["result"] = True
                ret["comment"] = 'Zabbix Value map "{0}" created.'.format(name)
                ret["changes"] = {
                    name: {
                        "old": 'Zabbix Value map "{0}" did not exist.'.format(name),
                        "new": 'Zabbix Value map "{0}" created according definition.'.format(
                            name
                        ),
                    }
                }
    return ret
def absent(name, **kwargs):
    """
    Ensure the named Zabbix Value map does not exist (delete it when present).
    :param name: Zabbix Value map name
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
    .. code-block:: yaml
        zabbix-valuemap-absent:
            zabbix_valuemap.absent:
                - name: Value map name
    """
    dry_run = __opts__["test"]
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    # Resolve the value map name to an object id; a lookup failure means "absent".
    try:
        object_id = __salt__["zabbix.get_object_id_by_params"](
            "valuemap", {"filter": {"name": name}}, **kwargs
        )
    except SaltException:
        object_id = False
    if not object_id:
        ret["result"] = True
        ret["comment"] = 'Zabbix Value map "{0}" does not exist.'.format(name)
        return ret
    if dry_run:
        # Test mode: report what would happen without touching Zabbix.
        ret["result"] = True
        ret["comment"] = 'Zabbix Value map "{0}" would be deleted.'.format(name)
        ret["changes"] = {
            name: {
                "old": 'Zabbix Value map "{0}" exists.'.format(name),
                "new": 'Zabbix Value map "{0}" would be deleted.'.format(name),
            }
        }
        return ret
    valuemap_delete = __salt__["zabbix.run_query"](
        "valuemap.delete", [object_id], **kwargs
    )
    if valuemap_delete:
        ret["result"] = True
        ret["comment"] = 'Zabbix Value map "{0}" deleted.'.format(name)
        ret["changes"] = {
            name: {
                "old": 'Zabbix Value map "{0}" existed.'.format(name),
                "new": 'Zabbix Value map "{0}" deleted.'.format(name),
            }
        }
    return ret
| 35.431624 | 119 | 0.522494 |
aac0b33bdc978a47b057f8ae5ce42498535363fe | 521 | py | Python | landavailability/api/migrations/0007_auto_20161128_1727.py | alphagov/land-avilability-api | 048d4eed4caedb7b9f41caa5d69025506b2eb57d | [
"MIT"
] | 1 | 2017-07-24T17:00:34.000Z | 2017-07-24T17:00:34.000Z | landavailability/api/migrations/0007_auto_20161128_1727.py | alphagov/land-availability-api | 048d4eed4caedb7b9f41caa5d69025506b2eb57d | [
"MIT"
] | 23 | 2016-11-21T15:00:11.000Z | 2019-06-04T07:07:55.000Z | landavailability/api/migrations/0007_auto_20161128_1727.py | alphagov/land-avilability-api | 048d4eed4caedb7b9f41caa5d69025506b2eb57d | [
"MIT"
] | 4 | 2017-03-23T16:42:40.000Z | 2021-12-01T07:27:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 17:27
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: switch Location.geom to a
    # geography-typed MultiPolygon column in WGS84 (SRID 4326).
    dependencies = [
        ('api', '0006_location'),
    ]
    operations = [
        migrations.AlterField(
            model_name='location',
            name='geom',
            field=django.contrib.gis.db.models.fields.MultiPolygonField(geography=True, srid=4326),
        ),
    ]
| 23.681818 | 99 | 0.642994 |
7a65817d4d10eddefbfa50057dbd3b9b73049f27 | 2,937 | py | Python | data/p2DJ/New/program/qiskit/QC/startQiskit_QC12.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/QC/startQiskit_QC12.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/QC/startQiskit_QC12.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=5
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase/bit-flip oracle circuit O_f for an n-bit boolean f.

    For every input string `rep` with f(rep) == "1", the zero-bits of `rep`
    are X-conjugated around a multi-controlled Toffoli onto the target qubit.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) != "1":
            continue
        # qubits whose bit is '0' must be flipped so the all-ones control fires
        flipped = [controls[j] for j in range(n) if rep[j] == "0"]
        for qubit in flipped:
            oracle.x(qubit)
        oracle.mct(controls, target[0], None, mode='noancilla')
        for qubit in flipped:
            oracle.x(qubit)
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the n-qubit circuit around the oracle for f (Deutsch-Jozsa-style
    H / oracle / H structure — presumably; confirm against the p2DJ generator).

    Gate order is significant; gates tagged '# number=...' are kept as generated.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1]) # number=1
    prog.cx(input_qubit[0],input_qubit[1]) # number=2
    prog.h(target)
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])
    prog.x(input_qubit[0]) # number=3
    prog.x(input_qubit[0]) # number=4
    # circuit end
    return prog
if __name__ == '__main__':
    n = 2
    f = lambda rep: rep[-1]  # oracle output is the last bit of the input string
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    # Connect to the IBM Quantum account and pick a real backend
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_belem")
    # Transpile against a mock device layout, then append extra gates
    circuit1 = transpile(prog,FakeVigo())
    # NOTE(review): the two X gates on qubit 3 cancel out (identity) — presumably auto-generated
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Dump counts, depth and the circuit diagram to the output file
    writefile = open("../data/startQiskit_QC12.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 27.448598 | 82 | 0.621042 |
e126d7210746acb5c091d671fae0ab304445c7d4 | 3,184 | py | Python | tests/sentry/utils/test_types.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 2 | 2019-03-04T12:45:54.000Z | 2019-03-04T12:45:55.000Z | tests/sentry/utils/test_types.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | tests/sentry/utils/test_types.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 1 | 2017-02-09T06:36:57.000Z | 2017-02-09T06:36:57.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.utils.types import (
InvalidTypeError,
Any,
Bool,
Int,
Float,
String,
Dict,
Sequence,
)
from unittest import TestCase
class OptionsTypesTest(TestCase):
    """Exercise coercion and validation for each option type."""

    def test_any(self):
        for value in ('foo', 1):
            assert Any(value) == value
        assert Any(None) is None
        assert Any() is None
        for value in (None, 'foo', 'bar'):
            assert Any.test(value)

    def test_bool(self):
        for value in (True, 1, 'y', 'YES', 't', 'true', '1', 'on'):
            assert Bool(value) is True
        for value in (False, 0, 'n', 'NO', 'f', 'false', '0', 'off'):
            assert Bool(value) is False
        assert Bool() is False
        assert Bool.test(None) is False
        assert Bool(True) is True
        assert Bool.test('foo') is False
        with self.assertRaises(InvalidTypeError):
            Bool('foo')

    def test_int(self):
        assert Int(1) == 1
        assert Int('1') == 1
        assert Int('-1') == -1
        assert Int() == 0
        for bad in ('foo', '1.1'):
            with self.assertRaises(InvalidTypeError):
                Int(bad)

    def test_float(self):
        assert Float(1.0) == 1.0
        assert Float('1') == 1.0
        assert Float('-1.1') == -1.1
        assert Float(1) == 1.0
        assert Float() == 0.0
        with self.assertRaises(InvalidTypeError):
            Float('foo')

    def test_string(self):
        assert String('foo') == 'foo'
        assert String(u'foo') == u'foo'
        assert String() == u''
        with self.assertRaises(InvalidTypeError):
            String(0)

    def test_dict(self):
        assert Dict({}) == {}
        assert Dict({'foo': 'bar'}) == {'foo': 'bar'}
        assert Dict('{foo: bar}') == {'foo': 'bar'}
        assert Dict() == {}
        for bad in ('[]', [], ''):
            with self.assertRaises(InvalidTypeError):
                Dict(bad)
        with self.assertRaises(InvalidTypeError):
            # malformed yaml/json
            Dict('{foo:bar}')

    def test_sequence(self):
        assert Sequence(()) == ()
        assert Sequence([]) == []
        assert Sequence((1, 2, 3)) == (1, 2, 3)
        assert Sequence([1, 2, 3]) == [1, 2, 3]
        assert Sequence('[1,2,3]') == (1, 2, 3)
        for bad in ('{}', {}, ''):
            with self.assertRaises(InvalidTypeError):
                Sequence(bad)
        with self.assertRaises(InvalidTypeError):
            # malformed yaml/json
            Sequence('[1,')
| 29.481481 | 53 | 0.539573 |
5cb293386c1213e28470fcf24070bf9b82674a25 | 19,282 | py | Python | run_stanford_stages.py | Stanford-STAGES/stanford-stages | 819a32d0edba23917f867436c0dd32347af31515 | [
"CC-BY-4.0"
] | 51 | 2018-08-08T14:39:23.000Z | 2022-03-13T07:57:19.000Z | run_stanford_stages.py | Stanford-STAGES/stanford-stages | 819a32d0edba23917f867436c0dd32347af31515 | [
"CC-BY-4.0"
] | 30 | 2019-04-12T10:15:21.000Z | 2022-02-10T13:27:28.000Z | run_stanford_stages.py | Stanford-STAGES/stanford-stages | 819a32d0edba23917f867436c0dd32347af31515 | [
"CC-BY-4.0"
] | 23 | 2018-11-08T06:30:44.000Z | 2022-03-22T03:52:42.000Z | import sys, time, json, traceback
from collections import namedtuple
from pathlib import Path
import copy
import csv
import inf_tools
from inf_tools import print_log
import inf_narco_app as narcoApp
class MissingRequiredChannelError(Exception):
    """Raised when a required channel is missing."""
class ConfigurationStagesError(Exception):
    """Raised on a stanford-stages configuration problem."""
class RunStagesError(Exception):
    """Error raised while processing a study; carries the offending EDF file name.

    :param message: human-readable description of the failure
    :param edf_filename: path of the EDF being processed (Path objects are
        normalized to str); empty string when not applicable
    """
    def __init__(self, message, edf_filename=''):
        # Call Exception.__init__ so str(exc) / exc.args show the message
        # (the original implementation left args empty).
        super().__init__(message)
        self.message = message
        if isinstance(edf_filename, Path):
            edf_filename = str(edf_filename)
        self.edf_filename = edf_filename
def run_using_json_file(json_file: str):
output_subpath = 'stanford_stages'
if json_file is None:
print_log('Requires a json_file for input. Nothing done', 'error')
return
if not isinstance(json_file, Path):
json_file = Path(json_file)
if not json_file.is_file():
print_log(f'Could not locate json processing file ({str(json_file)}. Nothing done.', 'error')
return
with open(json_file, 'r') as fid:
json_dict: dict = json.load(fid)
# bypass looking for edf data in the case when channel indices are not provided, the channel labels are provided and
# they are explicitly set to 'None'.
bypass_edf_check = json_dict.get('bypass_edf_check', False) or \
'channel_indices' not in json_dict and 'channel_labels' in json_dict and all([label.lower() == 'none' for label in json_dict['channel_labels'].values()])
if bypass_edf_check:
# This will let us bypass the channel label configuration later without raising an exception.
print("Bypassing edf check")
json_dict['channel_indices'] = []
json_dict['bypass_edf_check'] = bypass_edf_check
'''
json_dict['channel_labels'] = {
'central3': json_dict.pop('C3'),
'central4': json_dict.pop('C4'),
'occipital1': json_dict.pop('O1'),
'occipital2': json_dict.pop('O2'),
'eog_l': json_dict.pop('EOG-L'),
'eog_r': json_dict.pop('EOG-R'),
'chin_emg': json_dict.pop('EMG')
}
'''
# Need to pop any keys here that are not found as part of inf_config class.
psg_path = None
if 'edf_pathname' in json_dict:
psg_path = json_dict.pop('edf_pathname')
if not bypass_edf_check:
edf_files = inf_tools.get_edf_filenames(psg_path)
else:
unwanted_edf_files = inf_tools.get_edf_files(psg_path)
# b = inf_tools.get_files_with_ext('F:\\jazz\\testing', 'h5')
h5_files = inf_tools.get_h5_files(psg_path)
if len(h5_files):
# determine basename of these files... note that we currently save .features and .hypnodensity files with .h5 suffx
# as well as encoding files
prefix_names = [b.stem.partition('.hypnodensity')[0].partition('.features')[0] for b in h5_files] # file1.hypnodensity.h5 and file1.h5 --> file1 and file1
# remove any duplicates that are found
prefix_names = list(set(prefix_names))
# then create mock-up .edf files with the remaining basenames, provided that they are not in the list of .edf files already.
edf_names = [b.stem.lower() for b in unwanted_edf_files]
p = Path(psg_path)
edf_files = []
for name in prefix_names:
if name.lower() not in edf_names:
edf_files.append(str(p / (name + '.edf')))
elif 'edf_filename' in json_dict:
edf_files = [json_dict.pop('edf_filename')]
elif 'edf_files' in json_dict:
edf_files = json_dict.pop('edf_files')
else:
print_log(f'No edf file or pathname specified in json file ({str(json_file)}. Nothing done.', 'error')
return -1
num_edfs = len(edf_files)
if num_edfs == 0:
if 'edf_pathname' is not None:
print_log(
f'{num_edfs} .edf files found at the edf_pathname ("{psg_path}") specified in "{json_file}"! '
f'Nothing to do.', 'error')
else:
print_log(f'{num_edfs} .edf files listed in json file ({json_file})! Nothing to do.', 'error')
return 0
if 'output_path' not in json_dict or len(json_dict['output_path'].strip()) == 0:
if 'output_path' in json_dict:
del (json_dict['output_path'])
# only need this psg_path to be guaranteed in this instance, where there is no output path
# specified. In this case, we don't know if we have a psg_path for sure, but we are
# guaranteed to have edf_files in a list.
psg_path = str(Path(edf_files[0]).parent)
output_path = Path(psg_path) / output_subpath
print_log('No output path speficied. Setting path to: ' + str(output_path), 'warning')
else:
output_path = Path(json_dict.pop('output_path'))
if not output_path.is_dir():
output_path.mkdir(parents=True)
if not output_path.is_dir():
print_log('Could not find or create output directory (' + str(output_path) + '! QUITTING', 'error')
return -1
else:
print_log('Created ' + str(output_path), 'debug')
else:
print_log('Found ' + str(output_path), 'debug')
if 'edf_pathname' in json_dict:
print_log(f'{num_edfs} .edf files found at the edf_pathname ("{psg_path}") specified in "{json_file}".', 'info')
else:
print_log(f'{num_edfs} .edf files listed in json file ({json_file}) for processing. '
f'Output folder is: {str(output_path)}\n')
# Put this back into our json configuration ...
json_dict['output_path'] = str(output_path)
# Check for .evt file(s) containing start/stop events to exclude from the analysis (e.g. bad data)
data_exclusion_key = 'exclusion_events_pathname'
data_exclusion_path = json_dict.get(data_exclusion_key, None)
if data_exclusion_path is not None:
data_exclusion_path = Path(data_exclusion_path)
if not data_exclusion_path.is_dir():
err_msg = f'A {data_exclusion_key} entry was found in the json file, but the path ("{str(data_exclusion_path)}") could not be found. Correct the pathname or remove it.'
print_log(err_msg, 'error')
# data_exclusion_path = None
raise ConfigurationStagesError(err_msg)
else:
print_log(f'Using "{str(data_exclusion_path)}" as path containing data exclusion event file(s).')
# Check for .csv file containing lights out/on information
lights_filename_key = 'lights_filename'
lights_edf_dict = {}
if lights_filename_key in json_dict:
lights_filename = json_dict[lights_filename_key].strip()
if lights_filename != "":
lights_filename = Path(lights_filename)
if not lights_filename.exists():
err_msg = f'Could not find the "{lights_filename_key}" key specified in the .json configuration file. Correct the filename or remove it: {str(lights_filename)}'
print_log(err_msg, 'error')
raise ConfigurationStagesError(err_msg)
else:
print_log(f'Loading lights off/on information from "{str(lights_filename)}')
lights_edf_dict: dict = load_lights_from_csv_file(lights_filename)
# Previously called run_with_edf_files() here
start_time = time.time()
pass_fail_dictionary = dict.fromkeys(edf_files, False)
# Fail with warning if no .edf files are found ?
# Lights on/off order of preference is
# 1. If there is a lights on/off file with an entry for the current .edf, its value is used
# 2. If that is missing, then the lights_off and lights_on keys will be used if they are listed in the .json configuration file
# 3. If this is missing, the default value will be None for each lights_off and lights_on field.
# A value of None is handled as no entry given which and the entire study will be used
# (i.e. lights out assumed to coincides with PSG start and lights on coincides with the end of the recording).
default_lights_off = json_dict.get("inf_config", {}).get("lights_off", None)
default_lights_on = json_dict.get("inf_config", {}).get("lights_on", None)
edf_files = sorted(edf_files)
for index, edfFile in enumerate(edf_files):
try: # ref: https://docs.python.org/3/tutorial/errors.html
edf_filename = Path(edfFile).name
msg = f'{index + 1:03d} / {num_edfs:03d}: {edf_filename}\t'
print_log(msg, 'STAGES')
# create a copy to avoid issues of making alteration below, such as channel indices ...
cur_json_dict = copy.deepcopy(json_dict)
# Give some flexibility to whether the .edf file name is given or just the basename (sans extension)
file_key = None
if edf_filename in lights_edf_dict:
file_key = edf_filename
elif edf_filename.partition('.')[0] in lights_edf_dict:
file_key = edf_filename.partition('.')[0]
if file_key is not None:
cur_json_dict["inf_config"]["lights_off"] = lights_edf_dict[file_key].get("lights_off", default_lights_off)
cur_json_dict["inf_config"]["lights_on"] = lights_edf_dict[file_key].get("lights_on", default_lights_on)
print_log(f"Lights off: {cur_json_dict['inf_config']['lights_off']}, Lights on: "
f"{cur_json_dict['inf_config']['lights_on']}")
if data_exclusion_path is not None:
# data_exclusion_filename = str(data_exclusion_path / (edf_filename.partition('.')[0] + '.evt'))
# cur_json_dict['bad_data_events'] = get_bad_data_events(data_exclusion_filename)
data_exclusion_file = data_exclusion_path / (edf_filename.partition('.')[0] + '.evt')
if data_exclusion_file.is_file():
log_msg = f'Data exclusion file found: {str(data_exclusion_file)}'
cur_json_dict["inf_config"]["bad_data_filename"] = str(data_exclusion_file)
else:
log_msg = f'No data exclusion file found for current study ({edf_filename}): {str(data_exclusion_file)}'
print_log(log_msg, 'info')
score, diagnosis_str = run_study(edfFile, json_configuration=cur_json_dict, bypass_edf_check=bypass_edf_check)
pass_fail_dictionary[edfFile] = True
if diagnosis_str is None:
result_str = 'Narcoleposy Diagnosis Not Performed'
else:
result_str = f'[run_stanford_stages.py] Score: {score:0.4f}. Diagnosis: {diagnosis_str}'
print_log(result_str, 'STAGES')
except KeyboardInterrupt:
print_log('\nUser cancel ...', 'info')
exit(0)
except (OSError, ValueError, AttributeError, TypeError) as err:
print_log("{0}: {1}".format(type(err).__name__, err), 'error')
traceback.print_exc()
except KeyError as err:
print_log("{0}: {1}".format(type(err).__name__, err), 'error')
except MissingRequiredChannelError as err:
print_log("Missing required channel(s):\n{0}".format(err), 'error')
except (RunStagesError, narcoApp.StanfordStagesError) as err:
print_log(f'{type(err).__name__}: {err.message} ({err.edf_filename})', 'error')
except IndexError as err:
print_log("{0}: {1}".format(type(err).__name__, err), 'error')
traceback.print_exc()
print_log('An IndexError may be raised if the application was previously run with a subset of all '
'16 models and is now using a greater or different selection of models. Try deleting the '
'cached hypnodensity.(pkl/h5) file(s) and run the software again to generate the '
'necessary hypnodensity information for the current configuration.')
except:
# print("Unexpected error:", sys.exc_info()[0])
print_log("Unexpected error " + str(sys.exc_info()[0]) + ": " + str(sys.exc_info()[1]), 'error')
traceback.print_exc()
# So many options in python for this
num_pass = sum(pass_fail_dictionary.values())
# numPass = [passFailDictionary.values()].count(True)
# numPass = len([t for t in passFailDictionary.values() if t])
num_fail = num_edfs - num_pass
elapsed_time = time.time() - start_time
print_log(f'{num_edfs} edf files processed in {elapsed_time / 60:0.1f} minutes.\n'
f'\t{num_pass} processed successfully\n\t{num_fail} had errors')
if num_fail > 0:
fail_index = 1
print_log('The following file(s) failed:', 'warning')
for (filename, passed) in pass_fail_dictionary.items():
if not passed:
print_log(filename)
fail_index = fail_index + 1
def run_study(edf_file, json_configuration: dict, bypass_edf_check: bool = False):
    """Run the staging/narcolepsy pipeline on a single .edf study.

    Args:
        edf_file: path (str or Path) of the .edf study to process.
        json_configuration: per-study configuration dictionary; mutated in
            place (lights values converted, "channel_indices" filled in).
        bypass_edf_check: when True, skip the on-disk existence check.

    Returns:
        The result of narcoApp.main (score and diagnosis string).

    Raises:
        RunStagesError: file missing or channel configuration absent.
        MissingRequiredChannelError: required EEG/EOG/EMG channels absent.
    """
    # BUG FIX: was `isinstance(edf_file, type(Path))`, which tests against
    # Path's *metaclass* and is False for both str and Path inputs.
    if not isinstance(edf_file, Path):
        edf_file = Path(edf_file)
    if not bypass_edf_check and not edf_file.is_file():
        err_msg = 'edf_file is not a file'
        raise RunStagesError(err_msg, edf_file)

    print_log("Processing {filename:s}".format(filename=str(edf_file)))
    # display_set_selection(edf_channel_labels_found)
    if 'inf_config' in json_configuration:
        # Convert clock-time strings (HH:MM:...) to seconds elapsed from study start.
        json_configuration['inf_config']['lights_off'] = edftime2elapsedseconds(edf_file, json_configuration['inf_config'].get('lights_off', None))
        json_configuration['inf_config']['lights_on'] = edftime2elapsedseconds(edf_file, json_configuration['inf_config'].get('lights_on', None))

    # Build up our dictionary / channel index mapping
    if 'channel_indices' not in json_configuration:
        label_dictionary = json_configuration.get("channel_labels", None)
        if label_dictionary is None:
            err_msg = 'Either "channel_indices" or "channel_labels" key is required in the json configuration. ' \
                      'Neither was found.'
            raise RunStagesError(err_msg, edf_file)
        edf_channel_indices_available = dict()
        edf_channel_labels_found = inf_tools.get_channel_labels(edf_file)
        for generic_label, edf_label in label_dictionary.items():
            if isinstance(edf_label, list):
                # Several acceptable labels given: use any one present in the EDF.
                edf_label_set = set(edf_label)
                edf_label_set = edf_label_set.intersection(edf_channel_labels_found)
                if len(edf_label_set) > 0:
                    edf_channel_indices_available[generic_label] = edf_channel_labels_found.index(edf_label_set.pop())
                else:
                    print_log('{0:s} not found'.format(generic_label), 'debug')
            else:
                # 'none' or blank means the channel was intentionally omitted.
                if edf_label.lower().strip() == 'none' or edf_label.strip() == '':
                    continue
                elif edf_label in edf_channel_labels_found:
                    edf_channel_indices_available[generic_label] = edf_channel_labels_found.index(edf_label)
                else:
                    print_log('{0:s} not found'.format(edf_label), 'debug')

        # Now we have prepped everything, so let's see if we actually have what we need or not.
        can_continue, cannot_continue_msg = True, ''
        # At least one central and one occipital EEG channel are required.
        if 'central3' not in edf_channel_indices_available and 'central4' not in edf_channel_indices_available:
            cannot_continue_msg += 'Required central EEG channel missing.\n'
            can_continue = False
        if not ('occipital1' in edf_channel_indices_available or 'occipital2' in edf_channel_indices_available):
            cannot_continue_msg += 'Required occipital EEG channel missing.\n'
            can_continue = False
        if 'eog_l' not in edf_channel_indices_available:
            cannot_continue_msg += 'Required L-EOG channel is missing.\n'
            can_continue = False
        if 'eog_r' not in edf_channel_indices_available:
            cannot_continue_msg += 'Required R-EOG channel is missing.\n'
            can_continue = False
        if 'chin_emg' not in edf_channel_indices_available:
            cannot_continue_msg += 'Required chin EMG channel is missing.\n'
            can_continue = False
        if not can_continue:
            print_log(cannot_continue_msg, 'debug')
            raise MissingRequiredChannelError(cannot_continue_msg)
        json_configuration["channel_indices"] = edf_channel_indices_available

    return narcoApp.main(str(edf_file), json_configuration)
def get_bad_data_events(events_filename):
    """Read a CSV of (start, stop) rows into a {start: max stop} mapping.

    Rows sharing the same start keep the largest stop value. Returns an
    empty dict (after logging a warning) when the file does not exist.
    """
    events = {}
    if not Path(events_filename).exists():
        print_log(f"File containing events to exclude not found: {events_filename}", 'warning')
        return events
    with open(events_filename) as fid:
        for row in csv.reader(fid):
            start, stop = row[0], row[1]
            current = events.get(start)
            if current is None or float(stop) > float(current):
                events[start] = stop
    return events
def load_lights_from_csv_file(lights_filename):
    """Read a CSV of (filename, lights_off, lights_on) rows.

    Returns a dict mapping each filename to a {'lights_off': ...,
    'lights_on': ...} dict; rows with fewer fields yield partial dicts.
    Missing files log a warning and return an empty dict.
    """
    lights = {}
    if not Path(lights_filename).exists():
        print_log(f"Lights filename does not exist: {lights_filename}", 'warning')
        return lights
    with open(lights_filename) as fid:
        for row in csv.reader(fid):
            # zip truncates gracefully when a row has fewer than 3 fields.
            lights[row[0]] = dict(zip(('lights_off', 'lights_on'), row[1:3]))
    return lights
def edftime2elapsedseconds(edf_file, time_value):
    """Convert a clock-time string to seconds elapsed from the EDF study start.

    A value containing ':' is parsed as a clock time and converted relative
    to the study start read from the EDF header; any other value (numeric
    elapsed seconds, or None) is returned unchanged.
    """
    if isinstance(time_value, str) and ":" in time_value:
        if edf_file is None or not Path(edf_file).exists():
            raise(ValueError('Cannot convert time stamp to elapsed seconds from the study start because an EDF file, which contains the study start time, was not found.'))
        else:
            study_start_time_seconds = inf_tools.get_study_starttime_as_seconds(edf_file)
            if study_start_time_seconds is None:
                raise(RunStagesError('Unable to find start time for edf file'))
            time_hh_mm_ss = time_value.split(':')
            # Weights for H, M, S and a fourth ms field.
            # NOTE(review): the 0.001 entry implies an HH:MM:SS:mmm format
            # (colon-separated milliseconds); a 'SS.mmm' value would make
            # int(value) raise ValueError — confirm the expected format.
            convert_hh_mm_ss = [3600, 60, 1, 0.001]
            time_value_seconds = 0
            for idx, value in enumerate(time_hh_mm_ss):
                time_value_seconds = time_value_seconds + int(value)*convert_hh_mm_ss[idx]
            elapsed_seconds = time_value_seconds - study_start_time_seconds
            # A clock time earlier than the start time is assumed to be on
            # the next day (studies span midnight).
            if elapsed_seconds < 0:
                elapsed_seconds = elapsed_seconds+24*3600
    else:
        elapsed_seconds = time_value
    return elapsed_seconds
def print_usage(tool_name='run_stanford_stages.py'):
    """Print the command-line usage banner for this script."""
    print(f"Usage:\n\t {tool_name}  <json config file>")
if __name__ == '__main__':
    # Expect exactly one argument: the path to the json configuration file.
    argv = sys.argv
    if len(argv) == 2:
        run_using_json_file(json_file=argv[1])
    else:
        print_usage(argv[0])
| 48.205 | 181 | 0.643242 |
e808e0b1cb860b3a375d220062dc0559134d575f | 7,723 | py | Python | material/templatetags/material_form_internal.py | hansegucker/django-material | 5a3d47504569ba3d65250d5b75e8d9c7e63640b7 | [
"BSD-3-Clause"
] | null | null | null | material/templatetags/material_form_internal.py | hansegucker/django-material | 5a3d47504569ba3d65250d5b75e8d9c7e63640b7 | [
"BSD-3-Clause"
] | 8 | 2021-03-19T04:45:42.000Z | 2021-09-22T19:11:49.000Z | virtual/lib/python3.6/site-packages/material/templatetags/material_form_internal.py | tw8130/Awwards | d1296cdc26356443d6ec5869495eedb766ecbcf2 | [
"Unlicense"
] | null | null | null | from __future__ import division
import math
import re
import json
from collections import OrderedDict
import django
from django.db.models.query import QuerySet
from django.forms.forms import BoundField
from django.template import Library
from django.template.base import (
Node, TemplateSyntaxError, Variable, token_kwargs
)
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import formats
from django.utils.html import escape
from ..base import Field
from ..widgets import SelectDateWidget
from .material_form import FormPartNode, WidgetAttrNode, _render_parts
if django.VERSION < (3,):
from django.utils.encoding import force_text
else:
from django.utils.encoding import force_str as force_text
register = Library()
@register.tag('render')
class FormRenderNode(Node):
    """Sugar for element in template rendering.

    Usage: {% render element [template=... widget=...] %}...{% endrender %}
    """

    def __init__(self, parser, token):
        """Parse the tag token, its options and the inner node list."""
        bits = token.split_contents()
        # BUG FIX: the original tested `len(bits) == 0`, which can never be
        # true (bits always starts with the tag name), so a missing element
        # surfaced later as an IndexError instead of a template error.
        if len(bits) < 2:
            # BUG FIX: the original called .format() on a %-style template,
            # which returned the string unchanged and dropped the arguments.
            raise TemplateSyntaxError(
                "%r received invalid args, expected one element for render."
                " Got: %r" % (bits[0], bits[1:])
            )
        remaining_bits = bits[2:]
        # token_kwargs consumes recognized key=value pairs in place;
        # anything left over is a malformed token.
        self.kwargs = token_kwargs(remaining_bits, parser)
        if remaining_bits:
            raise TemplateSyntaxError("%r received an invalid token: %r" %
                                      (bits[0], remaining_bits[0]))
        for key in self.kwargs:
            if key not in ('template', 'widget'):
                raise TemplateSyntaxError("%r received an invalid key: %r" %
                                          (bits[0], key))
        self.nodelist = parser.parse(('end{}'.format(bits[0]),))
        parser.delete_first_token()
        self.element = Variable(bits[1])

    def render(self, context):
        """Render the wrapped element with the resolved template/widget options."""
        element = self.element.resolve(context)
        options = {}
        for key, value in self.kwargs.items():
            options[key] = value.resolve(context)

        # render inner parts
        children = (
            node for node in self.nodelist
            if isinstance(node, FormPartNode)
        )
        _render_parts(context, children)

        attrs = (
            node for node in self.nodelist
            if isinstance(node, WidgetAttrNode)
        )
        for attr in attrs:
            attr.render(context)

        # render element
        if isinstance(element, BoundField):
            return Field(element.name).render(context, **options)
        elif hasattr(element, 'render'):
            with context.push(parent=element):
                return element.render(context, **options)
        else:
            # BUG FIX: was "...%r".format(element), which never inserted the
            # element into the error message.
            raise TemplateSyntaxError(
                "form_render can't render %r" % (element,)
            )
@register.filter
def multiwidget_value(bound_field, pos):
    """Return the value of the *pos*-th subwidget of a MultiWidget field."""
    raw = bound_field.value()
    if isinstance(raw, (list, tuple)):
        return raw[pos]
    # A scalar value must first be decomposed into per-subwidget parts.
    parts = bound_field.field.widget.decompress(raw)
    return parts[pos]
@register.filter
def have_default_choice(field):
    """Return the blank/default entries (None or "") among the widget's choices."""
    blanks = []
    for value, _ in field.widget.choices:
        if value is None or value == "":
            blanks.append(value)
    return blanks
@register.filter
def jquery_datepicker_format(field):
    """Translate the field's first django input format into jquery datepicker tokens."""
    input_format = field.input_formats[0]
    # %a, %A, %z, %f %Z %j %U %W %c %x %X unsupported
    subst = {
        '%d': 'd',  # Day of the month as a zero-padded decimal number
        '%b': 'M',  # Month as locale's abbreviated name
        '%B': 'F',  # Month as locale's full name
        '%m': 'm',  # Month as a zero-padded decimal number
        '%y': 'y',  # Year without century as a zero-padded decimal number
        '%Y': 'Y',  # Year with century as a decimal number
        '%H': 'H',  # Hour (24-hour clock) as a zero-padded decimal number
        '%I': 'h',  # Hour (12-hour clock) as a zero-padded decimal number
        '%p': 'a',  # Locale's equivalent of either AM or PM
        '%M': 'i',  # Minute as a zero-padded decimal number
        '%S': 's',  # Second as a zero-padded decimal number
        '%%': '%'   # A literal '%' character
    }
    # One pass: match any strftime token and look up its replacement.
    pattern = re.compile('|'.join(re.escape(token) for token in subst))
    return pattern.sub(lambda match: subst[match.group(0)], input_format)
def datepicker_value(value, date_format):
"""Return localized date value."""
return formats.localize_input(value, date_format)
@register.filter('force_text')
def force_text_impl(value):
"""Coerce widget value to text."""
return force_text(value)
@register.filter
def split_choices_by_columns(choices, columns):
"""Split CheckboxSelectMultiple values into columns."""
columns = int(columns)
col_span = 12 // columns
per_column = int(math.ceil(len(choices) / columns))
choices = [tuple(choice) + (i,) for i, choice in enumerate(choices)]
return [
(col_span, choices[i:i + per_column])
for i in range(0, len(choices), per_column)
]
@register.filter
def select_date_widget_wrapper(bound_field):
    """Wrap SelectDateWidget into the django-material internal wrapper."""
    class Wrapper(object):
        def __init__(self, field):
            self.bound_field = field

        @property
        def selects(self):
            # Lazily yield the per-select rendering data for the template.
            widget = SelectDateWidget(self.bound_field.field.widget)
            yield from widget.selects_data(self.bound_field.value())

    return Wrapper(bound_field)
@register.filter
def is_initial_file(value):
    """Check whether *value* looks like an already-uploaded file (has a url)."""
    return bool(value) and bool(getattr(value, 'url', False))
@register.filter
def is_null_boolean_selected(bound_field, value):
    """Tell whether a NullBooleanField option *value* matches the field state."""
    BOOL_VALUES = {True: True, False: False, 'true': True, 'false': False, '2': True, '3': False}
    # Unrecognized tokens mean "unknown" and map to None.
    current_value = BOOL_VALUES.get(value)
    return bound_field.value() == current_value
@register.filter
def select_options(bound_field):
    """
    Return the items of an OrderedDict mapping each option-group name to a
    list of (option_label, option_value, selected) tuples.

    Ungrouped options are collected under the None key.
    """
    selected = bound_field.value()
    # Normalize scalar values to a collection, then to a set of strings for
    # membership tests against stringified option values.
    if not isinstance(selected, (list, tuple, QuerySet)):
        selected = [selected]
    selected = set(force_text(v) for v in selected)
    groups = OrderedDict()
    for option in bound_field.field.widget.choices:
        option_value, option_label = option[0], option[1]
        if isinstance(option_label, (list, tuple)):
            # An (group_name, sub_choices) pair: flatten into the group list.
            if option_value not in groups:
                groups[option_value] = []
            for value, label in option_label:
                if value is None:
                    value = ''
                value = force_text(value)
                groups[option_value].append((label, value, value in selected))
        else:
            # A plain (value, label) choice: belongs to the None group.
            if None not in groups:
                groups[None] = []
            if option_value is None:
                option_value = ''
            value = force_text(option_value)
            groups[None].append(
                (option_label, option_value, value in selected)
            )
    return groups.items()
class JSONEncoder(DjangoJSONEncoder):
    """JSON encoder that also coerces Django lazy proxies to text."""
    def default(self, obj):
        # django.utils.functional hosts lazy/Promise proxy objects; force
        # them to text instead of failing with TypeError.
        if obj.__class__.__module__ == 'django.utils.functional':
            return force_text(obj)
        # NOTE(review): delegates to json.JSONEncoder.default directly,
        # bypassing DjangoJSONEncoder's date/decimal handling — confirm
        # this is intentional.
        return json.JSONEncoder.default(self, obj)
@register.filter
def to_json_attr(obj):
    """Serialize *obj* with JSONEncoder and HTML-escape it for attribute embedding."""
    payload = json.dumps(obj, cls=JSONEncoder)
    return escape(payload)
| 31.267206 | 97 | 0.622556 |
73bbcd560635e9d61b2583e2868593ebbf8e75ae | 260 | py | Python | script1.py | YohannesGetu/Personal-Website-with-Python | d4d43972c3d8844d341f275996afbd2324e33211 | [
"MIT"
] | null | null | null | script1.py | YohannesGetu/Personal-Website-with-Python | d4d43972c3d8844d341f275996afbd2324e33211 | [
"MIT"
] | null | null | null | script1.py | YohannesGetu/Personal-Website-with-Python | d4d43972c3d8844d341f275996afbd2324e33211 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the landing page."""
    return render_template("home.html")
@app.route('/about')
def about():
    """Serve the about page."""
    return render_template("about.html")
if __name__ == "__main__":
app.run(debug=True) | 18.571429 | 40 | 0.688462 |
a2e59ef93363978c04a1951422efd4e6c66f6789 | 1,376 | py | Python | dags/tutorial.py | leofontesnascimento/simple-airflow | f713b818165914e0473d804066d64d6856b09c7d | [
"Apache-2.0"
] | null | null | null | dags/tutorial.py | leofontesnascimento/simple-airflow | f713b818165914e0473d804066d64d6856b09c7d | [
"Apache-2.0"
] | null | null | null | dags/tutorial.py | leofontesnascimento/simple-airflow | f713b818165914e0473d804066d64d6856b09c7d | [
"Apache-2.0"
] | null | null | null | """
Code that goes along with the Airflow tutorial located at:
https://github.com/apache/airflow/blob/master/airflow/example_dags/tutorial.py
"""
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'Airflow',
'depends_on_past': False,
'start_date': datetime(2015, 6, 1),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG('tutorial', default_args=default_args,
schedule_interval=timedelta(days=1))
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = BashOperator(
task_id='print_date',
bash_command='date',
dag=dag)
t2 = BashOperator(
task_id='sleep',
bash_command='sleep 5',
retries=3,
dag=dag)
templated_command = """
{% for i in range(5) %}
echo "{{ ds }}"
echo "{{ macros.ds_add(ds, 7)}}"
echo "{{ params.my_param }}"
{% endfor %}
"""
t3 = BashOperator(
task_id='templated',
bash_command=templated_command,
params={'my_param': 'Parameter I passed in'},
dag=dag)
t2.set_upstream(t1)
t3.set_upstream(t1)
| 24.571429 | 78 | 0.65189 |
ada2e7bc20fafdd0f292090a7307ba3f739035d6 | 1,065 | py | Python | abm-swarm.py | RachidStat/PyCX | a1a597e61d03b25cf138dd11ab136db8202e1243 | [
"BSD-2-Clause-FreeBSD"
] | 176 | 2019-12-18T11:44:28.000Z | 2022-03-27T09:09:33.000Z | abm-swarm.py | RachidStat/PyCX | a1a597e61d03b25cf138dd11ab136db8202e1243 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2020-03-29T00:51:25.000Z | 2020-07-19T11:08:32.000Z | abm-swarm.py | RachidStat/PyCX | a1a597e61d03b25cf138dd11ab136db8202e1243 | [
"BSD-2-Clause-FreeBSD"
] | 56 | 2019-12-18T19:04:12.000Z | 2022-03-22T09:35:33.000Z | import pycxsimulator
from pylab import *
class agent:
    """A point-mass swarm agent with position, velocity, and a short trail."""

    def __init__(self):
        """Start at a random position in the unit square with a random velocity."""
        self.x = rand(2)
        self.v = rand(2) - array([0.5, 0.5])
        self.xs = [self.x[0]]
        self.ys = [self.x[1]]

    def accelerate(self):
        """Add a fixed-magnitude pull toward the swarm's center of mass."""
        # BUG FIX: mean(...) without axis averaged over *all* coordinates,
        # giving a scalar rather than the 2-D center of mass; axis=0
        # averages per coordinate as the comment below intends.
        c = mean([a.x for a in agents], axis=0)
        f = 0.5 * (c - self.x) / norm(c - self.x)
        self.v += f  # accelerating toward the center of mass

    def move(self):
        """Advance one step and record the position, keeping a 10-point trail."""
        self.x += self.v
        self.xs.append(self.x[0])
        self.ys.append(self.x[1])
        if len(self.xs) > 10:
            del self.xs[0]
            del self.ys[0]
def initialize():
    """Create the global population of 50 randomly placed agents."""
    global agents
    agents = [agent() for _ in range(50)]
def observe():
    """Redraw the frame: each agent as a green dot plus its faded blue trail."""
    cla()
    for a in agents:
        plot([a.x[0]], [a.x[1]], 'g.') # drawing current position
        plot(a.xs, a.ys, 'b', alpha = 0.1) # drawing trajectory as well
    axis('scaled')
def update():
    """Advance one simulation step: all agents accelerate, then all move."""
    # Two separate passes so every acceleration sees the same snapshot of
    # positions before anyone moves.
    for member in agents:
        member.accelerate()
    for member in agents:
        member.move()
# Launch the interactive PyCX GUI, wiring in the three simulation callbacks.
pycxsimulator.GUI().start(func=[initialize, observe, update])
| 25.97561 | 71 | 0.528638 |
8cae4157f01a489785edf029548e0c8207beee1e | 7,307 | py | Python | sos4hjb/test/optimization/test_sos_program.py | TobiaMarcucci/sos4hjb | d8bd5c0179891ff09f11be48777bef148d952a2d | [
"MIT"
] | 3 | 2020-07-05T17:36:06.000Z | 2021-11-20T10:41:58.000Z | sos4hjb/test/optimization/test_sos_program.py | TobiaMarcucci/sos4hjb | d8bd5c0179891ff09f11be48777bef148d952a2d | [
"MIT"
] | null | null | null | sos4hjb/test/optimization/test_sos_program.py | TobiaMarcucci/sos4hjb | d8bd5c0179891ff09f11be48777bef148d952a2d | [
"MIT"
] | 1 | 2022-01-25T06:39:56.000Z | 2022-01-25T06:39:56.000Z | import unittest
import numpy as np
from sos4hjb.polynomials import (Variable, MonomialVector, ChebyshevVector,
Polynomial)
def make_test_sos_program(SosProgram):
Vectors = (MonomialVector, ChebyshevVector)
class TestSosProgram(unittest.TestCase):
x = Variable.multivariate('x', 2)
zero = {xi: 0 for xi in x}
one = {xi: 1 for xi in x}
two = {xi: 2 for xi in x}
def test_new_free_polynomial(self):
for Vector in Vectors:
# Fit free polynomial in 3 points.
prog = SosProgram()
basis = Vector.construct_basis(self.x, 3)
poly, coef = prog.add_polynomial(basis)
prog.add_linear_constraint(poly(self.zero) == 0)
prog.add_linear_constraint(poly(self.one) == 1)
prog.add_linear_constraint(poly(self.two) == 2)
prog.solve()
coef_opt = prog.substitute_minimizer(coef)
poly_opt = prog.substitute_minimizer(poly)
self.assertAlmostEqual(poly_opt(self.zero), 0, places=4)
self.assertAlmostEqual(poly_opt(self.one), 1, places=4)
self.assertAlmostEqual(poly_opt(self.two), 2, places=4)
def test_new_sos_polynomial(self):
# Fit free polynomial in 2 points, and minimize value at a third.
for Vector in Vectors:
# Normal polynomial.
prog = SosProgram()
basis = Vector.construct_basis(self.x, 3)
poly, gram, cons = prog.add_sos_polynomial(basis)
prog.add_linear_cost(poly(self.zero))
prog.add_linear_constraint(poly(self.one) == 1)
prog.add_linear_constraint(poly(self.two) == 2)
prog.solve()
poly_opt = prog.substitute_minimizer(poly)
self.assertAlmostEqual(prog.minimum(), 0, places=4)
self.assertAlmostEqual(poly_opt(self.zero), 0, places=4)
self.assertAlmostEqual(poly_opt(self.one), 1, places=4)
self.assertAlmostEqual(poly_opt(self.two), 2, places=4)
# Reconstruct polynomial from Gram matrix.
gram_opt = prog.substitute_minimizer(gram)
self.assertTrue(self._is_psd(gram_opt))
poly_opt_gram = Polynomial.quadratic_form(basis, gram_opt)
self.assertAlmostEqual(poly_opt, poly_opt_gram)
# Even polynomial.
prog = SosProgram()
poly, gram, cons = prog.add_even_sos_polynomial(basis)
prog.add_linear_cost(poly(self.zero))
prog.add_linear_constraint(poly(self.one) == 1)
prog.add_linear_constraint(poly(self.two) == 2)
prog.solve()
poly_opt = prog.substitute_minimizer(poly)
self.assertAlmostEqual(prog.minimum(), 0, places=4)
self.assertAlmostEqual(poly_opt(self.zero), 0, places=4)
self.assertAlmostEqual(poly_opt(self.one), 1, places=4)
self.assertAlmostEqual(poly_opt(self.two), 2, places=4)
# Reconstruct polynomial from Gram matrices.
gram_opt_e, gram_opt_o = [prog.substitute_minimizer(gi) for gi in gram]
self.assertTrue(self._is_psd(gram_opt_e))
self.assertTrue(self._is_psd(gram_opt_o))
basis_e = [v for v in basis if v.is_even()]
basis_o = [v for v in basis if v.is_odd()]
poly_opt_gram = Polynomial.quadratic_form(basis_e, gram_opt_e)
poly_opt_gram += Polynomial.quadratic_form(basis_o, gram_opt_o)
self.assertAlmostEqual(poly_opt, poly_opt_gram, places=4)
def test_add_sos_constraint(self):
# Fit free polynomial in 2 points, and minimize value at a third.
for Vector in Vectors:
# Normal polynomial.
prog = SosProgram()
basis = Vector.construct_basis(self.x, 6)
poly, coef = prog.add_polynomial(basis)
gram = prog.add_sos_constraint(poly)[1]
prog.add_linear_cost(poly(self.zero))
prog.add_linear_constraint(poly(self.one) == 1)
prog.add_linear_constraint(poly(self.two) == 2)
prog.solve()
poly_opt = prog.substitute_minimizer(poly)
self.assertAlmostEqual(prog.minimum(), 0, places=4)
self.assertAlmostEqual(poly_opt(self.zero), 0, places=4)
self.assertAlmostEqual(poly_opt(self.one), 1, places=4)
self.assertAlmostEqual(poly_opt(self.two), 2, places=4)
# Reconstruct polynomial from Gram matrix.
gram_opt = prog.substitute_minimizer(gram)
self.assertTrue(self._is_psd(gram_opt))
basis_half = Vector.construct_basis(self.x, 3)
poly_opt_gram = Polynomial.quadratic_form(basis_half, gram_opt)
self.assertAlmostEqual(poly_opt, poly_opt_gram)
# Even polynomial.
prog = SosProgram()
basis = Vector.construct_basis(self.x, 6, odd=False)
poly, coef = prog.add_polynomial(basis)
gram = prog.add_sos_constraint(poly)[1]
prog.add_linear_cost(poly(self.zero))
prog.add_linear_constraint(poly(self.one) == 1)
prog.add_linear_constraint(poly(self.two) == 2)
prog.solve()
poly_opt = prog.substitute_minimizer(poly)
self.assertAlmostEqual(prog.minimum(), 0, places=4)
self.assertAlmostEqual(poly_opt(self.zero), 0, places=4)
self.assertAlmostEqual(poly_opt(self.one), 1, places=4)
self.assertAlmostEqual(poly_opt(self.two), 2, places=4)
# Reconstruct polynomial from Gram matrices.
gram_opt_e, gram_opt_o = [prog.substitute_minimizer(gi) for gi in gram]
self.assertTrue(self._is_psd(gram_opt_e))
self.assertTrue(self._is_psd(gram_opt_o))
basis = Vector.construct_basis(self.x, 3)
basis_e = [v for v in basis if v.is_even()]
basis_o = [v for v in basis if v.is_odd()]
poly_opt_gram = Polynomial.quadratic_form(basis_e, gram_opt_e)
poly_opt_gram += Polynomial.quadratic_form(basis_o, gram_opt_o)
self.assertAlmostEqual(poly_opt, poly_opt_gram, places=4)
# Polynomial of odd degree.
prog = SosProgram()
basis = Vector.construct_basis(self.x, 3)
poly, c = prog.add_polynomial(basis)
with self.assertRaises(ValueError):
prog.add_sos_constraint(poly)
# Polynomial of length 0.
prog = SosProgram()
poly = Polynomial({})
with self.assertRaises(ValueError):
prog.add_sos_constraint(poly)
@staticmethod
def _is_psd(A, tol=1e-7):
    """Return True iff the symmetric matrix ``A`` is positive
    semidefinite up to the numerical tolerance ``tol``.

    BUG FIX: the original used ``np.linalg.eig``, which may return a
    complex eigenvalue array even for (numerically) symmetric input,
    making the ``> -tol`` comparison unreliable.  ``eigvalsh`` is the
    correct routine for the symmetric Gram matrices checked here: it
    guarantees real eigenvalues and is faster.
    """
    return all(np.linalg.eigvalsh(A) > -tol)
return TestSosProgram
| 47.141935 | 87 | 0.577255 |
d9ae6b271044f6c58630540c91b26185baf85a6e | 2,669 | py | Python | CIS106 Projects/DecrypterCode.py | TommyT2021/TommyT2021 | bb806cc69df4e073df1d15f5f37992dcfe5f960d | [
"MIT"
] | null | null | null | CIS106 Projects/DecrypterCode.py | TommyT2021/TommyT2021 | bb806cc69df4e073df1d15f5f37992dcfe5f960d | [
"MIT"
] | null | null | null | CIS106 Projects/DecrypterCode.py | TommyT2021/TommyT2021 | bb806cc69df4e073df1d15f5f37992dcfe5f960d | [
"MIT"
] | null | null | null | #the Shift Dictionary and Apply Shift
class Message(object):
    """A piece of text plus helpers for Caesar-cipher letter shifting."""

    def __init__(self, text):
        self.message_text = text
        # ``load_words``/``WORDLIST_FILENAME`` are provided elsewhere in
        # this module (word-list loader for decryption scoring).
        self.valid_words = load_words(WORDLIST_FILENAME)

    def get_message_text(self):
        """Return the stored message text."""
        return self.message_text

    def get_valid_words(self):
        # Return a copy so callers cannot mutate the cached word list.
        return self.valid_words[:]

    def build_shift_dict(self, shift):
        """Return a dict mapping every ASCII letter to the letter ``shift``
        positions later in the alphabet (wrapping modulo 26, with the
        original case preserved)."""
        shifted = {}
        abc = string.ascii_letters
        for letter in abc:
            index = abc.find(letter.lower())
            shifted_index = (shift + index) % 26
            nletter = abc[shifted_index]
            if letter == letter.upper():
                nletter = nletter.upper()
            shifted[letter] = nletter
        return shifted

    def apply_shift(self, shift):
        """Return the message text with every letter shifted by ``shift``;
        characters without a mapping (digits, punctuation, spaces) pass
        through unchanged.

        BUG FIX: the original additionally tested ``character in getText``
        for every character -- an O(n) substring scan per character
        (O(n**2) overall) that was always true and therefore redundant.
        It also built the result with repeated ``+=``; a single ``join``
        is linear.
        """
        shifted = self.build_shift_dict(shift)
        text = self.get_message_text()
        return ''.join(shifted.get(ch, ch) for ch in text)
#PlaintextMessage
class PlaintextMessage(Message):
    """Plaintext plus its Caesar-shifted encryption, kept in sync with the
    current shift value."""

    def __init__(self, text, shift):
        # Deliberately does not call Message.__init__ (no word list is
        # loaded for plaintext messages), matching the original behaviour.
        self.message_text = text
        self.change_shift(shift)

    def get_shift(self):
        """Return the shift currently in effect."""
        return self.shift

    def get_encrypting_dict(self):
        # Hand out a copy so callers cannot mutate the cached mapping.
        return dict(self.encrypting_dict)

    def get_message_text_encrypted(self):
        """Return the cached ciphertext for the current shift."""
        return self.message_text_encrypted

    def change_shift(self, shift):
        """Install a new shift and recompute the cached shift dictionary
        and encrypted text."""
        self.shift = shift
        self.encrypting_dict = self.build_shift_dict(shift)
        self.message_text_encrypted = self.apply_shift(shift)
#CiphertextMessage
class CiphertextMessage(Message):
    """Ciphertext that can brute-force its own Caesar shift."""

    def __init__(self, text):
        self.message_text = text
        self.valid_words = load_words(WORDLIST_FILENAME)

    def decrypt_message(self):
        """Try every shift 0..25 and return ``(best_shift, decrypted_text)``
        for the shift whose decryption contains the most valid words.

        Ties break toward the smallest shift, and shift 0 wins when no
        candidate contains a single valid word (strict ``>`` comparison,
        exactly as in the original).
        """
        best_shift = 0
        best_count = 0
        for candidate in range(26):
            words = self.apply_shift(candidate).split()
            count = sum(1 for w in words if is_word(self.valid_words, w))
            if count > best_count:
                best_count = count
                best_shift = candidate
        return (best_shift, self.apply_shift(best_shift))
#Decrypt a Story
def decrypt_story():
    """Load the bundled story text, wrap it in a CiphertextMessage and
    return its best ``(shift, plaintext)`` decryption."""
    return CiphertextMessage(get_story_string()).decrypt_message()
| 31.4 | 62 | 0.579618 |
947087f4db02eb92c0de4e759cfa1f16a102ceec | 19,952 | py | Python | jinja/lexer.py | dcramer/jinja1-djangosupport | 755287f155c18ccabe69f1318bacdaca14f55da3 | [
"BSD-3-Clause"
] | 2 | 2015-09-24T19:53:35.000Z | 2015-11-06T10:47:02.000Z | jinja/lexer.py | dcramer/jinja1-djangosupport | 755287f155c18ccabe69f1318bacdaca14f55da3 | [
"BSD-3-Clause"
] | null | null | null | jinja/lexer.py | dcramer/jinja1-djangosupport | 755287f155c18ccabe69f1318bacdaca14f55da3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jinja.lexer
~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
Because of some limitations in the compiler package which are just
natural but annoying for Jinja, the lexer also "escapes" non names that
are not keywords. The Jinja parser then removes those escaping marks
again.
This is required in order to make "class" and some other python keywords
we don't use valid identifiers.
:copyright: 2007-2008 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import unicodedata
from jinja.datastructure import TokenStream, Token
from jinja.exceptions import TemplateSyntaxError
from jinja.utils import set, sorted
from weakref import WeakValueDictionary
__all__ = ['Lexer', 'Failure', 'keywords']

# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = WeakValueDictionary()

# static regular expressions
whitespace_re = re.compile(r'\s+(?um)')
name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")(?ms)')
integer_re = re.compile(r'\d+')
float_re = re.compile(r'\d+\.\d+')
# ``@/pattern/flags`` regex literals (Jinja 1 extension syntax).
regex_re = re.compile(r'\@/([^/\\]*(?:\\.[^/\\]*)*)*/[a-z]*(?ms)')

# set of used keywords
keywords = set(['and', 'block', 'cycle', 'elif', 'else', 'endblock',
                'endfilter', 'endfor', 'endif', 'endmacro', 'endraw',
                'endtrans', 'extends', 'filter', 'for', 'if', 'in',
                'include', 'is', 'macro', 'not', 'or', 'pluralize', 'raw',
                'recursive', 'set', 'trans', 'print', 'call', 'endcall'])

# bind operators to token types
operators = {
    '+':            'add',
    '-':            'sub',
    '/':            'div',
    '//':           'floordiv',
    '*':            'mul',
    '%':            'mod',
    '**':           'pow',
    '~':            'tilde',
    '!':            'bang',
    '@':            'at',
    '[':            'lbracket',
    ']':            'rbracket',
    '(':            'lparen',
    ')':            'rparen',
    '{':            'lbrace',
    '}':            'rbrace',
    '==':           'eq',
    '!=':           'ne',
    '>':            'gt',
    '>=':           'gteq',
    '<':            'lt',
    '<=':           'lteq',
    '=':            'assign',
    '.':            'dot',
    ':':            'colon',
    '|':            'pipe',
    ',':            'comma'
}

# token type -> operator text; the assert below guards the bijection
# (two operators mapping to one token type would silently drop one).
reverse_operators = dict([(v, k) for k, v in operators.iteritems()])
assert len(operators) == len(reverse_operators), 'operators dropped'
# longest operators first so that e.g. '**' is preferred over '*'
operator_re = re.compile('(%s)' % '|'.join([re.escape(x) for x in
                         sorted(operators, key=lambda x: -len(x))]))

# single-character escape sequences understood in string literals
simple_escapes = {
    'a': '\a',
    'n': '\n',
    'r': '\r',
    'f': '\f',
    't': '\t',
    'v': '\v',
    '\\': '\\',
    '"': '"',
    "'": "'",
    '0': '\x00'
}
# escape introducers followed by a fixed number of hex digits
unicode_escapes = {
    'x': 2,
    'u': 4,
    'U': 8
}
def unescape_string(lineno, filename, s):
    r"""
    Unescape a string. Supported escapes:
        \a, \n, \r\, \f, \v, \\, \", \', \0

        \x00, \u0000, \U00000000, \N{...}

    Not supported are \101 because imho redundant.
    """
    result = []
    write = result.append
    chariter = iter(s)
    # NOTE: Python 2 iterator protocol (``.next`` method, not ``next()``).
    next_char = chariter.next

    # faster lookup
    sescapes = simple_escapes
    uescapes = unicode_escapes

    try:
        for char in chariter:
            if char == '\\':
                char = next_char()
                if char in sescapes:
                    write(sescapes[char])
                elif char in uescapes:
                    # \xHH / \uHHHH / \UHHHHHHHH: consume a fixed number
                    # of hex digits and emit the codepoint.
                    seq = [next_char() for x in xrange(uescapes[char])]
                    try:
                        write(unichr(int(''.join(seq), 16)))
                    except ValueError:
                        raise TemplateSyntaxError('invalid unicode codepoint',
                                                  lineno, filename)
                elif char == 'N':
                    # \N{UNICODE CHARACTER NAME} looked up via unicodedata.
                    if next_char() != '{':
                        raise TemplateSyntaxError('no name for codepoint',
                                                  lineno, filename)
                    seq = []
                    while 1:
                        char = next_char()
                        if char == '}':
                            break
                        seq.append(char)
                    try:
                        write(unicodedata.lookup(u''.join(seq)))
                    except KeyError:
                        raise TemplateSyntaxError('unknown character name',
                                                  lineno, filename)
                else:
                    # unknown escape: keep the backslash and char verbatim
                    write('\\' + char)
            else:
                write(char)
    except StopIteration:
        # the string ended in the middle of an escape sequence
        raise TemplateSyntaxError('invalid string escape', lineno, filename)
    return u''.join(result)
def unescape_regex(s):
    """
    Unescape rules for regular expressions.
    """
    # NOTE(review): ``safe_chars`` is not defined anywhere earlier in this
    # module -- presumably a module-level set defined elsewhere; confirm,
    # otherwise the escape branch raises NameError at runtime.
    # NOTE(review): ``in_escape`` is never set to True inside this loop as
    # written, so the escape-handling branch looks unreachable and the
    # function degenerates to ``u''.join(s)``; verify against upstream.
    buffer = []
    write = buffer.append
    in_escape = False
    for char in s:
        if in_escape:
            in_escape = False
            if char not in safe_chars:
                write('\\' + char)
            continue
        write(char)
    return u''.join(buffer)
class Failure(object):
    """
    Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        # Invoked like a rule callback; instead of producing a token it
        # aborts lexing with the configured exception class.
        raise self.error_class(self.message, lineno, filename)
class LexerMeta(type):
    """
    Metaclass for the lexer that caches instances for
    the same configuration in a weak value dictionary.
    """

    def __call__(cls, environment):
        # The key captures every environment option that affects lexing;
        # environments sharing these settings share one Lexer instance.
        key = hash((environment.block_start_string,
                    environment.block_end_string,
                    environment.variable_start_string,
                    environment.variable_end_string,
                    environment.comment_start_string,
                    environment.comment_end_string,
                    environment.trim_blocks))

        # use the cached lexer if possible
        # (weak values: an unused lexer can still be garbage collected)
        if key in _lexer_cache:
            return _lexer_cache[key]

        # create a new lexer and cache it
        lexer = type.__call__(cls, environment)
        _lexer_cache[key] = lexer
        return lexer
class Lexer(object):
    """
    Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """
    # Python 2 metaclass declaration; instances are cached per
    # delimiter-configuration by LexerMeta.
    __metaclass__ = LexerMeta

    def __init__(self, environment):
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags
        # each rule is (compiled regex, token(s) to emit, new state)
        tag_rules = [
            (whitespace_re, None, None),
            (float_re, 'float', None),
            (integer_re, 'integer', None),
            (name_re, 'name', None),
            (string_re, 'string', None),
            (regex_re, 'regex', None),
            (operator_re, 'operator', None)
        ]

        #: if variables and blocks have the same delimiters we won't
        #: receive any variable blocks in the parser. This variable is `True`
        #: if we need that.
        self.no_variable_block = (
            (environment.variable_start_string is
             environment.variable_end_string is None) or
            (environment.variable_start_string ==
             environment.block_start_string and
             environment.variable_end_string ==
             environment.block_end_string)
        )

        # assamble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = [
            ('comment', environment.comment_start_string),
            ('block', environment.block_start_string)
        ]
        if not self.no_variable_block:
            root_tag_rules.append(('variable',
                                   environment.variable_start_string))
        root_tag_rules.sort(lambda a, b: cmp(len(b[1]), len(a[1])))

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # global lexing rules
        # states: 'root' scans data until a tag opener; '*_begin' states
        # lex the inside of the corresponding tag until its end delimiter.
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    ['(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*%s)' % (
                        e(environment.block_start_string),
                        e(environment.block_start_string),
                        e(environment.block_end_string)
                    )] + [
                        '(?P<%s_begin>\s*%s\-|%s)' % (n, e(r), e(r))
                        for n, r in root_tag_rules
                    ])), ('data', '#bygroup'), '#bygroup'),
                # data
                (c('.+'), 'data', None)
            ],
            # comments
            'comment_begin': [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), ('comment', 'comment_end'), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            'block_begin': [
                (c('(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), 'block_end', '#pop'),
            ] + tag_rules,
            # raw block
            'raw_begin': [
                (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    e(environment.block_start_string),
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), ('data', 'raw_end'), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ]
        }

        # only add the variable rules to the list if we process variables
        # the variable_end_string variable could be None and break things.
        if not self.no_variable_block:
            self.rules['variable_begin'] = [
                (c('\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), 'variable_end', '#pop')
            ] + tag_rules

    def tokenize(self, source, filename=None):
        """
        Works like `tokeniter` but returns a tokenstream of tokens and not a
        generator or token tuples. Additionally all token values are already
        converted into types and postprocessed. For example keywords are
        already keyword tokens, not named tokens, comments are removed,
        integers and floats converted, strings unescaped etc.
        """
        def generate():
            for lineno, token, value in self.tokeniter(source, filename):
                if token in ('comment_begin', 'comment', 'comment_end'):
                    continue
                elif token == 'data':
                    # downcast to bytestring when possible (Python 2)
                    try:
                        value = str(value)
                    except UnicodeError:
                        pass
                elif token == 'name':
                    value = str(value)
                    if value in keywords:
                        # promote names that are keywords to keyword tokens
                        token = value
                        value = ''
                elif token == 'string':
                    # strip quotes and process escape sequences
                    value = unescape_string(lineno, filename, value[1:-1])
                    try:
                        value = str(value)
                    except UnicodeError:
                        pass
                elif token == 'regex':
                    # '@/pattern/flags' -> '(?flags)pattern'
                    args = value[value.rfind('/') + 1:]
                    value = unescape_regex(value[2:-(len(args) + 1)])
                    if args:
                        value = '(?%s)%s' % (args, value)
                elif token == 'integer':
                    value = int(value)
                elif token == 'float':
                    value = float(value)
                elif token == 'operator':
                    token = operators[value]
                    value = ''
                yield Token(lineno, token, value)
        return TokenStream(generate(), filename)

    def tokeniter(self, source, filename=None):
        """
        This method tokenizes the text and returns the tokens in a generator.
        Use this method if you just want to tokenize a template. The output
        you get is not compatible with the input the jinja parser wants. The
        parser uses the `tokenize` function with returns a `TokenStream` and
        keywords instead of just names.
        """
        # normalize newlines
        source = '\n'.join(source.splitlines())
        pos = 0
        lineno = 1
        stack = ['root']
        statetokens = self.rules['root']
        source_length = len(source)
        # tracks open {, (, [ so end delimiters inside balanced
        # expressions are lexed as operators instead of tag ends
        balancing_stack = []

        while True:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if not m:
                    continue
                # we only match blocks and variables if brances / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end'):
                    continue
                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # hidden group
                        if token is None:
                            g = m.group(idx)
                            if g:
                                lineno += g.count('\n')
                            continue
                        # failure group
                        elif token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in m.groupdict().iteritems():
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data:
                                yield lineno, token, data
                            lineno += data.count('\n')
                # strings as token just are yielded as it, but just
                # if the data is not empty
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected "%s"' %
                                                          data, lineno,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected "%s", '
                                                          'expected "%s"' %
                                                          (data, expected_op),
                                                          lineno, filename)
                    # yield items
                    if tokens is not None:
                        if data:
                            yield lineno, tokens, data
                        lineno += data.count('\n')
                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()
                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in m.groupdict().iteritems():
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we havn't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          filename)
| 38.44316 | 78 | 0.458651 |
4261b8a4c5a0d64f273826d7c36deb3b6b156eba | 1,870 | py | Python | Projetos/Image_converter/source/image_convert.py | JoaoEmanuell/Meus-Estudos-Python | f6f6eeb6016919e594613785ffe7136d74241ada | [
"MIT"
] | 2 | 2021-07-29T18:58:02.000Z | 2021-10-29T21:11:22.000Z | Projetos/Image_converter/source/image_convert.py | JoaoEmanuell/Meus-Estudos-Python | f6f6eeb6016919e594613785ffe7136d74241ada | [
"MIT"
] | null | null | null | Projetos/Image_converter/source/image_convert.py | JoaoEmanuell/Meus-Estudos-Python | f6f6eeb6016919e594613785ffe7136d74241ada | [
"MIT"
] | null | null | null | from PIL import Image
from pathlib import Path
class ImageConvert():
    """Convert an image file to another format, saving the result in a
    ``converted_images/`` folder next to the working directory."""

    def __init__(self, IMAGENAME, NEWIMAGEFORMAT) -> None:
        """Prepare a conversion of the desired image to the desired format.

        Args:
            IMAGENAME (str): path of the image to be converted.
            NEWIMAGEFORMAT (str): extension the image will be saved with.
        """
        self.PATH = Path().absolute()
        self.verfiy_if_converted_image_exists()
        self.IMS = f'{self.PATH}/converted_images/'
        self.name = str(IMAGENAME)
        self.NEW_IMAGE_FORMAT = str(NEWIMAGEFORMAT)
        self.IMAGE_FORMAT = self.VerifyExtensionFile(self.name.lower())

    def convertImage(self):
        """Convert the desired image.

        Returns:
            int: 0 if the conversion succeeds, otherwise 1.
        """
        try:
            with Image.open(self.name) as im:
                base = self.name.split('/')[-1]
                # BUG FIX: the original used str.replace, which swapped
                # *every* occurrence of the extension text in the name
                # (e.g. 'png_photo.png' -> 'jpg_photo.jpg') and silently
                # missed upper-case extensions such as '.PNG'.  Replace
                # only the trailing extension, case-insensitively.
                if self.IMAGE_FORMAT and \
                        base.lower().endswith('.' + self.IMAGE_FORMAT):
                    base = base[:-len(self.IMAGE_FORMAT)] + self.NEW_IMAGE_FORMAT
                self.name = base
                im.convert('RGB').save(f'{self.IMS}{self.name}')
            return 0
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt and
            # SystemExit are not swallowed; any conversion error still
            # yields the documented failure code.
            return 1

    def VerifyExtensionFile(self, imageName):
        """Return the first known extension found in ``imageName``.

        The original substring search is kept for compatibility (an
        extension anywhere in the name matches, not only as a suffix).

        Args:
            imageName (str): image name (expected lower-cased by caller).

        Returns:
            str | None: matched extension, or None when unknown.
        """
        VALIDEXTENSIONS = ('png', 'webp', 'jpeg', 'gif', 'bmp', 'tiff',
                           'pdf', 'eps', 'jpg')
        for ex in VALIDEXTENSIONS:
            if f'.{ex}' in imageName:
                return ex
        return None

    def verfiy_if_converted_image_exists(self):
        """Create the ``converted_images`` output folder if it is missing.

        (The misspelled name is kept intentionally: ``__init__`` and any
        external callers reference it.)
        """
        if not Path('converted_images').exists():
            Path('converted_images').mkdir()
99ecc1fcafd2f157b3ae5532465cf6186be3ebd1 | 1,718 | py | Python | plotly/validators/ohlc/decreasing/line/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 6 | 2019-05-03T02:12:04.000Z | 2020-03-01T06:33:21.000Z | plotly/validators/ohlc/decreasing/line/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | null | null | null | plotly/validators/ohlc/decreasing/line/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 5 | 2019-05-18T16:50:11.000Z | 2021-07-06T21:14:36.000Z |
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for ``ohlc.decreasing.line.width``: an
    animatable, non-negative style number."""

    def __init__(
        self,
        plotly_name='width',
        parent_name='ohlc.decreasing.line',
        **kwargs
    ):
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop() lets callers override each generated default
            anim=kwargs.pop('anim', True),
            edit_type=kwargs.pop('edit_type', 'style'),
            min=kwargs.pop('min', 0),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
import _plotly_utils.basevalidators
class DashValidator(_plotly_utils.basevalidators.DashValidator):
    """Auto-generated validator for ``ohlc.decreasing.line.dash``: one of
    the named dash styles."""

    def __init__(
        self, plotly_name='dash', parent_name='ohlc.decreasing.line', **kwargs
    ):
        super(DashValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'style'),
            role=kwargs.pop('role', 'style'),
            # allowed dash style names
            values=kwargs.pop(
                'values',
                ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Auto-generated validator for ``ohlc.decreasing.line.color``: an
    animatable style color."""

    def __init__(
        self,
        plotly_name='color',
        parent_name='ohlc.decreasing.line',
        **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            anim=kwargs.pop('anim', True),
            edit_type=kwargs.pop('edit_type', 'style'),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
| 26.430769 | 78 | 0.579744 |
3cc5c7620526ae1b462d981906b17d5ee8c00a5d | 11,778 | py | Python | tests/TestSuite.py | LuminosoInsight/aplus | a8ede978dcbfa24d0ad4b2e07fbc104fe662b5b6 | [
"MIT"
] | null | null | null | tests/TestSuite.py | LuminosoInsight/aplus | a8ede978dcbfa24d0ad4b2e07fbc104fe662b5b6 | [
"MIT"
] | null | null | null | tests/TestSuite.py | LuminosoInsight/aplus | a8ede978dcbfa24d0ad4b2e07fbc104fe662b5b6 | [
"MIT"
] | null | null | null | # This is my attempt to translate the
# Javascript promises-aplus test suite
# (https://github.com/promises-aplus/promises-tests)
# into Python and then apply it to the
# promises library I've created.
from nose.tools import assert_equals
from aplus import Promise
class Counter:
    """Mutable tick counter used to observe callback side effects in the
    Promises/A+ tests."""

    def __init__(self):
        self.count = 0

    def tick(self):
        """Record one invocation."""
        self.count += 1

    def value(self):
        """Return how many times tick() has been called."""
        return self.count
def test_3_2_1():
    """
    Test that the arguments to 'then' are optional.
    """
    p1 = Promise()
    p2 = p1.then()
    p3 = Promise()
    p4 = p3.then()
    # neither fulfillment nor rejection may raise with no callbacks
    p1.fulfill(5)
    p3.reject("How dare you!")


def test_3_2_1_1():
    """
    Test that the first argument to 'then' is ignored if it
    is not a function.
    """
    def testNonFunction(nonFunction, results):
        def foo(results, k, r):
            results[k] = r
        p1 = Promise()
        p2 = p1.then(nonFunction, lambda r: foo(results, str(nonFunction), r))
        p1.reject("Error: "+str(nonFunction))
    results = {}
    nonFunctions = [None, False, 5, {}, []]
    for v in nonFunctions:
        testNonFunction(v, results)
    # the rejection callback must have run for every non-function value
    for v in nonFunctions:
        assert_equals(results[str(v)], "Error: "+str(v))


def test_3_2_1_2():
    """
    Test that the second argument to 'then' is ignored if it
    is not a function.
    """
    def testNonFunction(nonFunction, results):
        def foo(results, k, r):
            results[k] = r
        p1 = Promise()
        p2 = p1.then(lambda r: foo(results, str(nonFunction), r), nonFunction)
        p1.fulfill("Error: "+str(nonFunction))
    results = {}
    nonFunctions = [None, False, 5, {}, []]
    for v in nonFunctions:
        testNonFunction(v, results)
    # the fulfillment callback must have run for every non-function value
    for v in nonFunctions:
        assert_equals(results[str(v)], "Error: "+str(v))
def test_3_2_2_1():
    """
    The first argument to 'then' must be called when a promise is
    fulfilled.
    """
    c = Counter()
    def check(v, c):
        assert_equals(v, 5)
        c.tick()
    p1 = Promise()
    p2 = p1.then(lambda v: check(v, c))
    p1.fulfill(5)
    assert_equals(1, c.value())


def test_3_2_2_2():
    """
    Make sure callbacks are never called more than once.
    """
    c = Counter()
    p1 = Promise()
    p2 = p1.then(lambda v: c.tick())
    p1.fulfill(5)
    try:
        # I throw an exception
        # (fulfilling an already-fulfilled promise is an error)
        p1.fulfill(5)
        assert False # Should not get here!
    except AssertionError:
        # This is expected
        pass
    assert_equals(1, c.value())


def test_3_2_2_3():
    """
    Make sure fulfilled callback never called if promise is rejected
    """
    cf = Counter()
    cr = Counter()
    p1 = Promise()
    p2 = p1.then(lambda v: cf.tick(), lambda r: cr.tick())
    p1.reject("Error")
    assert_equals(0, cf.value())
    assert_equals(1, cr.value())


def test_3_2_3_1():
    """
    The second argument to 'then' must be called when a promise is
    rejected.
    """
    c = Counter()
    def check(r, c):
        assert_equals(r, "Error")
        c.tick()
    p1 = Promise()
    p2 = p1.then(None, lambda r: check(r, c))
    p1.reject("Error")
    assert_equals(1, c.value())


def test_3_2_3_2():
    """
    Make sure callbacks are never called more than once.
    """
    c = Counter()
    p1 = Promise()
    p2 = p1.then(None, lambda v: c.tick())
    p1.reject("Error")
    try:
        # I throw an exception
        # (rejecting an already-rejected promise is an error)
        p1.reject("Error")
        assert False # Should not get here!
    except AssertionError:
        # This is expected
        pass
    assert_equals(1, c.value())


def test_3_2_3_3():
    """
    Make sure rejected callback never called if promise is fulfilled
    """
    cf = Counter()
    cr = Counter()
    p1 = Promise()
    p2 = p1.then(lambda v: cf.tick(), lambda r: cr.tick())
    p1.fulfill(5)
    assert_equals(0, cr.value())
    assert_equals(1, cf.value())
# The *_when variants resolve the promise after the 'then' calls are
# registered; the *_if variants resolve it before.  Both orderings must
# preserve callback registration order.

def test_3_2_5_1_when():
    """
    Then can be called multiple times on the same promise
    and callbacks must be called in the order of the
    then calls.
    """
    def add(l, v):
        l.append(v)
    p1 = Promise()
    order = []
    p2 = p1.then(lambda v: add(order, "p2"))
    p3 = p1.then(lambda v: add(order, "p3"))
    p1.fulfill(2)
    assert_equals(2, len(order))
    assert_equals("p2", order[0])
    assert_equals("p3", order[1])


def test_3_2_5_1_if():
    """
    Then can be called multiple times on the same promise
    and callbacks must be called in the order of the
    then calls.
    """
    def add(l, v):
        l.append(v)
    p1 = Promise()
    p1.fulfill(2)
    order = []
    p2 = p1.then(lambda v: add(order, "p2"))
    p3 = p1.then(lambda v: add(order, "p3"))
    assert_equals(2, len(order))
    assert_equals("p2", order[0])
    assert_equals("p3", order[1])


def test_3_2_5_2_when():
    """
    Then can be called multiple times on the same promise
    and callbacks must be called in the order of the
    then calls.
    """
    def add(l, v):
        l.append(v)
    p1 = Promise()
    order = []
    p2 = p1.then(None, lambda v: add(order, "p2"))
    p3 = p1.then(None, lambda v: add(order, "p3"))
    p1.reject("Error")
    assert_equals(2, len(order))
    assert_equals("p2", order[0])
    assert_equals("p3", order[1])


def test_3_2_5_2_if():
    """
    Then can be called multiple times on the same promise
    and callbacks must be called in the order of the
    then calls.
    """
    def add(l, v):
        l.append(v)
    p1 = Promise()
    p1.reject("Error")
    order = []
    p2 = p1.then(None, lambda v: add(order, "p2"))
    p3 = p1.then(None, lambda v: add(order, "p3"))
    assert_equals(2, len(order))
    assert_equals("p2", order[0])
    assert_equals("p3", order[1])
def test_3_2_6_1():
    """
    Promises returned by then must be fulfilled when the promise
    they are chained from is fulfilled IF the fulfillment value
    is not a promise.
    """
    p1 = Promise()
    pf = p1.then(lambda v: v*v)
    p1.fulfill(5)
    assert_equals(pf.value, 25)

    p2 = Promise()
    pr = p2.then(None, lambda r: 5)
    p2.reject("Error")
    assert_equals(5, pr.value)


def test_3_2_6_2_when():
    """
    Promises returned by then must be rejected when any of their
    callbacks throw an exception.
    """
    def fail(v):
        raise AssertionError("Exception Message")
    # callback raising on fulfillment rejects the chained promise
    p1 = Promise()
    pf = p1.then(fail)
    p1.fulfill(5)
    assert pf.isRejected()
    assert isinstance(pf.reason, AssertionError)
    assert_equals("Exception Message", str(pf.reason))
    # callback raising on rejection also rejects the chained promise
    p2 = Promise()
    pr = p2.then(None, fail)
    p2.reject("Error")
    assert pr.isRejected()
    assert isinstance(pr.reason, AssertionError)
    assert_equals("Exception Message", str(pr.reason))


def test_3_2_6_2_if():
    """
    Promises returned by then must be rejected when any of their
    callbacks throw an exception.
    """
    def fail(v):
        raise AssertionError("Exception Message")
    # same as the _when variant, but the root promise is already settled
    p1 = Promise()
    p1.fulfill(5)
    pf = p1.then(fail)
    assert pf.isRejected()
    assert isinstance(pf.reason, AssertionError)
    assert_equals("Exception Message", str(pf.reason))
    p2 = Promise()
    p2.reject("Error")
    pr = p2.then(None, fail)
    assert pr.isRejected()
    assert isinstance(pr.reason, AssertionError)
    assert_equals("Exception Message", str(pr.reason))
def test_3_2_6_3_when_fulfilled():
    """
    Testing return of pending promises to make
    sure they are properly chained.
    This covers the case where the root promise
    is fulfilled after the chaining is defined.
    """
    p1 = Promise()
    pending = Promise()
    pf = p1.then(lambda r: pending)
    assert pending.isPending()
    assert pf.isPending()
    p1.fulfill(10)
    pending.fulfill(5)
    # the chained promise must adopt the returned promise's state
    assert pending.isFulfilled()
    assert_equals(5, pending.value)
    assert pf.isFulfilled()
    assert_equals(5, pf.value)
    p2 = Promise()
    bad = Promise()
    pr = p2.then(lambda r: bad)
    assert bad.isPending()
    assert pr.isPending()
    p2.fulfill(10)
    bad.reject("Error")
    assert bad.isRejected()
    assert_equals("Error", bad.reason)
    assert pr.isRejected()
    assert_equals("Error", pr.reason)


def test_3_2_6_3_if_fulfilled():
    """
    Testing return of pending promises to make
    sure they are properly chained.
    This covers the case where the root promise
    is fulfilled before the chaining is defined.
    """
    p1 = Promise()
    p1.fulfill(10)
    pending = Promise()
    pending.fulfill(5)
    pf = p1.then(lambda r: pending)
    assert pending.isFulfilled()
    assert_equals(5, pending.value)
    assert pf.isFulfilled()
    assert_equals(5, pf.value)
    p2 = Promise()
    p2.fulfill(10)
    bad = Promise()
    bad.reject("Error")
    pr = p2.then(lambda r: bad)
    assert bad.isRejected()
    assert_equals("Error", bad.reason)
    assert pr.isRejected()
    assert_equals("Error", pr.reason)


def test_3_2_6_3_when_rejected():
    """
    Testing return of pending promises to make
    sure they are properly chained.
    This covers the case where the root promise
    is rejected after the chaining is defined.
    """
    p1 = Promise()
    pending = Promise()
    pr = p1.then(None, lambda r: pending)
    assert pending.isPending()
    assert pr.isPending()
    p1.reject("Error")
    pending.fulfill(10)
    assert pending.isFulfilled()
    assert_equals(10, pending.value)
    assert pr.isFulfilled()
    assert_equals(10, pr.value)
    p2 = Promise()
    bad = Promise()
    pr = p2.then(None, lambda r: bad)
    assert bad.isPending()
    assert pr.isPending()
    p2.reject("Error")
    bad.reject("Assertion")
    assert bad.isRejected()
    assert_equals("Assertion", bad.reason)
    assert pr.isRejected()
    assert_equals("Assertion", pr.reason)


def test_3_2_6_3_if_rejected():
    """
    Testing return of pending promises to make
    sure they are properly chained.
    This covers the case where the root promise
    is rejected before the chaining is defined.
    """
    p1 = Promise()
    p1.reject("Error")
    pending = Promise()
    pending.fulfill(10)
    pr = p1.then(None, lambda r: pending)
    assert pending.isFulfilled()
    assert_equals(10, pending.value)
    assert pr.isFulfilled()
    assert_equals(10, pr.value)
    p2 = Promise()
    p2.reject("Error")
    bad = Promise()
    bad.reject("Assertion")
    pr = p2.then(None, lambda r: bad)
    assert bad.isRejected()
    assert_equals("Assertion", bad.reason)
    assert pr.isRejected()
    assert_equals("Assertion", pr.reason)
# Non-callable 'then' arguments must be ignored and the chained promise
# must simply adopt the root promise's value/reason.

def test_3_2_6_4_pending():
    """
    Handles the case where the arguments to then
    are not functions or promises.
    """
    p1 = Promise()
    p2 = p1.then(5)
    p1.fulfill(10)
    assert_equals(10, p1.value)
    assert p2.isFulfilled()
    assert_equals(10, p2.value)


def test_3_2_6_4_fulfilled():
    """
    Handles the case where the arguments to then
    are values, not functions or promises.
    """
    p1 = Promise()
    p1.fulfill(10)
    p2 = p1.then(5)
    assert_equals(10, p1.value)
    assert p2.isFulfilled()
    assert_equals(10, p2.value)


def test_3_2_6_5_pending():
    """
    Handles the case where the arguments to then
    are values, not functions or promises.
    """
    p1 = Promise()
    p2 = p1.then(None, 5)
    p1.reject("Error")
    assert_equals("Error", p1.reason)
    assert p2.isRejected()
    assert_equals("Error", p2.reason)


def test_3_2_6_5_rejected():
    """
    Handles the case where the arguments to then
    are values, not functions or promises.
    """
    p1 = Promise()
    p1.reject("Error")
    p2 = p1.then(None, 5)
    assert_equals("Error", p1.reason)
    assert p2.isRejected()
    assert_equals("Error", p2.reason)
| 24.900634 | 78 | 0.61946 |
34760988a56a4da762b7167c6d388ad71240fb1f | 39,070 | py | Python | py3k-sympy/sympy/physics/quantum/gate.py | cielavenir/sympy | ada04faf48a4eb6c1529e8a5d49a6f2f9ce2616e | [
"BSD-3-Clause"
] | null | null | null | py3k-sympy/sympy/physics/quantum/gate.py | cielavenir/sympy | ada04faf48a4eb6c1529e8a5d49a6f2f9ce2616e | [
"BSD-3-Clause"
] | null | null | null | py3k-sympy/sympy/physics/quantum/gate.py | cielavenir/sympy | ada04faf48a4eb6c1529e8a5d49a6f2f9ce2616e | [
"BSD-3-Clause"
] | null | null | null | """An implementation of gates that act on qubits.
Gates are unitary operators that act on the space of qubits.
Medium Term Todo:
* Optimize Gate._apply_operators_Qubit to remove the creation of many
intermediate Qubit objects.
* Add commutation relationships to all operators and use this in gate_sort.
* Fix gate_sort and gate_simp.
* Get multi-target UGates plotting properly.
* Get UGate to work with either sympy/numpy matrices and output either
format. This should also use the matrix slots.
"""
from itertools import chain
import random
from sympy import Mul, Pow, Integer, Matrix, Rational, Tuple, I, sqrt, Add
from sympy.core.numbers import Number
from sympy.core.compatibility import is_sequence
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.operator import UnitaryOperator, Operator, HermitianOperator
from sympy.physics.quantum.matrixutils import (
matrix_tensor_product, matrix_eye
)
from sympy.physics.quantum.matrixcache import matrix_cache
# Public API of this module; includes the short aliases defined near the
# bottom of the file (H, X, Y, Z, T, S, CNOT, SWAP, ...).
__all__ = [
    'Gate',
    'CGate',
    'UGate',
    'OneQubitGate',
    'TwoQubitGate',
    'IdentityGate',
    'HadamardGate',
    'XGate',
    'YGate',
    'ZGate',
    'TGate',
    'PhaseGate',
    'SwapGate',
    'CNotGate',
    # Aliased gate names
    'CNOT',
    'SWAP',
    'H',
    'X',
    'Y',
    'Z',
    'T',
    'S',
    'Phase',
    'normalized',
    'gate_sort',
    'gate_simp',
    'random_circuit',
]
# Unevaluated 1/sqrt(2) constant (kept symbolic via evaluate=False).
sqrt2_inv = Pow(2, Rational(-1,2), evaluate=False)
#-----------------------------------------------------------------------------
# Gate Super-Classes
#-----------------------------------------------------------------------------
# Module-level flag read by HadamardGate.get_target_matrix and toggled by
# the public normalized() function below.
_normalized = True
def normalized(normalize):
    """Globally enable or disable the 1/sqrt(2) Hadamard normalization.

    This sets the module-level ``_normalized`` flag, which
    ``HadamardGate.get_target_matrix`` consults to decide whether the
    returned matrix carries the leading 1/sqrt(2) factor.  Disabling it
    can make printed expressions look cleaner.

    Parameters
    ----------
    normalize : bool
        True to include the 1/sqrt(2) factor in Hadamard gates, False to
        leave it off.
    """
    global _normalized
    _normalized = normalize
def _validate_targets_controls(tandc):
    """Validate a collection of target/control qubit labels.

    Parameters
    ----------
    tandc : iterable
        The combined target and control qubit labels of a gate.  Each
        entry must be an Integer (checked via its ``is_Integer``
        attribute) and no label may appear twice.

    Raises
    ------
    TypeError
        If any entry is not an Integer.
    QuantumError
        If the same qubit label is used more than once.
    """
    tandc = list(tandc)
    # Check for integers
    for bit in tandc:
        if not bit.is_Integer:
            # BUG FIX: the previous code did ``tandc[bit]``, which tried to
            # index the list with the (non-integer) offending object and so
            # raised "list indices must be integers" instead of a useful
            # message.  Report the offending value itself.
            raise TypeError('Integer expected, got: %r' % bit)
    # Detect duplicates (set() already returns its own collection; no need
    # to wrap it in list() just to take its length).
    if len(set(tandc)) != len(tandc):
        raise QuantumError(
            'Target/control qubits in a gate cannot be duplicated'
        )
class Gate(UnitaryOperator):
    """Non-controlled unitary gate operator that acts on qubits.
    This is a general abstract gate that needs to be subclassed to do anything
    useful.
    Parameters
    ----------
    label : tuple, int
        A list of the target qubits (as ints) that the gate will apply to.
    Examples
    --------
    """
    _label_separator = ','
    # Printable names; subclasses override both.
    gate_name = 'G'
    gate_name_latex = 'G'
    #-------------------------------------------------------------------------
    # Initialization/creation
    #-------------------------------------------------------------------------
    @classmethod
    def _eval_args(cls, args):
        # Let the operator machinery normalize the args, then enforce that
        # every label is a unique Integer qubit index.
        args = UnitaryOperator._eval_args(args)
        _validate_targets_controls(args)
        return args
    @classmethod
    def _eval_hilbert_space(cls, args):
        """This returns the smallest possible Hilbert space."""
        return ComplexSpace(2)**(max(args)+1)
    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------
    @property
    def nqubits(self):
        """The total number of qubits this gate acts on.
        For controlled gate subclasses this includes both target and control
        qubits, so that, for examples the CNOT gate acts on 2 qubits.
        """
        return len(self.targets)
    @property
    def min_qubits(self):
        """The minimum number of qubits this gate needs to act on."""
        return max(self.targets)+1
    @property
    def targets(self):
        """A tuple of target qubits."""
        return self.label
    @property
    def gate_name_plot(self):
        # LaTeX name wrapped in $...$ for use in matplotlib circuit plots.
        return r'$%s$' % self.gate_name_latex
    #-------------------------------------------------------------------------
    # Gate methods
    #-------------------------------------------------------------------------
    def get_target_matrix(self, format='sympy'):
        """The matrix rep. of the target part of the gate.
        Parameters
        ----------
        format : str
            The format string ('sympy','numpy', etc.)
        """
        raise NotImplementedError('get_target_matrix is not implemented in Gate.')
    #-------------------------------------------------------------------------
    # Apply
    #-------------------------------------------------------------------------
    def _apply_operator_IntQubit(self, qubits, **options):
        """Redirect an apply from IntQubit to Qubit"""
        return self._apply_operator_Qubit(qubits, **options)
    def _apply_operator_Qubit(self, qubits, **options):
        """Apply this gate to a Qubit.

        Picks the column of the target matrix selected by the current
        values of the target qubits, then returns the linear combination
        of flipped-qubit states weighted by that column's entries.
        """
        # Check number of qubits this gate acts on.
        if qubits.nqubits < self.min_qubits:
            raise QuantumError(
                'Gate needs a minimum of %r qubits to act on, got: %r' %\
                    (self.min_qubits, qubits.nqubits)
            )
        # If the controls are not met, just return
        if isinstance(self, CGate):
            if not self.eval_controls(qubits):
                return qubits
        targets = self.targets
        target_matrix = self.get_target_matrix(format='sympy')
        # Find which column of the target matrix this applies to.
        # The target-qubit values are read as a little-endian binary number.
        column_index = 0
        n = 1
        for target in targets:
            column_index += n*qubits[target]
            n = n<<1
        column = target_matrix[:,int(column_index)]
        # Now apply each column element to the qubit.
        result = 0
        for index in range(column.rows):
            # TODO: This can be optimized to reduce the number of Qubit
            # creations. We should simply manipulate the raw list of qubit
            # values and then build the new Qubit object once.
            # Make a copy of the incoming qubits.
            new_qubit = qubits.__class__(*qubits.args)
            # Flip the bits that need to be flipped.
            for bit in range(len(targets)):
                if new_qubit[targets[bit]] != (index>>bit)&1:
                    new_qubit = new_qubit.flip(targets[bit])
            # The value in that row and column times the flipped-bit qubit
            # is the result for that part.
            result += column[index]*new_qubit
        return result
    #-------------------------------------------------------------------------
    # Represent
    #-------------------------------------------------------------------------
    def _represent_default_basis(self, **options):
        # Default representation basis is the computational (Z) basis.
        return self._represent_ZGate(None, **options)
    def _represent_ZGate(self, basis, **options):
        # Build the full 2**nqubits matrix; the caller must supply nqubits
        # explicitly via options.
        format = options.get('format','sympy')
        nqubits = options.get('nqubits',0)
        if nqubits == 0:
            raise QuantumError('The number of qubits must be given as nqubits.')
        # Make sure we have enough qubits for the gate.
        if nqubits < self.min_qubits:
            raise QuantumError(
                'The number of qubits %r is too small for the gate.' % nqubits
            )
        target_matrix = self.get_target_matrix(format)
        targets = self.targets
        if isinstance(self, CGate):
            controls = self.controls
        else:
            controls = []
        m = represent_zbasis(
            controls, targets, target_matrix, nqubits, format
        )
        return m
    #-------------------------------------------------------------------------
    # Print methods
    #-------------------------------------------------------------------------
    def _print_contents(self, printer, *args):
        label = self._print_label(printer, *args)
        return '%s(%s)' % (self.gate_name, label)
    def _print_contents_pretty(self, printer, *args):
        a = stringPict(str(self.gate_name))
        b = self._print_label_pretty(printer, *args)
        return self._print_subscript_pretty(a, b)
    def _print_contents_latex(self, printer, *args):
        label = self._print_label(printer, *args)
        return '%s_{%s}' % (self.gate_name_latex, label)
    def plot_gate(self, axes, gate_idx, gate_grid, wire_grid):
        # Subclasses that can be drawn implement this for circuit plotting.
        raise NotImplementedError('plot_gate is not implemented.')
class CGate(Gate):
    """A general unitary gate with control qubits.
    A general control gate applies a target gate to a set of targets if all
    of the control qubits have a particular values (set by
    ``CGate.control_value``).
    Parameters
    ----------
    label : tuple
        The label in this case has the form (controls, gate), where controls
        is a tuple/list of control qubits (as ints) and gate is a ``Gate``
        instance that is the target operator.
    Examples
    --------
    """
    gate_name = 'C'
    gate_name_latex = 'C'
    # The values this class controls for.
    control_value = Integer(1)
    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------
    @classmethod
    def _eval_args(cls, args):
        # _eval_args has the right logic for the controls argument.
        controls = args[0]
        gate = args[1]
        if not is_sequence(controls):
            # Allow a single control qubit to be given without a tuple.
            controls = (controls,)
        controls = UnitaryOperator._eval_args(controls)
        # Controls and targets together must be unique Integer indices.
        _validate_targets_controls(chain(controls,gate.targets))
        return (controls, gate)
    @classmethod
    def _eval_hilbert_space(cls, args):
        """This returns the smallest possible Hilbert space."""
        return ComplexSpace(2)**max(max(args[0])+1,args[1].min_qubits)
    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------
    @property
    def nqubits(self):
        """The total number of qubits this gate acts on.
        For controlled gate subclasses this includes both target and control
        qubits, so that, for examples the CNOT gate acts on 2 qubits.
        """
        return len(self.targets)+len(self.controls)
    @property
    def min_qubits(self):
        """The minimum number of qubits this gate needs to act on."""
        return max(max(self.controls),max(self.targets))+1
    @property
    def targets(self):
        """A tuple of target qubits."""
        return self.gate.targets
    @property
    def controls(self):
        """A tuple of control qubits."""
        return tuple(self.label[0])
    @property
    def gate(self):
        """The non-controlled gate that will be applied to the targets."""
        return self.label[1]
    #-------------------------------------------------------------------------
    # Gate methods
    #-------------------------------------------------------------------------
    def get_target_matrix(self, format='sympy'):
        # Delegate to the wrapped (uncontrolled) gate.
        return self.gate.get_target_matrix(format)
    def eval_controls(self, qubit):
        """Return True/False to indicate if the controls are satisfied."""
        return all([qubit[bit]==self.control_value for bit in self.controls])
    def decompose(self, **options):
        """Decompose the controlled gate into CNOT and single qubits gates.

        NOTE(review): with exactly one control and a target gate that is
        neither YGate nor ZGate, this method falls off the end and
        implicitly returns None rather than self -- confirm whether that
        is intentional before relying on the return value.
        """
        if len(self.controls) == 1:
            c = self.controls[0]
            t = self.gate.targets[0]
            if isinstance(self.gate, YGate):
                # Controlled-Y = S . CNOT . S . Z on the target wire.
                g1 = PhaseGate(t)
                g2 = CNotGate(c, t)
                g3 = PhaseGate(t)
                g4 = ZGate(t)
                return g1*g2*g3*g4
            if isinstance(self.gate, ZGate):
                # Controlled-Z = H . CNOT . H on the target wire.
                g1 = HadamardGate(t)
                g2 = CNotGate(c, t)
                g3 = HadamardGate(t)
                return g1*g2*g3
        else:
            return self
    #-------------------------------------------------------------------------
    # Print methods
    #-------------------------------------------------------------------------
    def _print_contents(self, printer, *args):
        controls = self._print_sequence(self.controls, ',', printer, *args)
        gate = printer._print(self.gate, *args)
        return '%s((%s),%s)' %\
            (self.gate_name, controls, gate)
    def _print_contents_pretty(self, printer, *args):
        controls = self._print_sequence_pretty(self.controls, ',', printer, *args)
        gate = printer._print(self.gate)
        gate_name = stringPict(str(self.gate_name))
        first = self._print_subscript_pretty(gate_name, controls)
        gate = self._print_parens_pretty(gate)
        final = prettyForm(*first.right((gate)))
        return final
    def _print_contents_latex(self, printer, *args):
        controls = self._print_sequence(self.controls, ',', printer, *args)
        gate = printer._print(self.gate, *args)
        return r'%s_{%s}{\left(%s\right)}' %\
            (self.gate_name_latex, controls, gate)
    def plot_gate(self, circ_plot, gate_idx):
        # Draw a vertical control line spanning all involved wires, a dot on
        # each control wire, then the wrapped gate's own symbol.
        min_wire = int(min(chain(self.controls, self.targets)))
        max_wire = int(max(chain(self.controls, self.targets)))
        circ_plot.control_line(gate_idx, min_wire, max_wire)
        for c in self.controls:
            circ_plot.control_point(gate_idx, int(c))
        self.gate.plot_gate(circ_plot, gate_idx)
class UGate(Gate):
    """General gate specified by a set of targets and a target matrix.
    Parameters
    ----------
    label : tuple
        A tuple of the form (targets, U), where targets is a tuple of the
        target qubits and U is a unitary matrix with dimension of
        len(targets).
    """
    gate_name = 'U'
    gate_name_latex = 'U'
    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------
    @classmethod
    def _eval_args(cls, args):
        # Normalize the targets (allowing a bare int) and validate them,
        # then check the matrix is square with dimension 2**len(targets).
        targets = args[0]
        if not is_sequence(targets):
            targets = (targets,)
        targets = Gate._eval_args(targets)
        _validate_targets_controls(targets)
        mat = args[1]
        if not isinstance(mat, Matrix):
            raise TypeError('Matrix expected, got: %r' % mat)
        dim = 2**len(targets)
        if not all([dim == shape for shape in mat.shape]):
            raise IndexError(
                'Number of targets must match the matrix size: %r %r' %\
                (targets, mat)
            )
        return (targets, mat)
    @classmethod
    def _eval_hilbert_space(cls, args):
        """This returns the smallest possible Hilbert space."""
        return ComplexSpace(2)**(max(args[0])+1)
    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------
    @property
    def targets(self):
        """A tuple of target qubits."""
        return tuple(self.label[0])
    #-------------------------------------------------------------------------
    # Gate methods
    #-------------------------------------------------------------------------
    def get_target_matrix(self, format='sympy'):
        """The matrix rep. of the target part of the gate.
        Parameters
        ----------
        format : str
            The format string ('sympy','numpy', etc.)
        """
        return self.label[1]
    #-------------------------------------------------------------------------
    # Print methods
    #-------------------------------------------------------------------------
    def _print_contents(self, printer, *args):
        # NOTE(review): _print_targets is not defined in this file;
        # presumably inherited from a base class -- verify it exists.
        targets = self._print_targets(printer, *args)
        return '%s(%s)' % (self.gate_name, targets)
    def _print_contents_pretty(self, printer, *args):
        targets = self._print_sequence_pretty(self.targets, ',', printer, *args)
        gate_name = stringPict(str(self.gate_name))
        return self._print_subscript_pretty(gate_name, targets)
    def _print_contents_latex(self, printer, *args):
        targets = self._print_sequence(self.targets, ',', printer, *args)
        return r'%s_{%s}' % (self.gate_name_latex, targets)
    def plot_gate(self, circ_plot, gate_idx):
        # Drawn as a single-qubit box on the first target wire only.
        circ_plot.one_qubit_box(
            self.gate_name_plot,
            gate_idx, int(self.targets[0])
        )
class OneQubitGate(Gate):
    """A single qubit unitary gate base class."""
    nqubits = Integer(1)
    def plot_gate(self, circ_plot, gate_idx):
        circ_plot.one_qubit_box(
            self.gate_name_plot,
            gate_idx, int(self.targets[0])
        )
    def _eval_commutator(self, other, **hints):
        # Gates on different qubits commute; a gate always commutes with
        # itself ([A, A] == 0).  Everything else falls back to the generic
        # Operator logic.
        if isinstance(other, OneQubitGate):
            if self.targets != other.targets or self.__class__ == other.__class__:
                return Integer(0)
        return Operator._eval_commutator(self, other, **hints)
    def _eval_anticommutator(self, other, **hints):
        # Same dispatch as above: for commuting pairs {A, B} = 2*A*B.
        if isinstance(other, OneQubitGate):
            if self.targets != other.targets or self.__class__ == other.__class__:
                return Integer(2)*self*other
        return Operator._eval_anticommutator(self, other, **hints)
class TwoQubitGate(Gate):
    """A two qubit unitary gate base class."""
    # Fixed qubit count; overrides the Gate.nqubits property.
    nqubits = Integer(2)
#-----------------------------------------------------------------------------
# Single Qubit Gates
#-----------------------------------------------------------------------------
class IdentityGate(OneQubitGate):
    """The single qubit identity gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    --------
    """
    gate_name = '1'
    gate_name_latex = '1'
    def get_target_matrix(self, format='sympy'):
        # 2x2 identity from the shared matrix cache, in the requested format.
        return matrix_cache.get_matrix('eye2', format)
    def _eval_commutator(self, other, **hints):
        # The identity commutes with everything: [1, A] == 0.
        return Integer(0)
    def _eval_anticommutator(self, other, **hints):
        # {1, A} == 2*A.
        return Integer(2)*other
class HadamardGate(OneQubitGate):
    """The single qubit Hadamard gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    --------
    """
    gate_name = 'H'
    gate_name_latex = 'H'
    def get_target_matrix(self, format='sympy'):
        # Honor the module-level normalized() setting: 'H' is the cached
        # matrix with the 1/sqrt(2) prefactor, 'Hsqrt2' is the bare one.
        if _normalized:
            return matrix_cache.get_matrix('H', format)
        else:
            return matrix_cache.get_matrix('Hsqrt2', format)
    # (Anti)commutators of H with the Pauli gates; presumably resolved by
    # the Commutator/AntiCommutator machinery from the other operand's
    # class name embedded in the method name.
    def _eval_commutator_XGate(self, other, **hints):
        return I*sqrt(2)*YGate(self.targets[0])
    def _eval_commutator_YGate(self, other, **hints):
        return I*sqrt(2)*(ZGate(self.targets[0])-XGate(self.targets[0]))
    def _eval_commutator_ZGate(self, other, **hints):
        return -I*sqrt(2)*YGate(self.targets[0])
    def _eval_anticommutator_XGate(self, other, **hints):
        return sqrt(2)*IdentityGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        return Integer(0)
    def _eval_anticommutator_ZGate(self, other, **hints):
        return sqrt(2)*IdentityGate(self.targets[0])
class XGate(HermitianOperator, OneQubitGate):
    """The single qubit X, or NOT, gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    --------
    """
    gate_name = 'X'
    gate_name_latex = 'X'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('X', format)
    def plot_gate(self, circ_plot, gate_idx):
        # Drawn as the circled-plus NOT symbol rather than a boxed letter.
        circ_plot.not_point(
            gate_idx, int(self.label[0])
        )
    # Pauli algebra: [X, Y] = 2iZ; {X, X} = 2; {X, Y} = {X, Z} = 0.
    def _eval_commutator_YGate(self, other, **hints):
        return Integer(2)*I*ZGate(self.targets[0])
    def _eval_anticommutator_XGate(self, other, **hints):
        return Integer(2)*IdentityGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        return Integer(0)
    def _eval_anticommutator_ZGate(self, other, **hints):
        return Integer(0)
class YGate(HermitianOperator, OneQubitGate):
    """The single qubit Y gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    --------
    """
    gate_name = 'Y'
    gate_name_latex = 'Y'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('Y', format)
    # Pauli algebra: [Y, Z] = 2iX; {Y, Y} = 2; {Y, Z} = 0.
    def _eval_commutator_ZGate(self, other, **hints):
        return Integer(2)*I*XGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        return Integer(2)*IdentityGate(self.targets[0])
    def _eval_anticommutator_ZGate(self, other, **hints):
        return Integer(0)
class ZGate(HermitianOperator, OneQubitGate):
    """The single qubit Z gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    --------
    """
    gate_name = 'Z'
    gate_name_latex = 'Z'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('Z', format)
    # Pauli algebra: [Z, X] = 2iY; {Z, Y} = 0.
    def _eval_commutator_XGate(self, other, **hints):
        return Integer(2)*I*YGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        return Integer(0)
class PhaseGate(OneQubitGate):
    """The single qubit phase, or S, gate.
    This gate rotates the phase of the state by pi/2 if the state is |1> and
    does nothing if the state is |0>.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    --------
    """
    gate_name = 'S'
    gate_name_latex = 'S'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('S', format)
    # S is diagonal, so it commutes with the other diagonal gates Z and T.
    def _eval_commutator_ZGate(self, other, **hints):
        return Integer(0)
    def _eval_commutator_TGate(self, other, **hints):
        return Integer(0)
class TGate(OneQubitGate):
    """The single qubit pi/8 gate.
    This gate rotates the phase of the state by pi/4 if the state is |1> and
    does nothing if the state is |0>.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    --------
    """
    gate_name = 'T'
    gate_name_latex = 'T'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('T', format)
    # T is diagonal, so it commutes with the other diagonal gates Z and S.
    def _eval_commutator_ZGate(self, other, **hints):
        return Integer(0)
    def _eval_commutator_PhaseGate(self, other, **hints):
        return Integer(0)
# Aliases for gate names; these short forms are also exported via __all__.
H = HadamardGate
X = XGate
Y = YGate
Z = ZGate
T = TGate
Phase = S = PhaseGate
#-----------------------------------------------------------------------------
# 2 Qubit Gates
#-----------------------------------------------------------------------------
class CNotGate(CGate, TwoQubitGate):
    """Two qubit controlled-NOT.
    This gate performs the NOT or X gate on the target qubit if the control
    qubits all have the value 1.
    Parameters
    ----------
    label : tuple
        A tuple of the form (control, target).
    Examples
    --------
    """
    gate_name = 'CNOT'
    gate_name_latex = 'CNOT'
    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------
    @classmethod
    def _eval_args(cls, args):
        # Unlike CGate, the label here is a flat (control, target) pair, so
        # the plain Gate validation applies.
        args = Gate._eval_args(args)
        return args
    @classmethod
    def _eval_hilbert_space(cls, args):
        """This returns the smallest possible Hilbert space."""
        return ComplexSpace(2)**(max(args)+1)
    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------
    @property
    def min_qubits(self):
        """The minimum number of qubits this gate needs to act on."""
        return max(self.label)+1
    @property
    def targets(self):
        """A tuple of target qubits."""
        return (self.label[1],)
    @property
    def controls(self):
        """A tuple of control qubits."""
        return (self.label[0],)
    @property
    def gate(self):
        """The non-controlled gate that will be applied to the targets."""
        return XGate(self.label[1])
    #-------------------------------------------------------------------------
    # Print methods
    #-------------------------------------------------------------------------
    # The default printing of Gate works better than those of CGate, so we
    # go around the overridden methods in CGate.
    def _print_contents(self, printer, *args):
        return Gate._print_contents(self, printer, *args)
    def _print_contents_pretty(self, printer, *args):
        return Gate._print_contents_pretty(self, printer, *args)
    def _print_contents_latex(self, printer, *args):
        return Gate._print_contents_latex(self, printer, *args)
    #-------------------------------------------------------------------------
    # Commutator/AntiCommutator
    #-------------------------------------------------------------------------
    def _eval_commutator_ZGate(self, other, **hints):
        """[CNOT(i, j), Z(i)] == 0."""
        if self.controls[0] == other.targets[0]:
            return Integer(0)
        else:
            raise NotImplementedError('Commutator not implemented: %r' % other)
    def _eval_commutator_TGate(self, other, **hints):
        """[CNOT(i, j), T(i)] == 0."""
        # T acts diagonally like Z, so the same control-wire test applies.
        return self._eval_commutator_ZGate(other, **hints)
    def _eval_commutator_PhaseGate(self, other, **hints):
        """[CNOT(i, j), S(i)] == 0."""
        return self._eval_commutator_ZGate(other, **hints)
    def _eval_commutator_XGate(self, other, **hints):
        """[CNOT(i, j), X(j)] == 0."""
        if self.targets[0] == other.targets[0]:
            return Integer(0)
        else:
            raise NotImplementedError('Commutator not implemented: %r' % other)
    def _eval_commutator_CNotGate(self, other, **hints):
        """[CNOT(i, j), CNOT(i,k)] == 0."""
        if self.controls[0] == other.controls[0]:
            return Integer(0)
        else:
            raise NotImplementedError('Commutator not implemented: %r' % other)
class SwapGate(TwoQubitGate):
    """Two qubit SWAP gate.
    This gate swap the values of the two qubits.
    Parameters
    ----------
    label : tuple
        A tuple of the form (target1, target2).
    Examples
    --------
    """
    gate_name = 'SWAP'
    gate_name_latex = 'SWAP'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('SWAP', format)
    def decompose(self, **options):
        """Decompose the SWAP gate into CNOT gates."""
        # Standard identity: SWAP(i, j) = CNOT(i, j) CNOT(j, i) CNOT(i, j).
        i,j = self.targets[0], self.targets[1]
        g1 = CNotGate(i, j)
        g2 = CNotGate(j, i)
        return g1*g2*g1
    def plot_gate(self, circ_plot, gate_idx):
        # Drawn as two X markers joined by a vertical line.
        min_wire = int(min(self.targets))
        max_wire = int(max(self.targets))
        circ_plot.control_line(gate_idx, min_wire, max_wire)
        circ_plot.swap_point(gate_idx, min_wire)
        circ_plot.swap_point(gate_idx, max_wire)
    def _represent_ZGate(self, basis, **options):
        """Represent the SWAP gate in the computational basis.
        The following representation is used to compute this:
        SWAP = |1><1|x|1><1| + |0><0|x|0><0| + |1><0|x|0><1| + |0><1|x|1><0|
        """
        format = options.get('format', 'sympy')
        targets = [int(t) for t in self.targets]
        min_target = min(targets)
        max_target = max(targets)
        nqubits = options.get('nqubits',self.min_qubits)
        op01 = matrix_cache.get_matrix('op01', format)
        op10 = matrix_cache.get_matrix('op10', format)
        op11 = matrix_cache.get_matrix('op11', format)
        op00 = matrix_cache.get_matrix('op00', format)
        eye2 = matrix_cache.get_matrix('eye2', format)
        result = None
        # Sum the four tensor-product terms of the expansion above, placing
        # the operator factors on the two target wires (identity elsewhere).
        for i, j in ((op01,op10),(op10,op01),(op00,op00),(op11,op11)):
            product = nqubits*[eye2]
            product[nqubits-min_target-1] = i
            product[nqubits-max_target-1] = j
            new_result = matrix_tensor_product(*product)
            if result is None:
                result = new_result
            else:
                result = result + new_result
        return result
# Aliases for gate names; exported via __all__ alongside the classes.
CNOT = CNotGate
SWAP = SwapGate
#-----------------------------------------------------------------------------
# Represent
#-----------------------------------------------------------------------------
def represent_zbasis(controls, targets, target_matrix, nqubits, format='sympy'):
    """Represent a gate with controls, targets and target_matrix.
    This function does the low-level work of representing gates as matrices
    in the standard computational basis (ZGate). Currently, we support two
    main cases:
    1. One target qubit and no control qubits.
    2. One target qubits and multiple control qubits.
    For the base of multiple controls, we use the following expression [1]:
    1_{2**n} + (|1><1|)^{(n-1)} x (target-matrix - 1_{2})
    Parameters
    ----------
    controls : list, tuple
        A sequence of control qubits.
    targets : list, tuple
        A sequence of target qubits.
    target_matrix : sympy.Matrix, numpy.matrix, scipy.sparse
        The matrix form of the transformation to be performed on the target
        qubits. The format of this matrix must match that passed into
        the `format` argument.
    nqubits : int
        The total number of qubits used for the representation.
    format : str
        The format of the final matrix ('sympy', 'numpy', 'scipy.sparse').
    Examples
    --------
    References
    ----------
    [1] http://www.johnlapeyre.com/qinf/qinf_html/node6.html.
    """
    # Coerce possibly-symbolic Integer labels to plain Python ints.
    controls = [int(x) for x in controls]
    targets = [int(x) for x in targets]
    nqubits = int(nqubits)
    # This checks for the format as well.
    op11 = matrix_cache.get_matrix('op11', format)
    eye2 = matrix_cache.get_matrix('eye2', format)
    # Plain single qubit case
    if len(controls) == 0 and len(targets) == 1:
        product = []
        bit = targets[0]
        # Fill product with [I1,Gate,I2] such that the unitaries,
        # I, cause the gate to be applied to the correct Qubit
        if bit != nqubits-1:
            product.append(matrix_eye(2**(nqubits-bit-1), format=format))
        product.append(target_matrix)
        if bit != 0:
            product.append(matrix_eye(2**bit, format=format))
        return matrix_tensor_product(*product)
    # Single target, multiple controls.
    elif len(targets) == 1 and len(controls) >= 1:
        target = targets[0]
        # Build the non-trivial part.
        # Start from identity on every wire, then place |1><1| projectors
        # on the control wires and (U - 1) on the target wire; adding the
        # full identity gives the controlled-U per reference [1].
        product2 = []
        for i in range(nqubits):
            product2.append(matrix_eye(2, format=format))
        for control in controls:
            product2[nqubits-1-control] = op11
        product2[nqubits-1-target] = target_matrix - eye2
        return matrix_eye(2**nqubits, format=format) +\
            matrix_tensor_product(*product2)
    # Multi-target, multi-control is not yet implemented.
    else:
        raise NotImplementedError(
            'The representation of multi-target, multi-control gates '
            'is not implemented.'
        )
#-----------------------------------------------------------------------------
# Gate manipulation functions.
#-----------------------------------------------------------------------------
def gate_simp(circuit):
    """Simplifies gates symbolically
    It first sorts gates using gate_sort. It then applies basic
    simplification rules to the circuit, e.g., XGate**2 = Identity
    """
    # Bubble sort out gates that commute.
    circuit = gate_sort(circuit)
    # Do simplifications by subing a simplification into the first element
    # which can be simplified. We recursively call gate_simp with new circuit
    # as input more simplifications exist.
    if isinstance(circuit, Add):
        return sum(gate_simp(t) for t in circuit.args)
    elif isinstance(circuit, Mul):
        circuit_args = circuit.args
    elif isinstance(circuit, Pow):
        b, e = circuit.as_base_exp()
        circuit_args = (gate_simp(b)**e,)
    else:
        return circuit
    # Iterate through each element in circuit, simplify if possible.
    # Each rewrite restarts the scan via a recursive call + break, so only
    # the first simplifiable factor is handled per pass.
    for i in range(len(circuit_args)):
        # H,X,Y or Z squared is 1.
        # T**2 = S, S**2 = Z
        if isinstance(circuit_args[i], Pow):
            if isinstance(circuit_args[i].base,
                (HadamardGate, XGate, YGate, ZGate))\
                and isinstance(circuit_args[i].exp, Number):
                # Build a new circuit taking replacing the
                # H,X,Y,Z squared with one.
                newargs = (circuit_args[:i] +\
                    (circuit_args[i].base**(circuit_args[i].exp % 2),) +\
                    circuit_args[i+1:])
                # Recursively simplify the new circuit.
                circuit = gate_simp(Mul(*newargs))
                break
            elif isinstance(circuit_args[i].base, PhaseGate):
                # Build a new circuit taking old circuit but splicing
                # in simplification.
                newargs = circuit_args[:i]
                # Replace PhaseGate**2 with ZGate.
                newargs = newargs + (ZGate(circuit_args[i].base.args[0])**\
                    (Integer(circuit_args[i].exp/2)), circuit_args[i].base**\
                    (circuit_args[i].exp % 2))
                # Append the last elements.
                newargs = newargs + circuit_args[i+1:]
                # Recursively simplify the new circuit.
                circuit = gate_simp(Mul(*newargs))
                break
            elif isinstance(circuit_args[i].base, TGate):
                # Build a new circuit taking all the old elements.
                newargs = circuit_args[:i]
                # Put an Phasegate in place of any TGate**2.
                newargs = newargs + (PhaseGate(circuit_args[i].base.args[0])**\
                    Integer(circuit_args[i].exp/2), circuit_args[i].base**\
                    (circuit_args[i].exp % 2))
                # Append the last elements.
                newargs = newargs + circuit_args[i+1:]
                # Recursively simplify the new circuit.
                circuit = gate_simp(Mul(*newargs))
                break
    return circuit
def gate_sort(circuit):
    """Sorts the gates while keeping track of commutation relations
    This function uses a bubble sort to rearrange the order of gate
    application. Keeps track of Quantum computations special commutation
    relations (e.g. things that apply to the same Qubit do not commute with
    each other)
    circuit is the Mul of gates that are to be sorted.
    """
    # Make sure we have an Add or Mul.
    if isinstance(circuit, Add):
        return sum(gate_sort(t) for t in circuit.args)
    if isinstance(circuit, Pow):
        return gate_sort(circuit.base)**circuit.exp
    elif isinstance(circuit, Gate):
        return circuit
    if not isinstance(circuit, Mul):
        return circuit
    # Bubble sort: repeat full passes until a pass makes no swap.
    changes = True
    while changes:
        changes = False
        circ_array = circuit.args
        for i in range(len(circ_array)-1):
            # Go through each element and switch ones that are in wrong order
            if isinstance(circ_array[i], (Gate, Pow)) and\
               isinstance(circ_array[i+1], (Gate, Pow)):
                # If we have a Pow object, look at only the base
                first_base, first_exp = circ_array[i].as_base_exp()
                second_base, second_exp = circ_array[i+1].as_base_exp()
                # Use sympy's hash based sorting. This is not mathematical
                # sorting, but is rather based on comparing hashes of objects.
                # See Basic.compare for details.
                if first_base.compare(second_base) > 0:
                    # Only swap adjacent factors that commute...
                    if Commutator(first_base, second_base).doit() == 0:
                        new_args = (circuit.args[:i] + (circuit.args[i+1],) +\
                            (circuit.args[i],) + circuit.args[i+2:])
                        circuit = Mul(*new_args)
                        circ_array = circuit.args
                        changes = True
                        break
                    # ...or anticommute, in which case the swap introduces
                    # a (-1)**(e1*e2) sign factor.
                    if AntiCommutator(first_base, second_base).doit() == 0:
                        new_args = (circuit.args[:i] + (circuit.args[i+1],) +\
                            (circuit.args[i],) + circuit.args[i+2:])
                        sign = Integer(-1)**(first_exp*second_exp)
                        circuit = sign*Mul(*new_args)
                        circ_array = circuit.args
                        changes = True
                        break
    return circuit
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def random_circuit(ngates, nqubits, gate_space=(X, Y, Z, S, T, H, CNOT, SWAP)):
    """Return a random circuit of ngates and nqubits.

    This uses an equally weighted sample of (X, Y, Z, S, T, H, CNOT, SWAP)
    gates.

    Parameters
    ----------
    ngates : int
        The number of gates in the circuit.
    nqubits : int
        The number of qubits in the circuit.
    gate_space : tuple
        A tuple of the gate classes that will be used in the circuit.
        Repeating gate classes multiple times in this tuple will increase
        the frequency they appear in the random circuit.

    Returns
    -------
    Mul
        The product of the randomly chosen, randomly targeted gates.
    """
    qubit_space = list(range(nqubits))
    result = []
    for _ in range(ngates):
        g = random.choice(gate_space)
        # Two-qubit gates need two *distinct* target wires.  Detect them by
        # their base class instead of the old hard-coded identity test
        # (g == CNotGate or g == SwapGate), so that user-supplied
        # TwoQubitGate subclasses in gate_space are handled correctly; for
        # the default gate_space the behavior is unchanged since CNotGate
        # and SwapGate are the TwoQubitGate subclasses in this module.
        if issubclass(g, TwoQubitGate):
            qubits = random.sample(qubit_space, 2)
            g = g(*qubits)
        else:
            qubit = random.choice(qubit_space)
            g = g(qubit)
        result.append(g)
    return Mul(*result)
def zx_basis_transform(self, format='sympy'):
    """Transformation matrix from Z to X basis."""
    # NOTE(review): this is a module-level function, yet it takes an unused
    # ``self`` first argument -- presumably a leftover from a method; callers
    # must currently pass a throwaway value.  Confirm before changing.
    return matrix_cache.get_matrix('ZX', format)
def zy_basis_transform(self, format='sympy'):
    """Transformation matrix from Z to Y basis."""
    # NOTE(review): same unused ``self`` parameter as zx_basis_transform.
    return matrix_cache.get_matrix('ZY', format)
| 32.423237 | 87 | 0.551241 |
f6174a6a89004ac00ba9d6f1a14a143adf90a5fb | 970 | py | Python | zendesk/komand_zendesk/actions/delete_user/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | zendesk/komand_zendesk/actions/delete_user/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | zendesk/komand_zendesk/actions/delete_user/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    # Key names used to read values from the action's input dictionary.
    USER_ID = "user_id"
class Output:
    # Key names used to write values into the action's output dictionary.
    STATUS = "status"
class DeleteUserInput(komand.Input):
    """Input schema for the 'delete user' action (a komand.Input wrapper)."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "user_id": {
      "type": "string",
      "title": "User ID",
      "description": "ID of user to delete E.g. 20444826487",
      "order": 1
    }
  },
  "required": [
    "user_id"
  ]
}
    """)

    def __init__(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(DeleteUserInput, self).__init__(self.schema)
class DeleteUserOutput(komand.Output):
    """Output schema for the 'delete user' action (a komand.Output wrapper)."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "status": {
      "type": "boolean",
      "title": "Status",
      "description": "Success or failure",
      "order": 1
    }
  },
  "required": [
    "status"
  ]
}
    """)

    def __init__(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(DeleteUserOutput, self).__init__(self.schema)
| 16.724138 | 61 | 0.550515 |
0d5f3a3e75c7934c727779a685fb91a4e860da28 | 16,746 | py | Python | perfkitbenchmarker/providers/aws/athena.py | jojoya/PerfKitBenchmarker | 46c6db0e20de7f751601a9b7b7cbb19c786d395b | [
"Apache-2.0"
] | 2 | 2021-01-15T09:40:28.000Z | 2021-01-15T09:40:36.000Z | perfkitbenchmarker/providers/aws/athena.py | dharmateja03/PerfKitBenchmarker | 2f9abd17872dafd2b0eb45ab4cd2e6afe1585004 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/providers/aws/athena.py | dharmateja03/PerfKitBenchmarker | 2f9abd17872dafd2b0eb45ab4cd2e6afe1585004 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for AWS's Athena EDW service."""
import copy
import datetime
import json
import logging
import os
import re
from typing import Dict, Text
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import s3
from perfkitbenchmarker.providers.aws import util
# Fragments used to assemble 'aws athena ... --output json' CLI invocations.
AWS_ATHENA_CMD_PREFIX = ['aws', 'athena']
AWS_ATHENA_CMD_POSTFIX = ['--output', 'json']
# TODO(user): Derive the full table set from the TPC suite.
# Tables created/dropped when provisioning the TPC-H / TPC-DS datasets.
TPC_H_TABLES = [
    'customer', 'lineitem', 'nation', 'orders', 'part', 'partsupp', 'region',
    'supplier'
]
TPC_DS_TABLES = [
    'call_center', 'catalog_page', 'catalog_returns', 'catalog_sales',
    'customer', 'customer_address', 'customer_demographics', 'date_dim',
    'dbgen_version', 'household_demographics', 'income_band', 'inventory',
    'item', 'promotion', 'reason', 'ship_mode', 'store', 'store_returns',
    'store_sales', 'time_dim', 'warehouse', 'web_page', 'web_returns',
    'web_sales', 'web_site'
]
FLAGS = flags.FLAGS
class AthenaQueryError(RuntimeError):
  """Raised when an Athena CLI command exits with a non-zero return code."""
  pass
def GetAthenaClientInterface(database: str, output_bucket: str,
                             region: str) -> edw_service.EdwClientInterface:
  """Builds and Returns the requested Athena client Interface.

  Args:
    database: Name of the Athena database to execute queries against.
    output_bucket: String name of the S3 bucket to store query output.
    region: String aws region in which the database exists and client operations
      are performed.

  Returns:
    A concrete Client Interface object (subclass of EdwClientInterface)

  Raises:
    RuntimeError: if an unsupported athena_client_interface is requested
  """
  # Guard clause: only the JAVA interface is currently implemented.
  if FLAGS.athena_client_interface != 'JAVA':
    raise RuntimeError('Unknown Athena Client Interface requested.' +
                       FLAGS.athena_client_interface)
  return JavaClientInterface(database, output_bucket, region)
class GenericClientInterface(edw_service.EdwClientInterface):
  """Generic Client Interface class for Athena.

  Attributes:
    database: String name of the Athena database to execute queries against.
    output_bucket: String s3:// URI of the bucket that stores query output.
    region: String aws region in which the database exists and client operations
      are performed.
  """

  def __init__(self, database: str, output_bucket: str, region: str):
    super(GenericClientInterface, self).__init__()
    self.database = database
    self.output_bucket = 's3://{}'.format(output_bucket)
    self.region = region

  def GetMetadata(self) -> Dict[str, str]:
    """Gets the Metadata attributes for the Client Interface."""
    metadata = {}
    metadata['client'] = FLAGS.athena_client_interface
    metadata['client_region'] = self.region
    return metadata
class JavaClientInterface(GenericClientInterface):
  """Java Client Interface class for Athena.

  Runs queries via an uber jar pushed to the client VM rather than the awscli.
  """
  def Prepare(self, package_name: str) -> None:
    """Prepares the client vm to execute query.

    Installs the Java Execution Environment and a uber jar with
    a) Athena Java client libraries,
    b) An application to execute a query and gather execution details, and
    collect CW metrics
    c) their dependencies.

    Args:
      package_name: String name of the package defining the preprovisioned data
        (certificates, etc.) to extract and use during client vm preparation.
    """
    self.client_vm.Install('openjdk')
    # Push the executable jar to the working directory on client vm
    self.client_vm.InstallPreprovisionedPackageData(
        package_name, ['athena-java-client-1.0.jar'], '')
  def ExecuteQuery(self, query_name: Text) -> (float, Dict[str, str]):
    """Executes a query and returns performance details.

    Args:
      query_name: String name of the query to execute

    Returns:
      A tuple of (execution_time, run_metadata)
      execution_time: A Float variable set to the query's completion time in
        secs. -1.0 is used as a sentinel value implying the query failed. For a
        successful query the value is expected to be positive.
      run_metadata: A dictionary of query execution attributes eg. script name
    """
    query_command = (
        'java -cp athena-java-client-1.0.jar '
        'com.google.cloud.performance.edw.Single --region {} --database {} '
        '--output_location {} --query_file {} --query_timeout_secs {}'
        .format(self.region, self.database, self.output_bucket, query_name,
               FLAGS.athena_query_timeout))
    if not FLAGS.athena_metrics_collection:
      # execute the query in default primary workgroup
      query_command = '{} --workgroup primary'.format(query_command)
    query_command = '{} --collect_metrics {} --delete_workgroup {}'.format(
        query_command, FLAGS.athena_metrics_collection,
        FLAGS.athena_workgroup_delete)
    stdout, _ = self.client_vm.RemoteCommand(query_command)
    details = copy.copy(self.GetMetadata())  # Copy the base metadata
    # NOTE(review): stdout is JSON-decoded four times below; a single
    # json.loads(stdout) would suffice.
    details.update(json.loads(stdout)['details'])
    details['query_start'] = json.loads(stdout)['query_start']
    details['query_end'] = json.loads(stdout)['query_end']
    performance = json.loads(stdout)['query_wall_time_in_secs']
    return performance, details
class CliClientInterface(GenericClientInterface):
  """Command Line Client Interface class for Athena.

  Uses the native Athena client available with the awscli
  https://docs.aws.amazon.com/cli/latest/reference/athena/index.html.
  """
  def Prepare(self, package_name: str) -> None:
    """Prepares the client vm to execute query.

    Installs the bq tool dependencies and authenticates using a service account.

    Args:
      package_name: String name of the package defining the preprovisioned data
        (certificates, etc.) to extract and use during client vm preparation.
    """
    self.client_vm.Install('pip')
    self.client_vm.RemoteCommand('sudo pip install absl-py')
    for pkg in ('aws_credentials', 'awscli'):
      self.client_vm.Install(pkg)
    # Push the framework to execute a sql query and gather performance details.
    service_specific_dir = os.path.join('edw', Athena.SERVICE_TYPE)
    self.client_vm.PushFile(
        data.ResourcePath(
            os.path.join(service_specific_dir, 'script_runner.sh')))
    runner_permission_update_cmd = 'chmod 755 {}'.format('script_runner.sh')
    self.client_vm.RemoteCommand(runner_permission_update_cmd)
    self.client_vm.PushFile(
        data.ResourcePath(os.path.join('edw', 'script_driver.py')))
    self.client_vm.PushFile(
        data.ResourcePath(
            os.path.join(service_specific_dir,
                         'provider_specific_script_driver.py')))
  def ExecuteQuery(self, query_name: Text) -> (float, Dict[str, str]):
    """Executes a query and returns performance details.

    Args:
      query_name: String name of the query to execute

    Returns:
      A tuple of (execution_time, run_metadata)
      execution_time: A Float variable set to the query's completion time in
        secs. -1.0 is used as a sentinel value implying the query failed. For a
        successful query the value is expected to be positive.
      run_metadata: A dictionary of query execution attributes eg. script name
    """
    stdout, _ = self.client_vm.RemoteCommand(
        'python script_driver.py --script={} --database={} --query_timeout={} '
        '--athena_query_output_bucket={} --athena_region={}'.format(
            query_name, self.database, FLAGS.athena_query_timeout,
            self.output_bucket, self.region))
    script_performance = json.loads(str(stdout))
    execution_time = script_performance[query_name]['execution_time']
    run_metadata = {'script': query_name}
    # Surface any error details reported by the remote driver script.
    if 'error_details' in script_performance[query_name]:
      run_metadata['error_details'] = script_performance[query_name][
          'error_details']
    run_metadata.update(self.GetMetadata())
    return execution_time, run_metadata
def ReadScript(script_uri):
  """Read a sql script from a local path and return its contents.

  Arguments:
    script_uri: Local URI of file containing SQL query.

  Returns:
    Query String contents of the URI location.

  Raises:
    IOError: If the script cannot be read.
  """
  with open(script_uri) as script_file:
    return script_file.read()
def PrepareQueryString(query_string_template, substitutions):
  """Method to read a template Athena script and substitute placeholders.

  Args:
    query_string_template: Template version of the Athena query.
    substitutions: A dictionary of string placeholder keys and corresponding
      string values.

  Returns:
    Materialized Athena query as a string.
  """
  # Bug fix: the original replaced on the *template* in every iteration, so
  # only the last substitution survived, and an empty dict raised NameError.
  query_string = query_string_template
  for key, value in substitutions.items():
    query_string = query_string.replace(key, value)
  return query_string
def RunScriptCommand(script_command):
  """Method to execute an AWS Athena cli command.

  Args:
    script_command: Fully compiled AWS Athena cli command.

  Returns:
    String stdout result of executing the query.
    Script Command execution duration in seconds (rounded down to an int).

  Raises:
    AthenaQueryError: If the return code does not indicate success.
  """
  start_time = datetime.datetime.now()
  stdout, _, retcode = vm_util.IssueCommand(
      script_command, raise_on_failure=False)
  # Non-zero return code from the CLI is surfaced as a query error.
  if retcode:
    raise AthenaQueryError
  end_time = datetime.datetime.now()
  return stdout, int((end_time - start_time).total_seconds())
class Athena(edw_service.EdwService):
  """Object representing a Athena data warehouse.

  Attributes:
    region: AWS region derived from the first configured zone.
    output_bucket: S3 bucket name used to store query results.
    client_interface: EdwClientInterface used to run queries.
    s3_service: S3Service helper used to create/delete the output bucket.
    data_bucket: (only when FLAGS.provision_athena) bucket backing the tables.
    tables: (only when FLAGS.provision_athena) TPC table names to create.
  """
  CLOUD = aws.CLOUD
  SERVICE_TYPE = 'athena'
  def __init__(self, edw_service_spec):
    super(Athena, self).__init__(edw_service_spec)
    self.region = util.GetRegionFromZone(FLAGS.zones[0])
    # Bucket name encodes prefix, region and run uri for uniqueness per run.
    self.output_bucket = '-'.join(
        [FLAGS.athena_output_location_prefix, self.region, FLAGS.run_uri])
    self.client_interface = GetAthenaClientInterface(self.cluster_identifier,
                                                     self.output_bucket,
                                                     self.region)
    self.s3_service = s3.S3Service()
    self.s3_service.PrepareService(self.region)
    self.s3_service.MakeBucket(self.output_bucket)
    if FLAGS.provision_athena:
      # S3 bucket names cannot contain underscores.
      self.data_bucket = 'pkb' + self.cluster_identifier.replace('_', '')
      self.tables = (
          TPC_H_TABLES if FLAGS.edw_tpc_dsb_type == 'tpc_h' else TPC_DS_TABLES)
      self.athena_db_create_time = 0
      self.athena_table_create_time = 0
  def BuildAthenaCommand(self, query_string, database=None):
    """Method to compile a AWS Athena cli command.

    Arguments:
      query_string: A string with the query that needs to be executed on Athena.
      database: The Athena database against which the query should be executed.

    Returns:
      Fully compiled AWS Athena cli command (a list of argv tokens).
    """
    cmd = []
    cmd.extend(AWS_ATHENA_CMD_PREFIX)
    cmd.extend([
        '--region', self.region,
        'start-query-execution',
        '--query-string', query_string
    ])
    if database:
      cmd.extend(['--query-execution-context', ('Database=%s' % database)])
    cmd.extend([
        '--result-configuration',
        ('OutputLocation=s3://%s' % self.output_bucket)
    ])
    cmd.extend(AWS_ATHENA_CMD_POSTFIX)
    return cmd
  def _Create(self):
    """Create a Athena data warehouse."""
    def _EmptyDatabase():
      """Remove tables, if they exist, so they can be refreshed.

      If the database and/or tables don't already exist, the drop commands
      will simply fail, which won't raise errors.
      """
      drop_script_path = data.ResourcePath('edw/athena/%s/ddl/drop.sql' %
                                           FLAGS.edw_tpc_dsb_type)
      drop_script_contents = ReadScript(drop_script_path)
      # Drop all tables so the database can be dropped.
      for table in self.tables:
        # Remove the folder backing each parquet table so they can be refreshed.
        vm_util.IssueCommand([
            'aws', 's3', 'rm',
            's3://%s/%s_parquet' % (self.data_bucket, table), '--recursive'
        ], raise_on_failure=False)
        # The parquet tables don't have the type suffix so that the queries can
        # run as written without having to change the table names.
        for suffix in ['_csv', '']:
          script_contents = PrepareQueryString(drop_script_contents,
                                               {'{table}': table + suffix})
          script_command = self.BuildAthenaCommand(
              script_contents, database=self.cluster_identifier)
          RunScriptCommand(script_command)
      drop_database_query_string = PrepareQueryString(
          'drop database database_name',
          {'database_name': self.cluster_identifier})
      script_command = self.BuildAthenaCommand(drop_database_query_string)
      RunScriptCommand(script_command)
    def _CreateDatabase():
      # Returns the (stdout, duration) tuple from RunScriptCommand.
      create_database_query_string = PrepareQueryString(
          'create database database_name',
          {'database_name': self.cluster_identifier})
      script_command = self.BuildAthenaCommand(create_database_query_string)
      return RunScriptCommand(script_command)
    def _CreateTable(table_create_sql_template):
      # Materializes a DDL template against self.data_bucket and runs it.
      template_script_path = data.ResourcePath(table_create_sql_template)
      template_script_contents = ReadScript(template_script_path)
      script_contents = PrepareQueryString(template_script_contents,
                                           {'{bucket}': self.data_bucket})
      script_command = self.BuildAthenaCommand(
          script_contents, database=self.cluster_identifier)
      return RunScriptCommand(script_command)
    def _CreateAllTables():
      """Create all TPC benchmarking tables."""
      cumulative_table_create_time = 0
      for table in self.tables:
        for suffix in ['_csv', '_parquet']:
          script = 'edw/athena/%s/ddl/%s.sql' % (FLAGS.edw_tpc_dsb_type,
                                                 table + suffix)
          _, table_create_time = _CreateTable(script)
          cumulative_table_create_time += table_create_time
      return cumulative_table_create_time
    _EmptyDatabase()
    _, self.athena_db_create_time = _CreateDatabase()
    self.athena_table_create_time = _CreateAllTables()
  def _Exists(self):
    """Method to validate the existence of a Athena data warehouse.

    Returns:
      Boolean value indicating the existence of a Athena data warehouse.
    """
    raise NotImplementedError
  def _Delete(self):
    """Delete a Athena data warehouse."""
    if not FLAGS.teardown_athena:
      logging.info('The current resource is requested to be long living.')
      return
    raise NotImplementedError
  def Cleanup(self):
    # Direct cleanup is used instead of _DeleteDependencies because the Athena
    # warehouse resource isn't created/deleted each time.
    self.s3_service.DeleteBucket(self.output_bucket)
  def GetDataDetails(self) -> Dict[str, str]:
    """Returns a dictionary with underlying data details.

    cluster_identifier = <dataset_id>
    Data details are extracted from the dataset_id that follows the format:
    <dataset>_<format>_<compression>_<partitioning>
    eg.
    tpch100_parquet_uncompressed_unpartitoned

    Returns:
      A dictionary set to underlying data's details (format, etc.)
    """
    data_details = {}
    # If the information isn't in the cluster identifier, skip collecting it.
    if '_' not in self.cluster_identifier:
      return data_details
    parsed_id = re.split(r'_', self.cluster_identifier)
    data_details['format'] = parsed_id[1]
    data_details['compression'] = parsed_id[2]
    data_details['partitioning'] = parsed_id[3]
    return data_details
  def GetMetadata(self):
    """Return a dictionary of the metadata for the Athena data warehouse."""
    basic_data = super(Athena, self).GetMetadata()
    basic_data.update({'database': self.cluster_identifier})
    basic_data.update(self.GetDataDetails())
    basic_data.update(self.client_interface.GetMetadata())
    return basic_data
| 38.232877 | 80 | 0.701063 |
28c5a670cbc83afd2f09e479365588d80997b011 | 13,419 | py | Python | frappe/desk/doctype/desktop_icon/desktop_icon.py | ssuda777/frappe | d3f3df2ce15154aecc1d9d6d07d947e72c2e8c6e | [
"MIT"
] | 1 | 2021-06-11T10:28:07.000Z | 2021-06-11T10:28:07.000Z | frappe/desk/doctype/desktop_icon/desktop_icon.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 3 | 2021-08-23T15:20:28.000Z | 2022-03-27T07:47:36.000Z | frappe/desk/doctype/desktop_icon/desktop_icon.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 1 | 2021-08-03T07:12:43.000Z | 2021-08-03T07:12:43.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
import json
import random
from frappe.model.document import Document
from frappe.utils.user import UserPermissions
class DesktopIcon(Document):
	def validate(self):
		# Fall back to the module name when no explicit label is given.
		if not self.label:
			self.label = self.module_name
	def on_trash(self):
		# Invalidate cached icons so deletions are reflected immediately.
		clear_desktop_icons_cache()
def after_doctype_insert():
	# Enforce one icon per (module_name, owner, standard) at the database level.
	frappe.db.add_unique('Desktop Icon', ('module_name', 'owner', 'standard'))
def get_desktop_icons(user=None):
	'''Return desktop icons for user.

	Merges the user's own icons with the standard icons, applying
	domain-based blocking, per-user blocked modules, idx ordering and
	label translation. Results are cached per user under 'desktop_icons'.
	'''
	if not user:
		user = frappe.session.user
	user_icons = frappe.cache().hget('desktop_icons', user)
	if not user_icons:
		fields = ['module_name', 'hidden', 'label', 'link', 'type', 'icon', 'color', 'description', 'category',
			'_doctype', '_report', 'idx', 'force_show', 'reverse', 'custom', 'standard', 'blocked']
		active_domains = frappe.get_active_domains()
		# DocTypes restricted to a domain that is not active are blocked.
		blocked_doctypes = frappe.get_all("DocType", filters={
			"ifnull(restrict_to_domain, '')": ("not in", ",".join(active_domains))
		}, fields=["name"])
		blocked_doctypes = [ d.get("name") for d in blocked_doctypes ]
		standard_icons = frappe.db.get_all('Desktop Icon',
			fields=fields, filters={'standard': 1})
		standard_map = {}
		for icon in standard_icons:
			if icon._doctype in blocked_doctypes:
				icon.blocked = 1
			standard_map[icon.module_name] = icon
		user_icons = frappe.db.get_all('Desktop Icon', fields=fields,
			filters={'standard': 0, 'owner': user})
		# update hidden property
		for icon in user_icons:
			standard_icon = standard_map.get(icon.module_name, None)
			if icon._doctype in blocked_doctypes:
				icon.blocked = 1
			# override properties from standard icon
			if standard_icon:
				for key in ('route', 'label', 'color', 'icon', 'link'):
					if standard_icon.get(key):
						icon[key] = standard_icon.get(key)
				if standard_icon.blocked:
					icon.hidden = 1
					# flag for modules_select dialog
					icon.hidden_in_standard = 1
				elif standard_icon.force_show:
					icon.hidden = 0
		# add missing standard icons (added via new install apps?)
		user_icon_names = [icon.module_name for icon in user_icons]
		for standard_icon in standard_icons:
			if standard_icon.module_name not in user_icon_names:
				# if blocked, hidden too!
				if standard_icon.blocked:
					standard_icon.hidden = 1
					standard_icon.hidden_in_standard = 1
				user_icons.append(standard_icon)
		user_blocked_modules = frappe.get_doc('User', user).get_blocked_modules()
		for icon in user_icons:
			if icon.module_name in user_blocked_modules:
				icon.hidden = 1
		# sort by idx
		user_icons.sort(key = lambda a: a.idx)
		# translate
		for d in user_icons:
			if d.label: d.label = _(d.label)
		frappe.cache().hset('desktop_icons', user, user_icons)
	return user_icons
@frappe.whitelist()
def add_user_icon(_doctype, _report=None, label=None, link=None, type='link', standard=0):
	'''Add a new user desktop icon to the desktop.

	If an icon with the same link already exists for the user it is
	unhidden instead of duplicated. Icon color/glyph are taken from the
	DocType/Report, falling back to the module icon, then to a random
	palette entry. Returns the name of the Desktop Icon document.
	'''
	if not label: label = _doctype or _report
	if not link: link = 'List/{0}'.format(_doctype)
	# find if a standard icon exists
	icon_name = frappe.db.exists('Desktop Icon', {'standard': standard, 'link': link,
		'owner': frappe.session.user})
	if icon_name:
		if frappe.db.get_value('Desktop Icon', icon_name, 'hidden'):
			# if it is hidden, unhide it
			frappe.db.set_value('Desktop Icon', icon_name, 'hidden', 0)
			clear_desktop_icons_cache()
	else:
		# Place the new icon after the user's last icon (or after the
		# standard icons if the user has none yet).
		idx = frappe.db.sql('select max(idx) from `tabDesktop Icon` where owner=%s',
			frappe.session.user)[0][0] or \
			frappe.db.sql('select count(*) from `tabDesktop Icon` where standard=1')[0][0]
		if not frappe.db.get_value("Report", _report):
			_report = None
			userdefined_icon = frappe.db.get_value('DocType', _doctype, ['icon','color','module'], as_dict=True)
		else:
			userdefined_icon = frappe.db.get_value('Report', _report, ['icon','color','module'], as_dict=True)
		module_icon = frappe.get_value('Desktop Icon', {'standard':1, 'module_name':userdefined_icon.module},
			['name', 'icon', 'color', 'reverse'], as_dict=True)
		if not module_icon:
			# No standard module icon: pick a random palette color.
			module_icon = frappe._dict()
			opts = random.choice(palette)
			module_icon.color = opts[0]
			module_icon.reverse = 0 if (len(opts) > 1) else 1
		try:
			new_icon = frappe.get_doc({
				'doctype': 'Desktop Icon',
				'label': label,
				'module_name': label,
				'link': link,
				'type': type,
				'_doctype': _doctype,
				'_report': _report,
				'icon': userdefined_icon.icon or module_icon.icon,
				'color': userdefined_icon.color or module_icon.color,
				'reverse': module_icon.reverse,
				'idx': idx + 1,
				'custom': 1,
				'standard': standard
			}).insert(ignore_permissions=True)
			clear_desktop_icons_cache()
			icon_name = new_icon.name
		except frappe.UniqueValidationError as e:
			frappe.throw(_('Desktop Icon already exists'))
		except Exception as e:
			# NOTE(review): 'raise e' loses the original traceback position;
			# a bare 'raise' would preserve it.
			raise e
	return icon_name
@frappe.whitelist()
def set_order(new_order, user=None):
	'''set new order by duplicating user icons (if user is set) or set global order'''
	if isinstance(new_order, str):
		new_order = json.loads(new_order)
	for i, module_name in enumerate(new_order):
		# 'Explore' is a fixed entry and never reordered.
		if module_name not in ('Explore',):
			if user:
				icon = get_user_copy(module_name, user)
			else:
				name = frappe.db.get_value('Desktop Icon',
					{'standard': 1, 'module_name': module_name})
				if name:
					icon = frappe.get_doc('Desktop Icon', name)
				else:
					# standard icon missing, create one for DocType
					name = add_user_icon(module_name, standard=1)
					icon = frappe.get_doc('Desktop Icon', name)
			icon.db_set('idx', i)
	clear_desktop_icons_cache()
def set_desktop_icons(visible_list, ignore_duplicate=True):
	'''Resets all lists and makes only the given one standard,
	if the desktop icon does not exist and the name is a DocType, then will create
	an icon for the doctype'''
	# clear all custom only if setup is not complete
	if not int(frappe.defaults.get_defaults().setup_complete or 0):
		frappe.db.delete("Desktop Icon", {"standard": 0})
	# set standard as blocked and hidden if setting first active domain
	if not frappe.flags.keep_desktop_icons:
		frappe.db.sql('update `tabDesktop Icon` set blocked=0, hidden=1 where standard=1')
	# set as visible if present, or add icon
	for module_name in visible_list:
		name = frappe.db.get_value('Desktop Icon', {'module_name': module_name})
		if name:
			frappe.db.set_value('Desktop Icon', name, 'hidden', 0)
		else:
			if frappe.db.exists('DocType', module_name):
				try:
					add_user_icon(module_name, standard=1)
				except frappe.UniqueValidationError as e:
					if not ignore_duplicate:
						raise e
					else:
						# Drop the duplicate from the list and discard the
						# message queued by the failed insert.
						visible_list.remove(module_name)
						if frappe.message_log:
							frappe.message_log.pop()
	# set the order
	set_order(visible_list)
	clear_desktop_icons_cache()
def set_hidden_list(hidden_list, user=None):
	'''Sets property `hidden`=1 in **Desktop Icon** for given user.
	If user is None then it will set global values.
	It will also set the rest of the icons as shown (`hidden` = 0)'''
	if isinstance(hidden_list, str):
		hidden_list = json.loads(hidden_list)
	# set as hidden
	for module_name in hidden_list:
		set_hidden(module_name, user, 1)
	# set as seen
	for module_name in list(set(get_all_icons()) - set(hidden_list)):
		set_hidden(module_name, user, 0)
	# Per-user changes only need the icon cache cleared; global changes
	# invalidate the whole cache.
	if user:
		clear_desktop_icons_cache()
	else:
		frappe.clear_cache()
def set_hidden(module_name, user=None, hidden=1):
	'''Set module hidden property for given user. If user is not specified,
	hide/unhide it globally'''
	if user:
		icon = get_user_copy(module_name, user)
		if hidden and icon.custom:
			# Custom user icons are deleted outright instead of hidden.
			frappe.delete_doc(icon.doctype, icon.name, ignore_permissions=True)
			return
		# hidden by user
		icon.db_set('hidden', hidden)
	else:
		icon = frappe.get_doc('Desktop Icon', {'standard': 1, 'module_name': module_name})
		# blocked is globally hidden
		icon.db_set('blocked', hidden)
def get_all_icons():
	'''Return the module names of all standard desktop icons.'''
	standard_icons = frappe.get_all(
		'Desktop Icon', filters={'standard': 1}, fields=['module_name'])
	return [icon.module_name for icon in standard_icons]
def clear_desktop_icons_cache(user=None):
	# Drop both the icon cache and bootinfo (which embeds the icons) for the user.
	frappe.cache().hdel('desktop_icons', user or frappe.session.user)
	frappe.cache().hdel('bootinfo', user or frappe.session.user)
def get_user_copy(module_name, user=None):
	'''Return user copy (Desktop Icon) of the given module_name. If user copy does not exist, create one.

	:param module_name: Name of the module
	:param user: User for which the copy is required (optional)
	'''
	if not user:
		user = frappe.session.user
	desktop_icon_name = frappe.db.get_value('Desktop Icon',
		{'module_name': module_name, 'owner': user, 'standard': 0})
	if desktop_icon_name:
		return frappe.get_doc('Desktop Icon', desktop_icon_name)
	else:
		# No per-user copy yet: clone the standard icon for this user.
		return make_user_copy(module_name, user)
def make_user_copy(module_name, user):
	'''Insert and return the user copy of a standard Desktop Icon.

	Raises frappe.DoesNotExistError if no standard icon exists for the module.
	'''
	standard_name = frappe.db.get_value('Desktop Icon', {'module_name': module_name, 'standard': 1})
	if not standard_name:
		frappe.throw(_('{0} not found').format(module_name), frappe.DoesNotExistError)
	original = frappe.get_doc('Desktop Icon', standard_name)
	desktop_icon = frappe.get_doc({
		'doctype': 'Desktop Icon',
		'standard': 0,
		'owner': user,
		'module_name': module_name
	})
	# Copy only the display/navigation fields from the standard icon.
	for key in ('app', 'label', 'route', 'type', '_doctype', 'idx', 'reverse', 'force_show', 'link', 'icon', 'color'):
		if original.get(key):
			desktop_icon.set(key, original.get(key))
	desktop_icon.insert(ignore_permissions=True)
	return desktop_icon
def sync_desktop_icons():
	'''Sync desktop icons from all apps'''
	for app in frappe.get_installed_apps():
		sync_from_app(app)
def sync_from_app(app):
	'''Sync desktop icons from app. To be called during install.

	Reads the app's config.desktop.get_data() and upserts one standard
	Desktop Icon per module. Returns the list of module definitions, or
	an empty list if the app has no desktop config.
	'''
	try:
		modules = frappe.get_attr(app + '.config.desktop.get_data')() or {}
	except ImportError:
		return []
	if isinstance(modules, dict):
		# Normalize dict form {module_name: icon_def} into a list of defs.
		modules_list = []
		for m, desktop_icon in modules.items():
			desktop_icon['module_name'] = m
			modules_list.append(desktop_icon)
	else:
		modules_list = modules
	for i, m in enumerate(modules_list):
		desktop_icon_name = frappe.db.get_value('Desktop Icon',
			{'module_name': m['module_name'], 'app': app, 'standard': 1})
		if desktop_icon_name:
			desktop_icon = frappe.get_doc('Desktop Icon', desktop_icon_name)
		else:
			# new icon
			desktop_icon = frappe.get_doc({
				'doctype': 'Desktop Icon',
				'idx': i,
				'standard': 1,
				'app': app,
				'owner': 'Administrator'
			})
		if 'doctype' in m:
			m['_doctype'] = m.pop('doctype')
		desktop_icon.update(m)
		try:
			desktop_icon.save()
		except frappe.exceptions.UniqueValidationError:
			# Icon already exists (e.g. concurrent sync); keep the existing one.
			pass
	return modules_list
@frappe.whitelist()
def update_icons(hidden_list, user=None):
	"""update modules"""
	# Only System Managers may change the global (user-less) icon set.
	if not user:
		frappe.only_for('System Manager')
	set_hidden_list(hidden_list, user)
	frappe.msgprint(frappe._('Updated'), indicator='green', title=_('Success'), alert=True)
def get_context(context):
	# Template context for the modules setup page.
	context.icons = get_user_icons(frappe.session.user)
	context.user = frappe.session.user
	# System Managers also get the list of users to configure icons for.
	if 'System Manager' in frappe.get_roles():
		context.users = frappe.db.get_all('User', filters={'user_type': 'System User', 'enabled': 1},
			fields = ['name', 'first_name', 'last_name'])
@frappe.whitelist()
def get_module_icons(user=None):
	# Only System Managers may inspect other users' icons.
	if user != frappe.session.user:
		frappe.only_for('System Manager')
	if not user:
		icons = frappe.db.get_all('Desktop Icon',
			fields='*', filters={'standard': 1}, order_by='idx')
	else:
		frappe.cache().hdel('desktop_icons', user)
		icons = get_user_icons(user)
	for icon in icons:
		# Escaped, translated label used by the client-side dialog.
		icon.value = frappe.db.escape(_(icon.label or icon.module_name))
	return {'icons': icons, 'user': user}
def get_user_icons(user):
	'''Get user icons for module setup page.

	Filters the user's desktop icons to those the user may see: icons
	hidden in the standard set are dropped, and non-custom page/module
	icons are checked against the user's allowed pages and modules.
	'''
	user_perms = UserPermissions(user)
	user_perms.build_permissions()

	from frappe.boot import get_allowed_pages
	allowed_pages = get_allowed_pages()

	icons = []
	for icon in get_desktop_icons(user):
		add = True
		if icon.hidden_in_standard:
			add = False

		if not icon.custom:
			# Bug fix: the original wrote icon.module_name==['Help', 'Settings'],
			# comparing a string to a list (always False), so Help/Settings never
			# reached the intended pass-through branch. Use a membership test.
			if icon.module_name in ('Help', 'Settings'):
				pass
			elif icon.type=="page" and icon.link not in allowed_pages:
				add = False
			elif icon.type=="module" and icon.module_name not in user_perms.allow_modules:
				add = False

		if add:
			icons.append(icon)

	return icons
# Color palette for auto-assigned icons (see add_user_icon). Each entry is
# (hex_color,) or (hex_color, flag); the optional second element is consumed
# as the 'reverse' hint — presumably marking darker backgrounds, TODO confirm.
palette = (
	('#FFC4C4',),
	('#FFE8CD',),
	('#FFD2C2',),
	('#FF8989',),
	('#FFD19C',),
	('#FFA685',),
	('#FF4D4D', 1),
	('#FFB868',),
	('#FF7846', 1),
	('#A83333', 1),
	('#A87945', 1),
	('#A84F2E', 1),
	('#D2D2FF',),
	('#F8D4F8',),
	('#DAC7FF',),
	('#A3A3FF',),
	('#F3AAF0',),
	('#B592FF',),
	('#7575FF', 1),
	('#EC7DEA', 1),
	('#8E58FF', 1),
	('#4D4DA8', 1),
	('#934F92', 1),
	('#5E3AA8', 1),
	('#EBF8CC',),
	('#FFD7D7',),
	('#D2F8ED',),
	('#D9F399',),
	('#FFB1B1',),
	('#A4F3DD',),
	('#C5EC63',),
	('#FF8989', 1),
	('#77ECCA',),
	('#7B933D', 1),
	('#A85B5B', 1),
	('#49937E', 1),
	('#FFFACD',),
	('#D2F1FF',),
	('#CEF6D1',),
	('#FFF69C',),
	('#A6E4FF',),
	('#9DECA2',),
	('#FFF168',),
	('#78D6FF',),
	('#6BE273',),
	('#A89F45', 1),
	('#4F8EA8', 1),
	('#428B46', 1)
)
@frappe.whitelist()
def hide(name, user = None):
	if not user:
		user = frappe.session.user
	try:
		set_hidden(name, user, hidden = 1)
		clear_desktop_icons_cache()
	except Exception:
		# Best-effort API: report failure to the caller instead of raising.
		return False
	return True
| 27.554415 | 115 | 0.690588 |
5b6efbb202f8af7f6c81d760e27085bd2295fca0 | 158 | py | Python | demo4/single_file_executor/my_executor.py | nan-wang/python-meetup-hangzhou-202203 | f4867f6a4a3c1f90da41ad46e9597ef9e398d146 | [
"Apache-2.0"
] | 3 | 2022-03-06T13:24:21.000Z | 2022-03-31T09:30:26.000Z | demo4/single_file_executor/my_executor.py | nan-wang/python-meetup-hangzhou-202203 | f4867f6a4a3c1f90da41ad46e9597ef9e398d146 | [
"Apache-2.0"
] | null | null | null | demo4/single_file_executor/my_executor.py | nan-wang/python-meetup-hangzhou-202203 | f4867f6a4a3c1f90da41ad46e9597ef9e398d146 | [
"Apache-2.0"
] | null | null | null | from jina import Executor, requests
class FooExecutor(Executor):
    @requests
    def foo(self, **kwargs):
        # Minimal request handler: just logs that it was invoked.
        print('this is a single-file executor')
| 19.75 | 47 | 0.683544 |
261ff58679c234dd43868942d8bc1ca7abe28080 | 5,528 | py | Python | calculations/AllMetrics.py | leogoesger/func-flow | c81f73998df9b02c04c19a6beae463121d5a8898 | [
"MIT"
] | 11 | 2018-04-14T00:34:34.000Z | 2021-05-04T17:23:50.000Z | calculations/AllMetrics.py | Yesicaleon/func-flow | c81f73998df9b02c04c19a6beae463121d5a8898 | [
"MIT"
] | 15 | 2019-04-02T03:35:22.000Z | 2022-02-12T13:17:11.000Z | calculations/AllMetrics.py | Yesicaleon/func-flow | c81f73998df9b02c04c19a6beae463121d5a8898 | [
"MIT"
] | 9 | 2018-12-01T19:46:11.000Z | 2022-03-31T17:18:15.000Z | import numpy as np
from utils.calc_drh import calc_drh
from utils.calc_all_year import calc_all_year
from utils.calc_winter_highflow import calc_winter_highflow_annual
from utils.calc_summer_baseflow import calc_start_of_summer, calc_summer_baseflow_durations_magnitude
from utils.calc_fall_flush import calc_fall_flush_timings_durations
from utils.calc_spring_transition import calc_spring_transition_timing_magnitude, calc_spring_transition_roc, calc_spring_transition_duration
from utils.calc_fall_winter_baseflow import calc_fall_winter_baseflow
from params import general_params, winter_params, spring_params, summer_params, fall_params
class Metrics:
    # Exceedance percentiles used by the winter high-flow calculations.
    exceedance_percent = [2, 5, 10, 20, 50]
    # exceedance_percent = [2, 5, 10, 20, 12, 15, 110, 120]
    def __init__(self, flow_matrix, year_ranges, start_year, end_year, params, flow_class):
        self.flow_matrix = flow_matrix
        self.year_ranges = year_ranges
        self.start_year = start_year
        self.end_year = end_year
        self.params = params
        self.flow_class = flow_class
        # NOTE(review): start_year/end_year are used directly as slice
        # indices into year_ranges and flow_matrix columns — confirm callers
        # pass column offsets rather than calendar years.
        if(self.start_year and self.end_year):
            self.year_ranges = year_ranges[start_year:end_year]
            self.flow_matrix = flow_matrix[:, start_year:end_year]
        # Order matters below: fall_flush_timings_durations() reads
        # self.summer_timings, which is set by start_of_summer().
        self.all_year()
        self.winter_highflow_annual()
        self.start_of_summer()
        self.fall_flush_timings_durations()
        self.spring_transition_timing_magnitude()
        self.spring_transition_duration()
        self.spring_transition_roc()
        self.fall_winter_baseflow()
        self.summer_baseflow_durations_magnitude()
        self.get_DRH()
def get_DRH(self):
drh = calc_drh(self.flow_matrix)
self.drh = drh
def all_year(self):
params = self.params['general_params'] if self.params else general_params
average_annual_flows, standard_deviations, coefficient_variations = calc_all_year(
self.flow_matrix, params)
self.average_annual_flows = average_annual_flows
self.standard_deviations = standard_deviations
self.coefficient_variations = coefficient_variations
def winter_highflow_annual(self):
params = self.params['winter_params'] if self.params else winter_params
winter_timings, winter_durations, winter_frequencys, winter_magnitudes = calc_winter_highflow_annual(
self.flow_matrix, self.exceedance_percent, params)
self.winter_timings = {}
self.winter_durations = {}
self.winter_frequencys = {}
self.winter_magnitudes = {}
for percent in self.exceedance_percent:
self.winter_timings[percent] = winter_timings[percent]
self.winter_durations[percent] = list(
map(lambda x: int(x) if isinstance(x, np.int64) else x, winter_durations[percent]))
self.winter_frequencys[percent] = list(
map(lambda x: int(x) if isinstance(x, np.int64) else x, winter_frequencys[percent]))
self.winter_magnitudes[percent] = winter_magnitudes[percent]
def start_of_summer(self):
params = self.params['summer_params'] if self.params else summer_params
summer_timings = calc_start_of_summer(
self.flow_matrix, self.flow_class, params)
self.summer_timings = summer_timings
def fall_flush_timings_durations(self):
params = self.params['fall_params'] if self.params else fall_params
fall_timings, fall_magnitudes, fall_wet_timings, fall_durations = calc_fall_flush_timings_durations(
self.flow_matrix, self.summer_timings, self.flow_class, params)
self.fall_timings = fall_timings
self.fall_magnitudes = fall_magnitudes
self.fall_wet_timings = fall_wet_timings
self.fall_durations = fall_durations
def summer_baseflow_durations_magnitude(self):
summer_90_magnitudes, summer_50_magnitudes, summer_flush_durations, summer_wet_durations, summer_no_flow_counts = calc_summer_baseflow_durations_magnitude(
self.flow_matrix, self.summer_timings, self.fall_timings, self.fall_wet_timings)
self.summer_90_magnitudes = summer_90_magnitudes
self.summer_50_magnitudes = summer_50_magnitudes
self.summer_flush_durations = summer_flush_durations
self.summer_wet_durations = summer_wet_durations
self.summer_no_flow_counts = summer_no_flow_counts
def spring_transition_timing_magnitude(self):
params = self.params['spring_params'] if self.params else spring_params
spring_timings, spring_magnitudes = calc_spring_transition_timing_magnitude(
self.flow_matrix, self.flow_class, self.summer_timings, params)
self.spring_timings = spring_timings
self.spring_magnitudes = spring_magnitudes
def spring_transition_duration(self):
spring_durations = calc_spring_transition_duration(
self.spring_timings, self.summer_timings)
self.spring_durations = spring_durations
def spring_transition_roc(self):
spring_rocs = calc_spring_transition_roc(
self.flow_matrix, self.spring_timings, self.summer_timings)
self.spring_rocs = spring_rocs
def fall_winter_baseflow(self):
wet_baseflows_10, wet_baseflows_50, wet_bfl_durs = calc_fall_winter_baseflow(
self.flow_matrix, self.fall_wet_timings, self.spring_timings)
self.wet_baseflows_10 = wet_baseflows_10
self.wet_baseflows_50 = wet_baseflows_50
self.wet_bfl_durs = wet_bfl_durs
| 46.453782 | 163 | 0.736614 |
a806348715fed64e1268759522d4fb261a9f51c8 | 3,137 | py | Python | src/haddock/modules/analysis/seletopclusts/__init__.py | rvhonorato/haddock3 | 84866ecab73a56c20c3e457abfc5077233b311b3 | [
"Apache-2.0"
] | null | null | null | src/haddock/modules/analysis/seletopclusts/__init__.py | rvhonorato/haddock3 | 84866ecab73a56c20c3e457abfc5077233b311b3 | [
"Apache-2.0"
] | 1 | 2021-07-24T15:34:58.000Z | 2021-07-24T15:34:58.000Z | src/haddock/modules/analysis/seletopclusts/__init__.py | rvhonorato/haddock3 | 84866ecab73a56c20c3e457abfc5077233b311b3 | [
"Apache-2.0"
] | null | null | null | """HADDOCK3 module to select a top cluster/model."""
from pathlib import Path
from haddock import log
from haddock.libs.libontology import ModuleIO
from haddock.modules import BaseHaddockModule
RECIPE_PATH = Path(__file__).resolve().parent
DEFAULT_CONFIG = Path(RECIPE_PATH, "defaults.cfg")
class HaddockModule(BaseHaddockModule):
    """Haddock Module for 'seletopclusts'.

    Selects the top-ranked clusters produced by a previous clustering step
    and emits the best models from each selected cluster.
    """
    name = RECIPE_PATH.name
    def __init__(
            self,
            order,
            path,
            *ignore,
            init_params=DEFAULT_CONFIG,
            **everything):
        super().__init__(order, path, init_params)
    @classmethod
    def confirm_installation(cls):
        """Confirm if module is installed."""
        return
    def _run(self):
        """Execute the module's protocol."""
        # Get the models generated in previous step
        # NOTE(review): `type(x) == iter` can never be true (type() returns a
        # class, `iter` is a builtin function), so this guard is always taken
        # and does nothing.  Presumably an isinstance/Iterable check was
        # intended — confirm and fix upstream.
        if not type(self.previous_io) == iter:
            # this module needs to come after one that produced an iterable
            pass
        # retrieve the clusters from a dictionary generated in the previous
        # step the cluster_id just tells us how populated a given cluster is
        # if we discard this value we can have lists
        average_dic = {}
        cluster_dic = {}
        # Q: Why this [0] here?
        for cluster_id in self.previous_io.output[0]:
            cluster_id = int(cluster_id)
            # sort the models inside the cluster based on its score
            # TODO: refactor this, its ugly :p
            list_to_be_sorted = [
                (e, e.score)
                for e in self.previous_io.output[0][str(cluster_id)]
            ]
            list_to_be_sorted.sort(key=lambda x: x[1])
            structure_list = [e[0] for e in list_to_be_sorted]
            cluster_dic[cluster_id] = structure_list
            # get the average score of the cluster based on ALL the elements
            scores = [e[1] for e in list_to_be_sorted]
            average_score = sum(scores) / float(len(scores))
            average_dic[cluster_id] = average_score
        # sort the clusters based on their average
        sorted_dic = sorted(average_dic.items(), key=lambda item: item[1])
        sorted_dic = dict(sorted_dic)
        # how many models should we output?
        models = []
        for select_id in self.params['top_cluster']:
            # which cluster should we retrieve?
            # top_cluster = 1 == the best one, should be index 0
            # NOTE(review): select_id == 0 yields index -1 (the *worst*
            # cluster) rather than an error — verify inputs are 1-based.
            try:
                target_id = list(sorted_dic.keys())[select_id - 1]
            except IndexError:
                log.warning(
                    f'Cluster ranking #{select_id} not found,'
                    ' skipping selection'
                )
                continue
            # 'all' keeps every model; otherwise keep the top-N by score.
            if self.params['top_models'] == 'all':
                for pdb in cluster_dic[target_id]:
                    models.append(pdb)
            else:
                for pdb in cluster_dic[target_id][:self.params['top_models']]:
                    models.append(pdb)
        # Save module information
        io = ModuleIO()
        io.add(models, "o")
        io.save()
| 34.472527 | 78 | 0.579853 |
707580bfe960150f043dc66a0cd9aea09f87c432 | 9,857 | py | Python | pandas/tests/io/test_common.py | maketestsgofaster/pandas | 3493abaa9c47e39b410752833c901fd27f5b3a76 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 3 | 2017-02-09T20:01:04.000Z | 2021-08-11T00:33:41.000Z | pandas/tests/io/test_common.py | mwaskom/pandas | d1010643fea058ba43c2c7124af75cc462ccf242 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/tests/io/test_common.py | mwaskom/pandas | d1010643fea058ba43c2c7124af75cc462ccf242 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 1 | 2021-02-05T02:22:31.000Z | 2021-02-05T02:22:31.000Z | """
Tests for the pandas.io.common functionalities
"""
import mmap
import pytest
import os
from os.path import isabs
import pandas as pd
import pandas.util.testing as tm
from pandas.io import common
from pandas.compat import is_platform_windows, StringIO, FileNotFoundError
from pandas import read_csv, concat
class CustomFSPath(object):
    """Minimal os.PathLike-style object (PEP 519) for exercising fspath handling.

    Wraps a plain path string and hands it back verbatim via ``__fspath__``,
    letting tests verify that path consumers accept arbitrary path-like objects.
    """

    def __init__(self, path):
        # Keep the raw path; no normalisation is performed.
        self.path = path

    def __fspath__(self):
        # os.fspath() and friends call this to obtain the string form.
        return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath]
# pathlib and py.path are optional; extend path_types only when available.
try:
    from pathlib import Path
    path_types.append(Path)
except ImportError:
    pass
try:
    from py.path import local as LocalPath
    path_types.append(LocalPath)
except ImportError:
    pass
# Directory containing this test module; used to locate data fixtures.
HERE = os.path.dirname(__file__)
class TestCommonIOCapabilities(object):
    """Tests for pandas.io.common path/buffer helpers and reader/writer fspath support."""
    # Small CSV fixture used by the chunked-iterator test.
    data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    def test_expand_user(self):
        # '~' must be expanded to an absolute home-relative path.
        filename = '~/sometest'
        expanded_name = common._expand_user(filename)
        assert expanded_name != filename
        assert isabs(expanded_name)
        assert os.path.expanduser(filename) == expanded_name
    def test_expand_user_normal_path(self):
        # Paths without '~' pass through unchanged.
        filename = '/somefolder/sometest'
        expanded_name = common._expand_user(filename)
        assert expanded_name == filename
        assert os.path.expanduser(filename) == expanded_name
    def test_stringify_path_pathlib(self):
        tm._skip_if_no_pathlib()
        rel_path = common._stringify_path(Path('.'))
        assert rel_path == '.'
        redundant_path = common._stringify_path(Path('foo//bar'))
        assert redundant_path == os.path.join('foo', 'bar')
    def test_stringify_path_localpath(self):
        tm._skip_if_no_localpath()
        path = os.path.join('foo', 'bar')
        abs_path = os.path.abspath(path)
        lpath = LocalPath(path)
        assert common._stringify_path(lpath) == abs_path
    def test_stringify_path_fspath(self):
        # Any __fspath__-implementing object is accepted.
        p = CustomFSPath('foo/bar.csv')
        result = common._stringify_path(p)
        assert result == 'foo/bar.csv'
    @pytest.mark.parametrize('extension,expected', [
        ('', None),
        ('.gz', 'gzip'),
        ('.bz2', 'bz2'),
        ('.zip', 'zip'),
        ('.xz', 'xz'),
    ])
    @pytest.mark.parametrize('path_type', path_types)
    def test_infer_compression_from_path(self, extension, expected, path_type):
        # Compression inference works for every supported path-like type.
        path = path_type('foo/bar.csv' + extension)
        compression = common._infer_compression(path, compression='infer')
        assert compression == expected
    def test_get_filepath_or_buffer_with_path(self):
        filename = '~/sometest'
        filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
        assert filepath_or_buffer != filename
        assert isabs(filepath_or_buffer)
        assert os.path.expanduser(filename) == filepath_or_buffer
    def test_get_filepath_or_buffer_with_buffer(self):
        # Buffers are returned unchanged.
        input_buffer = StringIO()
        filepath_or_buffer, _, _ = common.get_filepath_or_buffer(input_buffer)
        assert filepath_or_buffer == input_buffer
    def test_iterator(self):
        # Reading in chunks of 1 and concatenating equals a single read.
        reader = read_csv(StringIO(self.data1), chunksize=1)
        result = concat(reader, ignore_index=True)
        expected = read_csv(StringIO(self.data1))
        tm.assert_frame_equal(result, expected)
        # GH12153
        it = read_csv(StringIO(self.data1), chunksize=1)
        first = next(it)
        tm.assert_frame_equal(first, expected.iloc[[0]])
        tm.assert_frame_equal(concat(it), expected.iloc[1:])
    @pytest.mark.parametrize('reader, module, error_class, fn_ext', [
        (pd.read_csv, 'os', FileNotFoundError, 'csv'),
        (pd.read_table, 'os', FileNotFoundError, 'csv'),
        (pd.read_fwf, 'os', FileNotFoundError, 'txt'),
        (pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
        (pd.read_feather, 'feather', Exception, 'feather'),
        (pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
        (pd.read_stata, 'os', FileNotFoundError, 'dta'),
        (pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
        (pd.read_json, 'os', ValueError, 'json'),
        (pd.read_msgpack, 'os', ValueError, 'mp'),
        (pd.read_pickle, 'os', FileNotFoundError, 'pickle'),
    ])
    def test_read_non_existant(self, reader, module, error_class, fn_ext):
        # Every reader raises its documented error for a missing file.
        pytest.importorskip(module)
        path = os.path.join(HERE, 'data', 'does_not_exist.' + fn_ext)
        with pytest.raises(error_class):
            reader(path)
    @pytest.mark.parametrize('reader, module, path', [
        (pd.read_csv, 'os', os.path.join(HERE, 'data', 'iris.csv')),
        (pd.read_table, 'os', os.path.join(HERE, 'data', 'iris.csv')),
        (pd.read_fwf, 'os', os.path.join(HERE, 'data',
                                         'fixed_width_format.txt')),
        (pd.read_excel, 'xlrd', os.path.join(HERE, 'data', 'test1.xlsx')),
        (pd.read_feather, 'feather', os.path.join(HERE, 'data',
                                                  'feather-0_3_1.feather')),
        (pd.read_hdf, 'tables', os.path.join(HERE, 'data', 'legacy_hdf',
                                             'datetimetz_object.h5')),
        (pd.read_stata, 'os', os.path.join(HERE, 'data', 'stata10_115.dta')),
        (pd.read_sas, 'os', os.path.join(HERE, 'sas', 'data',
                                         'test1.sas7bdat')),
        (pd.read_json, 'os', os.path.join(HERE, 'json', 'data',
                                          'tsframe_v012.json')),
        (pd.read_msgpack, 'os', os.path.join(HERE, 'msgpack', 'data',
                                             'frame.mp')),
        (pd.read_pickle, 'os', os.path.join(HERE, 'data',
                                            'categorical_0_14_1.pickle')),
    ])
    def test_read_fspath_all(self, reader, module, path):
        # Reading via an fspath object must equal reading via a plain string.
        pytest.importorskip(module)
        mypath = CustomFSPath(path)
        result = reader(mypath)
        expected = reader(path)
        if path.endswith('.pickle'):
            # categorical
            tm.assert_categorical_equal(result, expected)
        else:
            tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize('writer_name, writer_kwargs, module', [
        ('to_csv', {}, 'os'),
        ('to_excel', {'engine': 'xlwt'}, 'xlwt'),
        ('to_feather', {}, 'feather'),
        ('to_html', {}, 'os'),
        ('to_json', {}, 'os'),
        ('to_latex', {}, 'os'),
        ('to_msgpack', {}, 'os'),
        ('to_pickle', {}, 'os'),
        ('to_stata', {}, 'os'),
    ])
    def test_write_fspath_all(self, writer_name, writer_kwargs, module):
        # Writing via an fspath object must produce byte-identical output.
        p1 = tm.ensure_clean('string')
        p2 = tm.ensure_clean('fspath')
        df = pd.DataFrame({"A": [1, 2]})
        with p1 as string, p2 as fspath:
            pytest.importorskip(module)
            mypath = CustomFSPath(fspath)
            writer = getattr(df, writer_name)
            writer(string, **writer_kwargs)
            with open(string, 'rb') as f:
                expected = f.read()
            writer(mypath, **writer_kwargs)
            with open(fspath, 'rb') as f:
                result = f.read()
            assert result == expected
    def test_write_fspath_hdf5(self):
        # Same test as write_fspath_all, except HDF5 files aren't
        # necessarily byte-for-byte identical for a given dataframe, so we'll
        # have to read and compare equality
        pytest.importorskip('tables')
        df = pd.DataFrame({"A": [1, 2]})
        p1 = tm.ensure_clean('string')
        p2 = tm.ensure_clean('fspath')
        with p1 as string, p2 as fspath:
            mypath = CustomFSPath(fspath)
            df.to_hdf(mypath, key='bar')
            df.to_hdf(string, key='bar')
            result = pd.read_hdf(fspath, key='bar')
            expected = pd.read_hdf(string, key='bar')
            tm.assert_frame_equal(result, expected)
class TestMMapWrapper(object):
    """Tests for pandas.io.common.MMapWrapper (memory-mapped file reader)."""
    def setup_method(self, method):
        # Path to the CSV fixture that gets memory-mapped.
        self.mmap_file = os.path.join(tm.get_data_path(),
                                      'test_mmap.csv')
    def test_constructor_bad_file(self):
        # A non-file object with a bogus fileno must be rejected.
        non_file = StringIO('I am not a file')
        non_file.fileno = lambda: -1
        # the error raised is different on Windows
        if is_platform_windows():
            msg = "The parameter is incorrect"
            err = OSError
        else:
            msg = "[Errno 22]"
            err = mmap.error
        tm.assert_raises_regex(err, msg, common.MMapWrapper, non_file)
        # A closed file handle must also be rejected.
        target = open(self.mmap_file, 'r')
        target.close()
        msg = "I/O operation on closed file"
        tm.assert_raises_regex(
            ValueError, msg, common.MMapWrapper, target)
    def test_get_attr(self):
        # Attribute access is delegated to the underlying mmap object.
        with open(self.mmap_file, 'r') as target:
            wrapper = common.MMapWrapper(target)
        attrs = dir(wrapper.mmap)
        attrs = [attr for attr in attrs
                 if not attr.startswith('__')]
        attrs.append('__next__')
        for attr in attrs:
            assert hasattr(wrapper, attr)
        assert not hasattr(wrapper, 'foo')
    def test_next(self):
        # Iterating the wrapper yields the same lines as the file itself.
        with open(self.mmap_file, 'r') as target:
            wrapper = common.MMapWrapper(target)
            lines = target.readlines()
        for line in lines:
            next_line = next(wrapper)
            assert next_line.strip() == line.strip()
        pytest.raises(StopIteration, next, wrapper)
    def test_unknown_engine(self):
        # An unsupported parser engine name raises ValueError.
        with tm.ensure_clean() as path:
            df = tm.makeDataFrame()
            df.to_csv(path)
            with tm.assert_raises_regex(ValueError, 'Unknown engine'):
                read_csv(path, engine='pyt')
| 33.989655 | 79 | 0.59369 |
4a17a8f610810fcb135cfeb49b9d7e6dc26b24a1 | 20,455 | py | Python | paypal/express/views.py | evonove/django-oscar-paypal | f3561efb4654470e84087c2a7823d95feb8d28f1 | [
"BSD-3-Clause"
] | null | null | null | paypal/express/views.py | evonove/django-oscar-paypal | f3561efb4654470e84087c2a7823d95feb8d28f1 | [
"BSD-3-Clause"
] | 2 | 2022-02-02T10:13:09.000Z | 2022-02-02T12:07:25.000Z | paypal/express/views.py | evonove/django-oscar-paypal | f3561efb4654470e84087c2a7823d95feb8d28f1 | [
"BSD-3-Clause"
] | null | null | null | import logging
from decimal import Decimal as D
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import gettext_lazy as _
from django.views.generic import RedirectView, View
from oscar.apps.payment.exceptions import UnableToTakePayment
from oscar.apps.shipping.methods import FixedPrice, NoShippingRequired
from oscar.core.exceptions import ModuleNotFoundError
from oscar.core.loading import get_class, get_model
from paypal.exceptions import PayPalError
from paypal.express.exceptions import (
EmptyBasketException, InvalidBasket, MissingShippingAddressException, MissingShippingMethodException)
from paypal.express.facade import confirm_transaction, fetch_transaction_details, get_paypal_url
from paypal.express.gateway import buyer_pays_on_paypal
# Load views dynamically
PaymentDetailsView = get_class('checkout.views', 'PaymentDetailsView')
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
User = get_user_model()
ShippingAddress = get_model('order', 'ShippingAddress')
Country = get_model('address', 'Country')
Basket = get_model('basket', 'Basket')
Repository = get_class('shipping.repository', 'Repository')
Selector = get_class('partner.strategy', 'Selector')
Source = get_model('payment', 'Source')
SourceType = get_model('payment', 'SourceType')
try:
Applicator = get_class('offer.applicator', 'Applicator')
except ModuleNotFoundError:
# fallback for django-oscar<=1.1
Applicator = get_class('offer.utils', 'Applicator')
logger = logging.getLogger('paypal.express')
# NOTE: this class deliberately shadows (and subclasses) the imported Django
# RedirectView — the base-class lookup happens before the new name is bound,
# so this is legal, if surprising to read.
class RedirectView(CheckoutSessionMixin, RedirectView):
    """
    Initiate the transaction with Paypal and redirect the user
    to PayPal's Express Checkout to perform the transaction.
    """
    permanent = False
    # Setting to distinguish if the site has already collected a shipping
    # address. This is False when redirecting to PayPal straight from the
    # basket page but True when redirecting from checkout.
    as_payment_method = False
    # If True redirect directly to credit card payment
    ccard = False
    def get_redirect_url(self, **kwargs):
        """Register the transaction with PayPal and return the redirect URL.

        On any expected failure, attach a user-facing message and fall back
        to the relevant local checkout/basket page instead.
        """
        try:
            basket = self.build_submission()['basket']
            url = self._get_redirect_url(basket, **kwargs)
        except PayPalError as ppe:
            messages.error(self.request, str(ppe))
            if self.as_payment_method:
                url = reverse('checkout:payment-details')
            else:
                url = reverse('basket:summary')
            return url
        except InvalidBasket as e:
            messages.warning(self.request, str(e))
            return reverse('basket:summary')
        except EmptyBasketException:
            messages.error(self.request, _("Your basket is empty"))
            return reverse('basket:summary')
        except MissingShippingAddressException:
            messages.error(
                self.request, _("A shipping address must be specified"))
            return reverse('checkout:shipping-address')
        except MissingShippingMethodException:
            messages.error(
                self.request, _("A shipping method must be specified"))
            return reverse('checkout:shipping-method')
        else:
            # Transaction successfully registered with PayPal. Now freeze the
            # basket so it can't be edited while the customer is on the PayPal
            # site.
            basket.freeze()
            logger.info("Basket #%s - redirecting to %s", basket.id, url)
            return url
    def _get_redirect_url(self, basket, **kwargs):
        """Collect basket/shipping/user parameters and ask PayPal for the URL."""
        if basket.is_empty:
            raise EmptyBasketException()
        params = {
            'basket': basket,
            'shipping_methods': [] # setup a default empty list
        } # to support no_shipping
        user = self.request.user
        if self.as_payment_method:
            if basket.is_shipping_required():
                # Only check for shipping details if required.
                shipping_addr = self.get_shipping_address(basket)
                if not shipping_addr:
                    raise MissingShippingAddressException()
                shipping_method = self.get_shipping_method(
                    basket, shipping_addr)
                if not shipping_method:
                    raise MissingShippingMethodException()
                params['shipping_address'] = shipping_addr
                params['shipping_method'] = shipping_method
                params['shipping_methods'] = []
        else:
            # Maik doubts that this code ever worked. Assigning
            # shipping method instances to Paypal params
            # isn't going to work, is it?
            shipping_methods = Repository().get_shipping_methods(
                user=user, basket=basket, request=self.request)
            params['shipping_methods'] = shipping_methods
        if settings.DEBUG:
            # Determine the localserver's hostname to use when
            # in testing mode
            params['host'] = self.request.META['HTTP_HOST']
        if user.is_authenticated:
            params['user'] = user
        params['paypal_params'] = self._get_paypal_params()
        params['ccard'] = self.ccard
        return get_paypal_url(**params)
    def _get_paypal_params(self):
        """
        Return any additional PayPal parameters
        """
        return {}
class CancelResponseView(RedirectView):
    """Handle the buyer cancelling on PayPal: thaw the basket and go back to it."""
    permanent = False
    def get(self, request, *args, **kwargs):
        # The basket was frozen before redirecting to PayPal; unfreeze it so
        # the customer can edit it again.
        basket = get_object_or_404(Basket, id=kwargs['basket_id'],
                                   status=Basket.FROZEN)
        basket.thaw()
        logger.info("Payment cancelled (token %s) - basket #%s thawed",
                    request.GET.get('token', '<no token>'), basket.id)
        return super(CancelResponseView, self).get(request, *args, **kwargs)
    def get_redirect_url(self, **kwargs):
        """Show a cancellation message and redirect to the basket summary."""
        messages.error(self.request, _("PayPal transaction cancelled"))
        return reverse('basket:summary')
# Upgrading notes: when we drop support for Oscar 0.6, this class can be
# refactored to pass variables around more explicitly (instead of assigning
# things to self so they are accessible in a later method).
class SuccessResponseView(PaymentDetailsView):
    """Handle the buyer returning from PayPal after approving the payment.

    GET shows an order preview (unless PAYPAL_BUYER_PAYS_ON_PAYPAL skips it);
    POST places the order and captures the payment.
    """
    template_name_preview = 'paypal/express/preview.html'
    preview = True
    error_message = _("A problem occurred communicating with PayPal - please try again later")
    @property
    def pre_conditions(self):
        # PayPal has already collected shipping/payment details, so Oscar's
        # usual checkout pre-conditions are disabled.
        return []
    def get(self, request, *args, **kwargs):
        """
        Fetch details about the successful transaction from PayPal.
        We use these details to show a preview of the order with a 'submit' button to place it.
        The preview step can be skipped with `PAYPAL_BUYER_PAYS_ON_PAYPAL=True` inside settings.
        """
        try:
            self.payer_id = request.GET['PayerID']
            self.token = request.GET['token']
        except KeyError:
            # Manipulation - redirect to basket page with warning message
            logger.warning("Missing GET params on success response page")
            messages.error(self.request, _("Unable to determine PayPal transaction details"))
            return redirect('basket:summary')
        try:
            self.txn = fetch_transaction_details(self.token)
        except PayPalError as e:
            logger.warning("Unable to fetch transaction details for token %s: %s", self.token, e)
            messages.error(self.request, self.error_message)
            return redirect('basket:summary')
        # Reload frozen basket which is specified in the URL
        kwargs['basket'] = self.load_frozen_basket(kwargs['basket_id'])
        if not kwargs['basket']:
            logger.warning("Unable to load frozen basket with ID %s", kwargs['basket_id'])
            messages.error(self.request, _("No basket was found that corresponds to your PayPal transaction"))
            return redirect('basket:summary')
        if buyer_pays_on_paypal():
            return self.submit(**self.build_submission(basket=kwargs['basket']))
        logger.info(
            "Basket #%s - showing preview with payer ID %s and token %s",
            kwargs['basket'].id, self.payer_id, self.token)
        return super(SuccessResponseView, self).get(request, *args, **kwargs)
    def load_frozen_basket(self, basket_id):
        """Return the frozen basket for this transaction (or None), with strategy and offers re-applied."""
        # Lookup the frozen basket that this txn corresponds to
        try:
            basket = Basket.objects.get(id=basket_id, status=Basket.FROZEN)
        except Basket.DoesNotExist:
            return None
        # Assign strategy to basket instance
        if Selector:
            basket.strategy = Selector().strategy(self.request)
        # Find the logged user (if any)
        try:
            if self.request and self.request.user:
                user = User.objects.get(id=self.request.user.id)
            else:
                user = None
        except User.DoesNotExist:
            user = None
        # Re-apply any offers
        Applicator().apply(request=self.request, basket=basket, user=user)
        return basket
    def get_context_data(self, **kwargs):
        """Add PayPal transaction details to the preview template context."""
        ctx = super(SuccessResponseView, self).get_context_data(**kwargs)
        if not hasattr(self, 'payer_id'):
            return ctx
        # This context generation only runs when in preview mode
        ctx.update({
            'payer_id': self.payer_id,
            'token': self.token,
            'paypal_user_email': self.txn.value('EMAIL'),
            'paypal_amount': D(self.txn.value('AMT')),
        })
        return ctx
    def post(self, request, *args, **kwargs):
        """
        Place an order.
        We fetch the txn details again and then proceed with oscar's standard
        payment details view for placing the order.
        """
        if buyer_pays_on_paypal():
            return HttpResponseBadRequest() # we don't expect any user here if we let users buy on PayPal
        try:
            self.payer_id = request.POST['payer_id']
            self.token = request.POST['token']
        except KeyError:
            # Probably suspicious manipulation if we get here
            messages.error(self.request, self.error_message)
            return redirect('basket:summary')
        try:
            self.txn = fetch_transaction_details(self.token)
        except PayPalError:
            # Unable to fetch txn details from PayPal - we have to bail out
            messages.error(self.request, self.error_message)
            return redirect('basket:summary')
        # Reload frozen basket which is specified in the URL
        basket = self.load_frozen_basket(kwargs['basket_id'])
        if not basket:
            messages.error(self.request, self.error_message)
            return redirect('basket:summary')
        submission = self.build_submission(basket=basket)
        return self.submit(**submission)
    def build_submission(self, **kwargs):
        """Extend Oscar's submission dict with the PayPal payer/token/txn details."""
        submission = super(
            SuccessResponseView, self).build_submission(**kwargs)
        # Pass the user email so it can be stored with the order
        submission['order_kwargs']['guest_email'] = self.txn.value('EMAIL')
        # Pass PP params
        submission['payment_kwargs']['payer_id'] = self.payer_id
        submission['payment_kwargs']['token'] = self.token
        submission['payment_kwargs']['txn'] = self.txn
        return submission
    def handle_payment(self, order_number, total, **kwargs):
        """
        Complete payment with PayPal - this calls the 'DoExpressCheckout'
        method to capture the money from the initial transaction.
        """
        try:
            confirm_txn = confirm_transaction(
                kwargs['payer_id'], kwargs['token'], kwargs['txn'].amount,
                kwargs['txn'].currency)
        except PayPalError:
            raise UnableToTakePayment()
        if not confirm_txn.is_successful:
            raise UnableToTakePayment()
        # Record payment source and event
        source_type, is_created = SourceType.objects.get_or_create(
            name='PayPal')
        source = Source(source_type=source_type,
                        currency=confirm_txn.currency,
                        amount_allocated=confirm_txn.amount,
                        amount_debited=confirm_txn.amount,
                        reference=confirm_txn.token)
        self.add_payment_source(source)
        self.add_payment_event('Settled', confirm_txn.amount,
                               reference=confirm_txn.correlation_id)
    def get_shipping_address(self, basket):
        """
        Return a created shipping address instance, created using
        the data returned by PayPal.
        """
        # Determine names - PayPal uses a single field
        ship_to_name = self.txn.value('PAYMENTREQUEST_0_SHIPTONAME')
        if ship_to_name is None:
            return None
        first_name = last_name = ''
        parts = ship_to_name.split()
        if len(parts) == 1:
            last_name = ship_to_name
        elif len(parts) > 1:
            first_name = parts[0]
            last_name = " ".join(parts[1:])
        return ShippingAddress(
            first_name=first_name,
            last_name=last_name,
            line1=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET'),
            line2=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET2', default=""),
            line4=self.txn.value('PAYMENTREQUEST_0_SHIPTOCITY', default=""),
            state=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTATE', default=""),
            postcode=self.txn.value('PAYMENTREQUEST_0_SHIPTOZIP', default=""),
            country=Country.objects.get(iso_3166_1_a2=self.txn.value('PAYMENTREQUEST_0_SHIPTOCOUNTRYCODE')),
            phone_number=self.txn.value('PAYMENTREQUEST_0_SHIPTOPHONENUM', default=""),
        )
    def _get_shipping_method_by_name(self, name, basket, shipping_address=None):
        # Returns None implicitly when no method matches the given name.
        methods = Repository().get_shipping_methods(
            basket=basket, user=self.request.user,
            shipping_addr=shipping_address, request=self.request)
        for method in methods:
            if method.name == name:
                return method
    def get_shipping_method(self, basket, shipping_address=None, **kwargs):
        """
        Return the shipping method used
        """
        if not basket.is_shipping_required():
            return NoShippingRequired()
        # Instantiate a new FixedPrice shipping method instance
        charge_incl_tax = D(self.txn.value('PAYMENTREQUEST_0_SHIPPINGAMT'))
        # Assume no tax for now
        charge_excl_tax = charge_incl_tax
        name = self.txn.value('SHIPPINGOPTIONNAME')
        session_method = super(SuccessResponseView, self).get_shipping_method(
            basket, shipping_address, **kwargs)
        if not session_method or (name and name != session_method.name):
            if name:
                method = self._get_shipping_method_by_name(name, basket, shipping_address)
            else:
                method = None
            if not method:
                # Fall back to a fixed-price method using PayPal's charge,
                # inheriting name/code from any session method.
                method = FixedPrice(charge_excl_tax, charge_incl_tax)
                if session_method:
                    method.name = session_method.name
                    method.code = session_method.code
        else:
            method = session_method
        return method
class ShippingOptionsView(View):
    """PayPal Express "Instant Update" callback endpoint.

    PayPal sends the buyer's candidate shipping address to this view so the
    store can return the shipping methods (and charges) available for that
    address.  The basket ID is carried in the URL path because some shipping
    options depend on the user and basket contents; PayPal does pass back
    details of the basket contents, but reconstituting the basket from those
    would be far more work than piggy-backing the ID in the callback URL.

    Fixes over the previous revision:
    * ``get`` and ``post`` were verbatim copies; they now share one helper.
    * ``render_to_response`` read ``CURRENCYCODE`` from ``request.POST``
      even on the GET path, so GET callbacks always fell back to 'GBP'.
    """

    def get(self, request, *args, **kwargs):
        """Handle a GET callback (parameters in the query string)."""
        return self._respond(request.GET, **kwargs)

    def post(self, request, *args, **kwargs):
        """Handle a POST callback (parameters in the form body)."""
        return self._respond(request.POST, **kwargs)

    def _respond(self, params, **kwargs):
        """Build the shipping-options payload for the address in ``params``.

        ``params`` is the GET or POST QueryDict supplied by PayPal.
        """
        basket = get_object_or_404(Basket, id=kwargs['basket_id'])
        user = basket.owner
        if not user:
            user = AnonymousUser()
        # Create a shipping address instance using the data passed back.
        # An unknown/missing country code yields an unsaved blank Country.
        country_code = params.get('SHIPTOCOUNTRY', None)
        try:
            country = Country.objects.get(iso_3166_1_a2=country_code)
        except Country.DoesNotExist:
            country = Country()
        shipping_address = ShippingAddress(
            line1=params.get('SHIPTOSTREET', ''),
            line2=params.get('SHIPTOSTREET2', ''),
            line4=params.get('SHIPTOCITY', ''),
            state=params.get('SHIPTOSTATE', ''),
            postcode=params.get('SHIPTOZIP', ''),
            country=country
        )
        methods = Repository().get_shipping_methods(
            basket=basket, shipping_addr=shipping_address,
            request=self.request, user=user)
        return self.render_to_response(methods, basket, params)

    def render_to_response(self, methods, basket, params=None):
        """Serialise ``methods`` into PayPal's NVP callback response.

        ``params`` is the request QueryDict used to echo CURRENCYCODE back;
        when omitted (legacy direct callers) it falls back to request.POST,
        matching the previous behaviour.
        """
        if params is None:
            params = self.request.POST
        pairs = [
            ('METHOD', 'CallbackResponse'),
            ('CALLBACKVERSION', '61.0'),
            ('CURRENCYCODE', params.get('CURRENCYCODE', 'GBP')),
        ]
        if methods:
            for index, method in enumerate(methods):
                charge = method.calculate(basket).incl_tax
                pairs.append(('L_SHIPPINGOPTIONNAME%d' % index,
                              str(method.name)))
                pairs.append(('L_SHIPPINGOPTIONLABEL%d' % index,
                              str(method.description)))
                pairs.append(('L_SHIPPINGOPTIONAMOUNT%d' % index, charge))
                # For now, we assume tax and insurance to be zero
                pairs.append(('L_TAXAMT%d' % index, D('0.00')))
                pairs.append(('L_INSURANCEAMT%d' % index, D('0.00')))
                # We assume that the first returned method is the default one
                pairs.append(('L_SHIPPINGOPTIONISDEFAULT%d' % index,
                              1 if index == 0 else 0))
        else:
            # No shipping methods available - we flag this up to PayPal indicating that we
            # do not ship to the shipping address.
            pairs.append(('NO_SHIPPING_OPTION_DETAILS', 1))
        payload = urlencode(pairs)
        logger.debug("Basket #%s - returning postage costs payload = '%s'", basket.id, payload)
        return HttpResponse(payload)
| 40.991984 | 110 | 0.629919 |
0a5f9ac7b8bd891ef7b10a7fc9b8b9f2cb347dea | 564 | py | Python | driver/migrations/0004_auto_20180520_1323.py | sami-mai/Carpool-R-Us | 306c60788e3dc123c3ac85e0c40ac5a291590709 | [
"MIT"
] | null | null | null | driver/migrations/0004_auto_20180520_1323.py | sami-mai/Carpool-R-Us | 306c60788e3dc123c3ac85e0c40ac5a291590709 | [
"MIT"
] | 5 | 2020-02-12T00:43:34.000Z | 2021-06-10T20:18:42.000Z | driver/migrations/0004_auto_20180520_1323.py | sami-mai/Carpool-R-Us | 306c60788e3dc123c3ac85e0c40ac5a291590709 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-20 10:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration for the `driver` app: renames Car.seats_available
    # to Car.seat_capacity and makes Car querysets order by that field.

    dependencies = [
        ('driver', '0003_auto_20180520_1217'),
    ]

    operations = [
        # State-only change: default queryset ordering for Car.
        # NOTE(review): 'seat_capacity' is referenced before the RenameField
        # below — this is the order Django's autodetector emits and ordering is
        # resolved lazily, but confirm the migration applies cleanly.
        migrations.AlterModelOptions(
            name='car',
            options={'ordering': ['seat_capacity']},
        ),
        # Rename the column; existing data is preserved.
        migrations.RenameField(
            model_name='car',
            old_name='seats_available',
            new_name='seat_capacity',
        ),
    ]
| 22.56 | 52 | 0.588652 |
e9a6faccfda8b808af3a40d6f1649d2e1390f4e0 | 459 | py | Python | Week 5/Solution_5_3.py | ChanchalKumarMaji/Competitive-Programmer-s-Core-Skills | 26102bfe6c708af732349ec7f085dc87939e4cb2 | [
"MIT"
] | 4 | 2020-01-20T01:07:42.000Z | 2022-01-09T01:18:21.000Z | Week 5/Solution_5_3.py | ChanchalKumarMaji/Competitive-Programmer-s-Core-Skills | 26102bfe6c708af732349ec7f085dc87939e4cb2 | [
"MIT"
] | null | null | null | Week 5/Solution_5_3.py | ChanchalKumarMaji/Competitive-Programmer-s-Core-Skills | 26102bfe6c708af732349ec7f085dc87939e4cb2 | [
"MIT"
] | 1 | 2021-09-23T04:24:34.000Z | 2021-09-23T04:24:34.000Z | # python3
# Memo table: dp[L][S] caches the number of length-L digit strings whose
# digits sum to S (leading zeros allowed). Bounds: L <= 18, S <= 162 = 18*9.
dp = [[-1] * (162 + 1) for _ in range(18 + 1)]


def f(L, S):
    """Count digit strings of length L (leading zeros allowed) with digit sum S."""
    if S < 0:
        return 0
    if L == 0:
        return 1 if S == 0 else 0
    if dp[L][S] == -1:
        # Choose the first digit d and recurse on the remaining L-1 positions.
        dp[L][S] = sum(f(L - 1, S - d) for d in range(10))
    return dp[L][S]


def solve(L, S):
    """Count L-digit numbers (no leading zero) with digit sum S."""
    if S == 0 and L == 1:
        return 1  # the number 0 is the only one-digit number with digit sum 0
    # All length-L strings minus those starting with '0' (= length L-1 strings).
    return f(L, S) - f(L - 1, S)


if __name__ == '__main__':
    S, L = (int(tok) for tok in input().split())
    print(solve(L, S))
| 17 | 40 | 0.440087 |
612837417445f392d1011c1b31774354294d9828 | 14,673 | py | Python | examples/learning/reinforcement/flowControl/_model/KS.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 43 | 2018-07-26T07:20:42.000Z | 2022-03-02T10:23:12.000Z | examples/learning/reinforcement/flowControl/_model/KS.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 212 | 2018-09-21T10:44:07.000Z | 2022-03-22T14:33:05.000Z | examples/learning/reinforcement/flowControl/_model/KS.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 16 | 2018-07-25T15:00:36.000Z | 2022-03-22T14:19:46.000Z | from numpy import pi
from scipy.fftpack import fft, ifft
import numpy as np
np.seterr(over='raise', invalid='raise')
def gaussian(x, mean, sigma):
    """Normalized Gaussian (PDF of N(mean, sigma^2)) evaluated at x (scalar or array)."""
    z = (x - mean) / sigma
    return np.exp(-0.5 * z * z) / np.sqrt(2.0 * np.pi * sigma**2)
class KS:
    #
    # Solution of the 1D Kuramoto-Sivashinsky equation
    #
    # u_t + u*u_x + u_xx + u_xxxx = 0,
    # with periodic BCs on x \in [0, 2*pi*L]: u(x+2*pi*L,t) = u(x,t).
    #
    # The nature of the solution depends on the system size L and on the initial
    # condition u(x,0). Energy enters the system at long wavelengths via u_xx
    # (an unstable diffusion term), cascades to short wavelengths due to the
    # nonlinearity u*u_x, and dissipates via diffusion with u_xxxx.
    # see P Cvitanović, RL Davidchack, and E Siminos, SIAM Journal on Applied Dynamical Systems 2010
    #
    # Spatial discretization: spectral (Fourier)
    # Temporal discretization: exponential time differencing fourth-order Runge-Kutta
    # see AK Kassam and LN Trefethen, SISC 2005

    def __init__(self, L=16, N=128, dt=0.25, nsteps=None, tend=150, iout=1, u0=None, v0=None, RL=False, nActions=4, sigma=0.4, case=None ):
        """Set up the spectral KS solver.

        L      -- domain size parameter (domain is [0, 2*pi*L])
        N      -- number of Fourier collocation points
        dt     -- time step; nsteps/tend give the duration (nsteps wins if set)
        iout   -- store every iout-th step in the time series
        u0/v0  -- optional initial condition in physical / Fourier space
        RL     -- enable RL control (Gaussian actuators + target equilibria)
        nActions, sigma -- number and width of the Gaussian actuators
        case   -- RL control case identifier ("E12", "E23" or "E31")
        """
        #
        # Initialize
        L  = float(L); dt = float(dt); tend = float(tend)
        if (nsteps is None):
            nsteps = int(tend/dt)
        else:
            nsteps = int(nsteps)
            # override tend
            tend = dt*nsteps
        #
        # save to self
        self.L      = L
        self.N      = N
        self.dx     = 2*pi*L/N
        self.dt     = dt
        self.nsteps = nsteps
        self.iout   = iout
        self.nout   = int(nsteps/iout)
        self.RL     = RL
        #
        # get targets for control:
        if self.RL:
            self.case = case
            self.setup_targets()
        #
        # set initial condition
        # Bugfix: the original test was `(u0 is None) or (v0 is None)`, which is
        # true whenever EITHER is missing, so a caller-supplied u0 (or v0) was
        # silently ignored and a default IC was used instead.
        if (u0 is None) and (v0 is None):
            self.IC()
        elif (u0 is not None):
            self.IC(u0 = u0)
        else:
            self.IC(v0 = v0)
        #
        # initialize simulation arrays
        self.setup_timeseries()
        #
        # precompute Fourier-related quantities
        self.setup_fourier()
        #
        # precompute ETDRK4 scalar quantities:
        self.setup_etdrk4()
        #
        # precompute Gaussians for control:
        if self.RL:
            self.nActions = nActions
            self.sigma = sigma
            # NOTE: this overwrites the Fourier grid self.x set in
            # setup_fourier with the actuator grid used by setup_gaussians.
            self.x = np.arange(self.N)*self.L/(self.N-1)
            self.setup_gaussians()

    def setup_timeseries(self, nout=None):
        """Allocate the stored time series (vv: Fourier fields, tt: times)."""
        if (nout != None):
            self.nout = int(nout)
        # nout+1 so we store the IC as well
        # NOTE(review): vv is complex64 while v is complex128 — storage is
        # deliberately truncated to half precision; confirm this is intended.
        self.vv = np.zeros([self.nout+1, self.N], dtype=np.complex64)
        self.tt = np.zeros(self.nout+1)
        #
        # store the IC in [0]
        self.vv[0,:] = self.v0
        self.tt[0]   = 0.

    def setup_fourier(self, coeffs=None):
        """Precompute grid, wave numbers and the linear-term Fourier multipliers."""
        self.x  = 2*pi*self.L*np.r_[0:self.N]/self.N
        self.k  = np.r_[0:self.N/2, 0, -self.N/2+1:0]/self.L # Wave numbers
        # Fourier multipliers for the linear term Lu
        if (coeffs is None):
            # normal-form equation
            self.l = self.k**2 - self.k**4
        else:
            # altered-coefficients
            self.l = -      coeffs[0]*np.ones(self.k.shape) \
                     -      coeffs[1]*1j*self.k             \
                     + (1 + coeffs[2])  *self.k**2          \
                     +      coeffs[3]*1j*self.k**3          \
                     - (1 + coeffs[4])  *self.k**4

    def setup_etdrk4(self):
        """Precompute the ETDRK4 coefficients (Kassam & Trefethen, SISC 2005).

        The f1/f2/f3/Q coefficients are evaluated via complex contour means
        over M roots of unity to avoid cancellation for small |l*dt|.
        """
        self.E  = np.exp(self.dt*self.l)
        self.E2 = np.exp(self.dt*self.l/2.)
        self.M  = 62                                           # no. of points for complex means
        self.r  = np.exp(1j*pi*(np.r_[1:self.M+1]-0.5)/self.M) # roots of unity
        self.LR = self.dt*np.repeat(self.l[:,np.newaxis], self.M, axis=1) + np.repeat(self.r[np.newaxis,:], self.N, axis=0)
        self.Q  = self.dt*np.real(np.mean((np.exp(self.LR/2.) - 1.)/self.LR, 1))
        self.f1 = self.dt*np.real( np.mean( (-4. -    self.LR              + np.exp(self.LR)*( 4. - 3.*self.LR + self.LR**2) )/(self.LR**3) , 1) )
        self.f2 = self.dt*np.real( np.mean( ( 2. +    self.LR              + np.exp(self.LR)*(-2. +    self.LR             ) )/(self.LR**3) , 1) )
        self.f3 = self.dt*np.real( np.mean( (-4. - 3.*self.LR - self.LR**2 + np.exp(self.LR)*( 4. -    self.LR             ) )/(self.LR**3) , 1) )
        # g implements the nonlinear term -u*u_x = -0.5*(u^2)_x in Fourier space
        self.g  = -0.5j*self.k

    def setup_gaussians(self):
        """Precompute the nActions Gaussian actuator shapes on the grid."""
        self.gaussians = np.zeros((self.nActions, self.N))
        for i in range(self.nActions):
            mean = i*self.L/4
            self.gaussians[i,:] = gaussian( self.x, mean, self.sigma )

    def setup_targets(self):
        """Load the three target equilibrium profiles u1..u3 from disk."""
        self.targets = np.zeros((3,self.N))
        for i in range(3):
            self.targets[i,:] = np.loadtxt("_model/u{}.dat".format(i+1))

    def IC(self, u0=None, v0=None, seed=42):
        """Set the initial condition from u0 (physical), v0 (Fourier) or defaults.

        With neither given: for RL runs, start on the equilibrium of the chosen
        case; otherwise use small Gaussian noise (seeded for reproducibility).
        Returns -1 on a size mismatch, otherwise stores u0/v0/v and resets time.
        """
        #
        # Set initial condition
        if (v0 is None):
            if (u0 is None):
                # set u0
                if self.RL:
                    # initial condition for chosen RL case: start on the
                    # equilibrium named first in the case label ("Eab" -> Ea)
                    if self.case == "E31":
                        u0 = self.targets[2,:]
                    elif self.case == "E12":
                        u0 = self.targets[0,:]
                    elif self.case == "E23":
                        u0 = self.targets[1,:]
                    else:
                        assert False, print("RL case {} unknown...".format(self.case))
                else:
                    print("Using random initial condition...")
                    # Gaussian noise (according to https://arxiv.org/pdf/1906.07672.pdf)
                    np.random.seed( seed )
                    u0 = np.random.normal(0., 1e-4, self.N)
            else:
                # check the input size
                if (np.size(u0,0) != self.N):
                    print('Error: wrong IC array size')
                    return -1
                else:
                    print("Using given (real) flow field...")
                    # if ok cast to np.array
                    u0 = np.array(u0)
            # in any case, set v0:
            v0 = fft(u0)
        else:
            # the initial condition is provided in v0
            # check the input size
            if (np.size(v0,0) != self.N):
                print('Error: wrong IC array size')
                return -1
            else:
                print("Using given (Fourier) flow field...")
                # if ok cast to np.array
                v0 = np.array(v0)
                # and transform to physical space
                u0 = ifft(v0)
        #
        # and save to self
        self.u0  = u0
        self.v0  = v0
        self.v   = v0
        self.t   = 0.
        self.stepnum = 0
        self.ioutnum = 0 # [0] is the initial condition

    def step( self, action=None ):
        """Advance one ETDRK4 step; `action` (length nActions) adds Gaussian forcing."""
        forcing  = np.zeros(self.N)
        Fforcing = np.zeros(self.N)
        if (action is not None):
            assert len(action) == self.nActions, print("Wrong number of actions. provided {}/{}".format(len(action), self.nActions))
            for i, a in enumerate(action):
                forcing += a*self.gaussians[i,:]
            Fforcing = fft( forcing )
        #
        # Computation is based on v = fft(u), so linear term is diagonal.
        # The time-discretization is done via ETDRK4
        # (exponential time differencing - 4th order Runge Kutta)
        #
        v = self.v;                           Nv = self.g*fft(np.real(ifft(v))**2)
        a = self.E2*v + self.Q*Nv;            Na = self.g*fft(np.real(ifft(a))**2)
        b = self.E2*v + self.Q*Na;            Nb = self.g*fft(np.real(ifft(b))**2)
        c = self.E2*a + self.Q*(2.*Nb - Nv);  Nc = self.g*fft(np.real(ifft(c))**2)
        #
        if (action is not None):
            self.v = self.E*v + (Nv + Fforcing)*self.f1 + 2.*(Na + Nb + 2*Fforcing)*self.f2 + (Nc + Fforcing)*self.f3
        else:
            self.v = self.E*v + Nv*self.f1 + 2.*(Na + Nb)*self.f2 + Nc*self.f3
        self.stepnum += 1
        self.t       += self.dt

    def simulate(self, nsteps=None, iout=None, restart=False, correction=None):
        """Advance nsteps steps, storing every iout-th field.

        `correction` (optional array) is added to v after every step.  Returns
        -1 (with the time series truncated) if the integration blows up.
        Note: the default was changed from `correction=[]` (a mutable default
        argument) to None; passing an empty list still means "no correction".
        """
        #
        # If not provided explicitly, get internal values
        if (nsteps is None):
            nsteps = self.nsteps
        else:
            nsteps = int(nsteps)
            self.nsteps = nsteps
        if (iout is None):
            iout = self.iout
            nout = self.nout
        else:
            self.iout = iout
        if restart:
            # update nout in case nsteps or iout were changed
            nout      = int(nsteps/iout)
            self.nout = nout
            # reset simulation arrays with possibly updated size
            self.setup_timeseries(nout=self.nout)
        #
        # advance in time for nsteps steps
        if correction is None or len(correction) == 0:
            for n in range(1,self.nsteps+1):
                try:
                    self.step()
                except FloatingPointError:
                    #
                    # something exploded
                    # cut time series to last saved solution and return
                    self.nout = self.ioutnum
                    self.vv.resize((self.nout+1,self.N)) # nout+1 because the IC is in [0]
                    self.tt.resize(self.nout+1)          # nout+1 because the IC is in [0]
                    return -1
                if ( (self.iout>0) and (n%self.iout==0) ):
                    self.ioutnum += 1
                    self.vv[self.ioutnum,:] = self.v
                    self.tt[self.ioutnum]   = self.t
        else:
            # lots of code duplication here, but should improve speed instead of having the 'if correction' at every time step
            for n in range(1,self.nsteps+1):
                try:
                    self.step()
                    self.v += correction
                except FloatingPointError:
                    #
                    # something exploded
                    # cut time series to last saved solution and return
                    self.nout = self.ioutnum
                    self.vv.resize((self.nout+1,self.N)) # nout+1 because the IC is in [0]
                    self.tt.resize(self.nout+1)          # nout+1 because the IC is in [0]
                    return -1
                if ( (self.iout>0) and (n%self.iout==0) ):
                    self.ioutnum += 1
                    self.vv[self.ioutnum,:] = self.v
                    self.tt[self.ioutnum]   = self.t

    def fou2real(self):
        """Convert the stored time series from spectral to physical space (self.uu)."""
        self.uu = np.real(ifft(self.vv))

    def state(self):
        """Return an 8-point subsample of the physical field u as the RL state.

        Samples u at grid indices i*N/16 for odd i in [1, 15].
        """
        u = np.real(ifft(self.v))
        state = np.full(8,fill_value=np.inf)
        for i in range(1,17,2):
            indexState = int( i/2 )
            indexField = int( i*self.N/16 )
            # Bugfix: sample the field at indexField; previously u[indexState]
            # was used and indexField was computed but never read, so the state
            # consisted of the first 8 grid points instead of the subsample.
            state[indexState] = u[indexField]
        return state

    def reward(self):
        """Distance of the current field to the target equilibrium of the case.

        Case "Eab" starts on equilibrium a (see IC) and is scored against
        equilibrium b.  NOTE(review): this is a non-negative distance; an agent
        maximizing reward presumably wants its negative — confirm with caller.
        """
        u = np.real(ifft(self.v))
        if self.case == "E31":
            return np.linalg.norm( u - self.targets[0,:] )
        elif self.case == "E12":
            return np.linalg.norm( u - self.targets[1,:] )
        elif self.case == "E23":
            return np.linalg.norm( u - self.targets[2,:] )

    def compute_Ek(self):
        """Compute all forms of kinetic energy from the stored time series."""
        #
        # compute all forms of kinetic energy
        #
        # Kinetic energy as a function of wavenumber and time
        self.compute_Ek_kt()
        # Time-averaged energy spectrum as a function of wavenumber
        self.Ek_k = np.sum(self.Ek_kt, 0)/(self.ioutnum+1) # not self.nout because we might not be at the end; ioutnum+1 because the IC is in [0]
        # Total kinetic energy as a function of time
        self.Ek_t = np.sum(self.Ek_kt, 1)
        # Time-cumulative average as a function of wavenumber and time
        self.Ek_ktt = np.cumsum(self.Ek_kt, 0) / np.arange(1,self.ioutnum+2)[:,None] # not self.nout because we might not be at the end; ioutnum+1 because the IC is in [0] +1 more because we divide starting from 1, not zero
        # Time-cumulative average as a function of time
        # Bugfix: Ek_t is 1-D, so the stray [:,None] (copied from the 2-D line
        # above) broadcast the division into a 2-D array; Ek_tt is now the
        # intended 1-D running time-average.
        self.Ek_tt = np.cumsum(self.Ek_t, 0) / np.arange(1,self.ioutnum+2) # ioutnum+1 entries because the IC is in [0]; divide starting from 1

    def compute_Ek_kt(self):
        """Energy density per wavenumber and stored time step (self.Ek_kt)."""
        try:
            self.Ek_kt = 1./2.*np.real( self.vv.conj()*self.vv / self.N ) * self.dx
        except FloatingPointError:
            #
            # probable overflow because the simulation exploded, try removing the last solution
            problem=True
            remove=1
            self.Ek_kt = np.zeros([self.nout+1, self.N]) + 1e-313
            while problem:
                try:
                    self.Ek_kt[0:self.nout+1-remove,:] = 1./2.*np.real( self.vv[0:self.nout+1-remove].conj()*self.vv[0:self.nout+1-remove] / self.N ) * self.dx
                    problem=False
                except FloatingPointError:
                    remove+=1
                    problem=True
        return self.Ek_kt

    def space_filter(self, k_cut=2):
        """Low-pass the stored series at |k| < k_cut; also store the residual."""
        #
        # spatially filter the time series
        self.uu_filt  = np.zeros([self.nout+1, self.N])
        for n in range(self.nout+1):
            v_filt = np.copy(self.vv[n,:])    # copy vv[n,:] (otherwise python treats it as reference and overwrites vv on the next line)
            v_filt[np.abs(self.k)>=k_cut] = 0 # set to zero wavenumbers > k_cut
            self.uu_filt[n,:] = np.real(ifft(v_filt))
        #
        # compute u_resid
        self.uu_resid = self.uu - self.uu_filt

    def space_filter_int(self, k_cut=2, N_int=10):
        """Low-pass as space_filter, additionally interpolated onto N_int points.

        Uses the sentinel 313e6 to mark (then drop) filtered-out modes when
        building the coarse N_int-point representation.
        """
        #
        # spatially filter the time series
        self.N_int        = N_int
        self.uu_filt      = np.zeros([self.nout+1, self.N])
        self.uu_filt_int  = np.zeros([self.nout+1, self.N_int])
        self.x_int        = 2*pi*self.L*np.r_[0:self.N_int]/self.N_int
        for n in range(self.nout+1):
            v_filt = np.copy(self.vv[n,:])    # copy vv[n,:] (otherwise python treats it as reference and overwrites vv on the next line)
            v_filt[np.abs(self.k)>=k_cut] = 313e6
            v_filt_int = v_filt[v_filt != 313e6] * self.N_int/self.N
            self.uu_filt_int[n,:] = np.real(ifft(v_filt_int))
            v_filt[np.abs(self.k)>=k_cut] = 0
            self.uu_filt[n,:] = np.real(ifft(v_filt))
        #
        # compute u_resid
        self.uu_resid = self.uu - self.uu_filt
| 40.871866 | 223 | 0.510052 |
e896c4c7113726282453b1cfb2e768db50878492 | 585 | py | Python | Chapter13/ch13_BIRCH.py | PacktPublishing/Applied-Computational-Thinking-with-Python | fd9982383c5b473ffa1640998540d602876816e5 | [
"MIT"
] | 18 | 2020-11-27T22:41:12.000Z | 2021-12-27T08:20:46.000Z | Chapter13/ch13_BIRCH.py | PacktPublishing/Applied-Computational-Thinking-with-Python | fd9982383c5b473ffa1640998540d602876816e5 | [
"MIT"
] | null | null | null | Chapter13/ch13_BIRCH.py | PacktPublishing/Applied-Computational-Thinking-with-Python | fd9982383c5b473ffa1640998540d602876816e5 | [
"MIT"
] | 8 | 2020-11-30T17:51:11.000Z | 2021-12-25T05:23:02.000Z | from numpy import unique
from numpy import where
from sklearn.datasets import make_classification
from sklearn.cluster import Birch
from matplotlib import pyplot
#Synthetic dataset definition
X, _ = make_classification(n_samples=1800, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, random_state=4)
#Define the BIRCH model
model = Birch(threshold=0.01, n_clusters=2)
model.fit(X)
yhat = model.predict(X)
#Clusters
clusters = unique(yhat)
#Display
for cluster in clusters:
row_ix = where(yhat == cluster)
pyplot.scatter(X[row_ix, 0], X[row_ix, 1])
pyplot.show()
| 30.789474 | 128 | 0.791453 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.