id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
124227 | <gh_stars>1-10
from functools import partial
import arcade
from ..config import CONFIG
from .menu_view import MenuView, MenuField
from ..constants import *
class SettingsView(MenuView):
    """Menu screen that lets the player view and change game settings.

    Settings are rendered as a vertical list; UP/DOWN moves the selection
    (a white outline), LEFT/RIGHT adjusts the selected setting.
    """

    def __init__(self, views):
        super().__init__(views)
        self.width, self.height = self.window.get_size()
        # use selection_index (from MenuView) to grab the currently selected setting
        # setting_list will store the list of settings to add to the view
        # full screen setting disabled as it creates a ton of scaling issues
        self.setting_list = [
            partial(SettingToggle, text="Turn music on/off", binding="is_music_on"),
            # partial(SettingToggle, text="Fullscreen", binding="is_fullscreen"),
            partial(SettingSlider, text="Adjust volume", binding="music_volume"),
        ]
        # Instantiate each setting at its on-screen position: a column at
        # width/4, stacked downwards with 70px vertical spacing.
        self.setting_list = [
            setting(self.width // 4, self.height - i * 70 - self.height // 3)
            for i, setting in enumerate(self.setting_list)
        ]

    def on_draw(self):
        """Draw the title, help text, every setting, and the selection outline."""
        arcade.start_render()
        arcade.draw_text(
            "Settings",
            self.width // 2,
            self.height * 0.85,
            arcade.color.WHITE,
            20,
            anchor_x="center",
            font_name=SETTINGS_FONT,
        )
        self.draw_information_text(arcade.color.WHITE, back=True)
        arcade.draw_text(
            "Left and right to change settings",
            self.width // 16,
            self.height // 8,
            arcade.color.WHITE,
            font_name=SETTINGS_FONT,
        )
        # Width of the label column; each setting draws its control at this offset.
        longest = self.width // 2
        for setting in self.setting_list:
            setting.draw(longest)
        # Outline the currently selected setting. Toggles are vertically
        # offset by 8px and use a shorter box than sliders.
        setting = self.setting_list[self.selection_index]
        x = setting.x + (longest + 60) // 2
        width = longest + 100
        if type(setting) is SettingToggle:
            arcade.draw_rectangle_outline(
                center_x=x,
                center_y=setting.y + 8,
                width=width,
                height=40,
                color=arcade.color.WHITE,
            )
        else:
            arcade.draw_rectangle_outline(
                center_x=x,
                center_y=setting.y,
                width=width,
                height=60,
                color=arcade.color.WHITE,
            )

    def update(self, delta_time: float):
        """Reposition all settings when the window has been resized."""
        if (self.width, self.height) != (new_size := self.window.get_size()):
            self.width, self.height = new_size
            for i, setting in enumerate(self.setting_list):
                setting.x = self.width // 4
                setting.y = self.height - i * 70 - self.height // 3

    def on_key_press(self, symbol, modifiers):
        """UP/DOWN change selection (wrapping), LEFT/RIGHT adjust, ESC goes back."""
        if symbol == arcade.key.UP:
            self.selection_index -= 1
            if self.selection_index < 0:
                self.selection_index = len(self.setting_list) - 1
        elif symbol == arcade.key.DOWN:
            self.selection_index = (self.selection_index + 1) % len(self.setting_list)
        elif symbol == arcade.key.LEFT:
            self.setting_list[self.selection_index].decrease()
        elif symbol == arcade.key.RIGHT:
            self.setting_list[self.selection_index].increase()
        elif symbol == arcade.key.ESCAPE:
            self.switch_to_previous()

    def on_hide_view(self):
        # Drop UI event handlers so they don't fire for a hidden view.
        self.ui_manager.unregister_handlers()
class SettingField(MenuField):
    """
    Represents a setting the user can modify, with a text label.

    ``binding`` is the name of the attribute on the global CONFIG object
    that this field reads and writes through its ``value`` property.
    """

    def __init__(self, x: int, y: int, text: str, binding: str):
        super().__init__(x, y, text)
        self.binding = binding

    # NOTE: the original code registered the identical getter a second time
    # via ``@value.getter``; that duplicate has been removed — @property
    # already installs the getter.
    @property
    def value(self):
        """Current value of the bound CONFIG attribute."""
        return getattr(CONFIG, self.binding)

    @value.setter
    def value(self, value):
        setattr(CONFIG, self.binding, value)

    def decrease(self):
        """Lower the setting (LEFT key); subclasses implement the semantics."""
        ...

    def increase(self):
        """Raise the setting (RIGHT key); subclasses implement the semantics."""
        ...
class SettingToggle(SettingField):
    """
    Represents a toggleable (on/off) setting.
    """
    # NOTE: the original defined an __init__ that only forwarded its
    # arguments to super() unchanged; it was redundant and has been removed.

    def decrease(self):
        """LEFT key: switch the setting off."""
        self.value = False

    def increase(self):
        """RIGHT key: switch the setting on."""
        self.value = True

    def draw(self, longest=None):
        """Draw the label plus a switch: green knob on the right when on,
        red knob on the left when off.

        ``longest`` is the x-offset of the control column (the caller
        always supplies it, the default is never used in this file).
        """
        arcade.draw_text(
            self.text,
            self.x,
            self.y,
            color=arcade.csscolor.WHITE,
            font_name=SETTINGS_FONT,
        )
        # Switch outline.
        arcade.draw_rectangle_outline(
            self.x + longest + 35, self.y + 8, 49, 20, color=arcade.color.WHITE
        )
        if self.value:
            # Knob on the right: enabled.
            arcade.draw_rectangle_filled(
                self.x + longest + 47, self.y + 8, 23, 18, color=arcade.color.BUD_GREEN,
            )
        else:
            # Knob on the left: disabled.
            arcade.draw_rectangle_filled(
                self.x + longest + 23, self.y + 8, 23, 18, color=arcade.color.CG_RED,
            )
class SettingSlider(SettingField):
    """
    Represents a setting with a slider, with values ranging from [1, 10].
    """
    # NOTE: the original defined an __init__ that only forwarded its
    # arguments to super() unchanged; it was redundant and has been removed.

    def decrease(self):
        """LEFT key: decrement the value, clamped at the minimum of 1."""
        if self.value >= 2:
            self.value -= 1

    def increase(self):
        """RIGHT key: increment the value, clamped at the maximum of 10."""
        if self.value < 10:
            self.value += 1

    def draw(self, longest=None):
        """Draw the label, a horizontal track, the numeric value, and the
        slider knob at a position proportional to the value.

        ``longest`` is the track length; the caller always supplies it.
        """
        arcade.draw_text(
            self.text,
            self.x,
            self.y,
            color=arcade.csscolor.WHITE,
            # assumes MenuField provides self.length (label width) — TODO confirm
            width=self.length + 10,
            font_name=SETTINGS_FONT,
        )
        # Slider track.
        arcade.draw_line(
            self.x, self.y - 15, self.x + longest, self.y - 15, arcade.color.WHITE
        )
        # Current numeric value, right of the track.
        arcade.draw_text(
            str(self.value),
            self.x + longest + 35,
            self.y - 10,
            arcade.color.WHITE,
            20,
            anchor_x="center",
            font_name=SETTINGS_FONT,
        )
        # Knob: the track is divided into 9 intervals for values 1..10.
        tick_len = longest // 9
        arcade.draw_circle_filled(
            self.x + (tick_len * (self.value - 1)), self.y - 15, 8.0, arcade.color.WHITE
        )
| StarcoderdataPython |
11350783 | import configparser
def return_credentials():
    """Parse ``credentials.ini`` from the working directory and return the parser."""
    parser = configparser.ConfigParser()
    parser.read('credentials.ini')
    return parser
if __name__ == '__main__':
return_credentials()
| StarcoderdataPython |
5198556 | """A module defining dependencies of the `rules_rust` tests"""
load("//test/load_arbitrary_tool:load_arbitrary_tool_test.bzl", "load_arbitrary_tool_test")
def io_bazel_rules_rust_test_deps():
    """Load dependencies for rules_rust tests.

    Currently this only sets up the repository used by the
    load_arbitrary_tool test; call it from the test WORKSPACE setup.
    """
    load_arbitrary_tool_test()
| StarcoderdataPython |
8075080 | <gh_stars>1-10
layout = None | StarcoderdataPython |
3205680 | # Copyright 2019, Oath Inc.
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import configparser
import copy
import os
import sys
from tempfile import TemporaryDirectory
import unittest
from screwdrivercd.version import cli
from screwdrivercd.version.version_types import versioners
class TestCLI(unittest.TestCase):
cwd = None
orig_argv = None
orig_environ =None
tempdir = None
environ_keys = set('SD_BUILD')
origcwd = os.getcwd()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.orig_argv = sys.argv
self.cwd = os.getcwd()
self.orig_environ = copy.copy(os.environ)
def setUp(self):
super().setUp()
self.orig_argv = sys.argv
self.tempdir = TemporaryDirectory()
os.chdir(self.tempdir.name)
def tearDown(self):
super().tearDown()
if self.orig_argv:
sys.argv = self.orig_argv
for environ_key in self.environ_keys:
if self.orig_environ.get(environ_key, None):
os.environ[environ_key] = self.orig_environ[environ_key]
for versioner in versioners.values():
versioner.ignore_meta_version = True
os.chdir(self.origcwd)
self.tempdir.cleanup()
def _get_version(self, setup_cfg_filename='setup.cfg'):
config = configparser.ConfigParser()
config.read(setup_cfg_filename)
if 'metadata' in config.sections():
return config['metadata'].get('version', None)
def test__main__no_setup_cfg(self):
sys.argv = ['cli']
cli.main()
def test__main__setup_cfg__version(self):
sys.argv = ['cli', '--ignore_meta']
if 'SD_BUILD' in os.environ:
del os.environ['SD_BUILD']
print(f'current directory {os.getcwd()!r}')
with open('setup.cfg', 'w') as fh:
fh.write('[metadata]\nversion = 0.0.0\n[screwdrivercd.version]\nversion_type = git_revision_count\n')
cli.main()
with open('setup.cfg') as fh:
result = fh.read()
self.assertIn('version = 0.0.0', result)
def test__main__setup_cfg__version__update_meta(self):
sys.argv = ['cli', '--ignore_meta', '--update_meta']
if 'SD_BUILD' in os.environ:
del os.environ['SD_BUILD']
print(f'current directory {os.getcwd()!r}')
with open('setup.cfg', 'w') as fh:
fh.write('[metadata]\nversion = 0.0.0\n[screwdrivercd.version]\nversion_type = git_revision_count\n')
cli.main()
with open('setup.cfg') as fh:
result = fh.read()
self.assertIn('version = 0.0.0', result)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
12812224 | from pbxproj import PBXGenericObject
class XCConfigurationList(PBXGenericObject):
    """A build-configuration list entry in a .pbxproj object graph."""

    def _get_comment(self):
        """Return the standard pbxproj comment for this configuration list."""
        info = self._get_section()
        return f'Build configuration list for {info[0]} "{info[1]}"'

    def _get_section(self):
        """Locate the target or project owning this list.

        Returns an ``(isa, name)`` tuple.  Native/aggregate targets are
        checked first; otherwise the owner is the project itself, which is
        named after its first target.
        """
        objects = self.get_parent()
        target_id = self.get_id()

        for obj in objects.get_objects_in_section('PBXNativeTarget', 'PBXAggregateTarget'):
            if target_id in obj.buildConfigurationList:
                return obj.isa, obj.name

        # Idiom fix: use next() instead of calling __next__() directly.
        projects = filter(lambda o: target_id in o.buildConfigurationList, objects.get_objects_in_section('PBXProject'))
        project = next(projects)
        target = objects[project.targets[0]]
        # Some targets expose productName instead of name.
        name = target.name if hasattr(target, 'name') else target.productName
        return project.isa, name
| StarcoderdataPython |
6701802 | <reponame>pyensemble/wildwood
"""
In this module we use wildwood on a binary classification problem with 2 features but
with a very large sample size (to check that parallization works, and to track the
evolution of computing times
"""
from time import time
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_circles, make_moons
from sklearn.model_selection import train_test_split
# from sklearn.tree import DecisionTreeClassifier as SkDecisionTreeClassifier
# from sklearn.tree import ExtraTreeClassifier as SkExtraTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# from wildwood._classes import DecisionTreeClassifier
from wildwood.forest import ForestClassifier
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
np.set_printoptions(precision=2)
#
# logging.info("JIT compiling...")
# tic = time()
# X, y = make_circles(n_samples=5, noise=0.2, factor=0.5, random_state=1)
# clf = DecisionTreeClassifier(min_samples_split=3)
# clf.fit(X, y)
# clf.predict_proba(X)
# toc = time()
# logging.info("Spent {time} compiling.".format(time=toc - tic))
# n_samples = 1000
# n_samples = 2_000_000
n_samples = 1_000_000
# n_samples = 10
random_state = 42
data_random_state = 123
datasets = [
# (
# "circles",
# make_circles(
# n_samples=n_samples, noise=0.2, factor=0.5, random_state=data_random_state
# ),
# ),
(
"moons",
make_moons(n_samples=n_samples, noise=0.2, random_state=data_random_state),
),
]
clf_kwargs = {
"n_estimators": 100,
"max_features": 2,
"min_samples_split": 2,
"random_state": random_state,
"n_jobs": -1,
"dirichlet": 1e-8,
"step": 1.0,
"aggregation": True,
"verbose": True
}
# classifiers = [
# ("tree", DecisionTreeClassifier),
# ("sk_tree", SkDecisionTreeClassifier)
# ]
classifiers = [
# ("forest", ForestBinaryClassifier(n_estimators=1, **clf_kwargs)),
("forest", ForestClassifier(**clf_kwargs)),
# ("sk_forest", RandomForestClassifier(**clf_kwargs))
# ("tree", DecisionTreeClassifier(**clf_kwargs)),
# ("sk_tree", SkDecisionTreeClassifier(**clf_kwargs)),
]
n_classifiers = len(classifiers)
n_datasets = len(datasets)
h = 0.2
i = 1
# iterate over datasets
# for ds_cnt, ds in enumerate(datasets):
# # preprocess datasets, split into training and test part
# ds_name, (X, y) = ds
# X_train, X_test, y_train, y_test = train_test_split(
# X, y, test_size=0.4, random_state=42
# )
# # x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
# # y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
# # xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# # # just plot the datasets first
# # cm = plt.cm.RdBu
# # cm_bright = ListedColormap(["#FF0000", "#0000FF"])
# # ax = plt.subplot(n_datasets, n_classifiers + 1, i)
# # if ds_cnt == 0:
# # ax.set_title("Input data")
# # Plot the training points
# # ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=10, cmap=cm)
# # # and testing points
# # ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm, s=10, alpha=0.6)
# # ax.set_xlim(xx.min(), xx.max())
# # ax.set_ylim(yy.min(), yy.max())
# # ax.set_xticks(())
# # ax.set_yticks(())
# # i += 1
# # iterate over classifiers
# for name, clf in classifiers:
# # ax = plt.subplot(n_datasets, n_classifiers + 1, i)
# clf.fit(X_train, y_train)
# # # logging.info("%s had %d nodes" % (name, clf.tree_.node_count))
# # truc = np.empty((xx.ravel().shape[0], 2))
# # truc[:, 0] = xx.ravel()
# # truc[:, 1] = yy.ravel()
#
# clf.predict_proba(X_test)
#
# # Z = clf.predict_proba(truc)[:, 1]
# # # score = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
# # # Put the result into a color plot
# # Z = Z.reshape(xx.shape)
# # ax.contourf(xx, yy, Z, cmap=cm, alpha=0.8)
# # ax.set_xlim(xx.min(), xx.max())
# # ax.set_ylim(yy.min(), yy.max())
# # ax.set_xticks(())
# # ax.set_yticks(())
# # if ds_cnt == 0:
# # ax.set_title(name)
# # i += 1
#
# exit(0)
def plot_decision_classification(classifiers, datasets):
    """Plot each dataset alongside every classifier's decision surface.

    One subplot row per dataset: the first column shows the raw data, the
    remaining columns show each fitted classifier's P(class=1) contour.
    """
    n_classifiers = len(classifiers)
    n_datasets = len(datasets)
    h = 0.2  # mesh step for the decision-surface grid
    fig = plt.figure(figsize=(2 * (n_classifiers + 1), 2 * n_datasets))
    i = 1  # running subplot index
    # iterate over datasets
    for ds_cnt, ds in enumerate(datasets):
        # preprocess datasets, split into training and test part
        ds_name, (X, y) = ds
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.4, random_state=42
        )
        # Mesh grid covering the data with a 0.5 margin on every side.
        x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
        y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
        # just plot the datasets first
        cm = plt.cm.RdBu
        cm_bright = ListedColormap(["#FF0000", "#0000FF"])
        ax = plt.subplot(n_datasets, n_classifiers + 1, i)
        if ds_cnt == 0:
            ax.set_title("Input data")
        # Plot the training points (scatter disabled: too slow at 10^6 points)
        # ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=10, cmap=cm)
        # and testing points
        # ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm, s=10, alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        i += 1
        # iterate over classifiers
        for name, clf in classifiers:
            ax = plt.subplot(n_datasets, n_classifiers + 1, i)
            clf.fit(X_train, y_train)
            clf.apply(X_train)
            # logging.info("%s had %d nodes" % (name, clf.tree_.node_count))
            # Stack the mesh coordinates into an (n_points, 2) feature matrix.
            truc = np.empty((xx.ravel().shape[0], 2))
            truc[:, 0] = xx.ravel()
            truc[:, 1] = yy.ravel()
            Z = clf.predict_proba(truc)[:, 1]
            # Z = clf.predict_proba_trees(truc)[0][:, 1]
            # score = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
            # Put the result into a color plot
            Z = Z.reshape(xx.shape)
            ax.contourf(xx, yy, Z, cmap=cm, alpha=0.8)
            ax.set_xlim(xx.min(), xx.max())
            ax.set_ylim(yy.min(), yy.max())
            ax.set_xticks(())
            ax.set_yticks(())
            if ds_cnt == 0:
                ax.set_title(name)
            i += 1
    plt.tight_layout()
#
tic = time()
plot_decision_classification(classifiers, datasets)
toc = time()
print("time: ", toc-tic)
plt.show()
| StarcoderdataPython |
12823198 | <gh_stars>0
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import inspect
from importer.common.handler_options import HandlerOptions, handler_option
from .. import common
@handler_option('remove_quantize_ops', val_type=bool, default=True, desc="remove cast and quantization operations on non-constant tensors")
@handler_option('load_quantization', val_type=bool, default=False, desc="load TFLITE tensor quantization", shortcut='q')
@handler_option('use_lut_sigmoid', val_type=bool, default=False, desc="Map logistic node from tflite onto LUT based sigmoid operation (supported only with sq8 quantization)")
@handler_option('use_lut_tanh', val_type=bool, default=False, desc="Map TANH node from tflite onto LUT based tanh operation (supported only with sq8 quantization)")
# @handler_option('insert_relus', val_type=bool, default=False, desc="Insert RELUs if quantization scaling implies that they may be necessary to ensure float calculation")
class Handler(HandlerOptions):
    """Base class for TFLite operator handlers.

    Backend and frontend handler base classes inherit from this class.
    Every operator handler MUST use the @tflite_op decorator to register
    the corresponding op, and implements versioned ``version_<n>`` methods.
    """
    TFLITE_OP = None
    TFLITE_CUSTOM_OP = None
    GRAPH_VERSION = 0
    PARTIAL_SUPPORT = False
    NOT_SUPPORTED = False
    PS_DESCRIPTION = ''

    @classmethod
    def check_cls(cls):
        """Warn when a handler class forgot to register its TFLite op."""
        if not cls.TFLITE_OP:
            common.LOG.warning(
                "%s doesn't have TFLITE_OP. "
                "Please use Handler.tflite_op decorator to register TFLITE_OP.",
                cls.__name__)

    @classmethod
    def handle(cls, node, **kwargs):
        """Dispatch ``node`` to the best matching ``version_<n>`` method.

        The highest registered version that does not exceed the node's
        op_version is chosen.  The ``version_`` prefix is reserved.

        :param node: Operator for backend.
        :param kwargs: Other args.
        :return: TensorflowNode for backend.
        :raises ValueError: when no suitable version is implemented.
        """
        possible_versions = [ver for ver in cls.get_versions() if ver <= node.op_version]
        if possible_versions:
            handle_version = max(possible_versions)
            ver_handle = getattr(cls, "version_{}".format(handle_version), None)
            #pylint: disable=not-callable
            return ver_handle(node, **kwargs)
        raise ValueError(
            "{} version {} is not implemented.".format(node.op_type, node.op_version))

    @classmethod
    def get_versions(cls):
        """Return the list of op versions this handler implements.

        Each ``version_<n>`` method contributes its integer ``n``.
        """
        return [
            int(name.replace("version_", ""))
            for name, _ in inspect.getmembers(cls, inspect.ismethod)
            if name.startswith("version_")
        ]

    @staticmethod
    def tflite_op(op):
        return Handler.property_register("TFLITE_OP", op)

    @staticmethod
    def tflite_custom_op(op):
        return Handler.property_register("TFLITE_CUSTOM_OP", op)

    @staticmethod
    def partial_support(ps):
        return Handler.property_register("PARTIAL_SUPPORT", ps)

    @staticmethod
    def not_supported(ps):
        return Handler.property_register("NOT_SUPPORTED", ps)

    @staticmethod
    def ps_description(psd):
        return Handler.property_register("PS_DESCRIPTION", psd)

    @staticmethod
    def property_register(name, value):
        """Return a class decorator that sets attribute ``name`` to ``value``."""
        def deco(cls):
            setattr(cls, name, value)
            return cls
        return deco
tflite_op = Handler.tflite_op
tflite_custom_op = Handler.tflite_custom_op
partial_support = Handler.partial_support
not_supported = Handler.not_supported
ps_description = Handler.ps_description
property_register = Handler.property_register
| StarcoderdataPython |
4948006 | import abstract_interface
import logging
from game_code import interactions
from game_code.core import exceptions
log = logging.getLogger('interface.python')
class PythonDecision(abstract_interface.Decision):
    """A pending player decision: maps user-entered keys to choice objects."""

    def __init__(self, interface, prompt_id, prompt, choices, **kwargs):
        assert isinstance(interface, PythonInterface)
        # Build both the full key -> choice map and the visible subset,
        # optionally appending menu choices or an explicit exit choice.
        choice_map, choice_display = interface.format_choices(
            choices,
            kwargs.get('add_menu_choices', True),
            kwargs.get('add_menu_exit', False),
        )
        self.choice_map = choice_map
        self.choice_display = choice_display
        super(PythonDecision, self).__init__(interface, prompt_id, prompt, choices, **kwargs)

    @property
    def valid_choices(self):
        # All keys the user may enter for this decision.
        return self.choice_map.keys()

    def set_choice(self, choice):
        # Resolve the entered key; unknown keys raise GameInvalidChoice.
        choice_obj = self.choice_map.get(choice, None)
        if choice_obj is None:
            raise exceptions.GameInvalidChoice(choice)
        self.choice = choice_obj

    def add_choice(self, choice, choice_key=None):
        """for debugging or cheats via choice_hook"""
        choice_key = choice_key or choice.key or choice.text
        self.choice_map[choice_key] = choice
class ConsumerPackage(object):
    """Plain data container for one screen handed to the consumer:
    prompt id, title, body text, prompt, displayable choices and events."""

    def __init__(self, pid, title, text, prompt, choices, events):
        super(ConsumerPackage, self).__init__()
        self.pid = pid
        self.title = title
        self.text = text
        self.prompt = prompt
        self.choices = choices
        self.events = events

    def __str__(self):
        return 'ConsumerPackage(pid={} title="{}" choices={})'.format(
            self.pid, self.title, self.choices)

    def __repr__(self):
        return self.__str__()
class PythonInterface(abstract_interface.Interface):
    """Interface implementation driven by plain Python calls.

    ``get_next_screen`` produces a ConsumerPackage describing the current
    screen and its choices; ``put_choice`` feeds the player's answer back
    into the game.
    """
    decision_class = PythonDecision

    def __init__(self, menu_choices=None):
        self.__current_decision = None
        self.__current_consumer_package = None
        super(PythonInterface, self).__init__(menu_choices)

    def format_choices(self, choices, add_menu_choices=True, add_menu_exit=False):
        """Build (choice_map, choice_display) for a list of choices.

        Choices with an explicit key keep it; others get sequential numeric
        keys.  Menu choices (or a single exit choice) may be appended.
        Hidden choices stay selectable but are excluded from the display map.
        """
        index = 1
        choice_map = {}
        for choice in choices:
            if choice.key:
                # todo: hardening so that choice key can not be a number and clash with indexed choices
                key = str(choice.key)
            else:
                key = index
                index += 1
            choice_map[key] = choice
        if add_menu_choices and self.menu_choices:
            choice_map.update({c.key: c for c in self.menu_choices})
        elif isinstance(add_menu_exit, interactions.choices.ChoiceExitMenu):
            choice_map.update({add_menu_exit.key: add_menu_exit})
        choice_display = {key: choice.text for key, choice in choice_map.items() if not choice.hidden}
        return choice_map, choice_display

    def start(self):
        pass

    def reset_status(self):
        """Clear the pending decision and package so a new screen is built."""
        self.__current_decision = None
        self.__current_consumer_package = None

    def display(self, text):
        # BUGFIX: was the Python-2-only statement ``print text``; the
        # parenthesised form is valid in both Python 2 and 3.
        print(text)

    @property
    def is_in_progress(self):
        return bool(self.__current_decision or self.__current_consumer_package)

    def get_next_screen(self):
        """Return the current ConsumerPackage, building a new one if needed.

        :raises exceptions.GameNotOperating: when the game has stopped (a
            final "THE END" package is stored first, if available).
        """
        if not self.game.operating:
            if self.game.the_end is not None:
                consumer_package = ConsumerPackage(-1, 'THE END', self.game.the_end, None, None, None)
                self.__current_consumer_package = consumer_package
            raise exceptions.GameNotOperating()
        elif not self.is_in_progress:
            screen = self.game.get_state()
            events = self.game.do_screen(screen)
            # default choices is always go back.. unless there are others
            choices = [interactions.choices.ChoiceGoBack()]
            if screen.choices:
                enabled_choices = self.game.parse_choices(screen.choices)
                if enabled_choices:
                    choices = enabled_choices
            # make a decision with the choices available
            decision = self.create_decision(
                prompt=screen.prompt,
                choices=choices,
                **screen.kwargs
            )
            self.__current_decision = decision
            # make a package to display to the consumer
            consumer_package = ConsumerPackage(
                pid=decision.prompt_id,
                title=screen.title,
                text=screen.text,
                prompt=screen.prompt,
                choices=decision.choice_display,
                events=events,
            )
            self.__current_consumer_package = consumer_package
        return self.__current_consumer_package

    def put_choice(self, choice):
        """Apply the player's choice; return False for an invalid key."""
        try:
            self.__current_decision.set_choice(choice)
        except exceptions.GameInvalidChoice:
            return False
        else:
            self.game.handle_choice(self.__current_decision.choice)
            self.reset_status()
            return True
| StarcoderdataPython |
def eval_numerical_gradient(f, x):
    """
    Naive numerical gradient of ``f`` at ``x`` using forward differences.

    - ``f`` should be a function that takes a single argument
    - ``x`` is the point (numpy array) to evaluate the gradient at
    """
    import numpy as np

    h = 0.00001
    base_value = f(x)  # function value at the original point
    grad = np.zeros(x.shape)

    # Walk every index of x, perturb that coordinate by h, measure the slope.
    iterator = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not iterator.finished:
        idx = iterator.multi_index
        saved = x[idx]
        x[idx] = saved + h            # nudge this coordinate
        perturbed_value = f(x)        # f(x + h * e_idx)
        x[idx] = saved                # restore x (very important: modified in place)
        grad[idx] = (perturbed_value - base_value) / h  # the slope
        iterator.iternext()           # step to next dimension
    return grad
X_train, Y_train, X_test, Y_test = load_CIFAR10('data/cifar10/') # a magic function we provide
def CIFAR10_loss_fun(W):
    # Full-batch training loss as a function of the weights W only.
    # NOTE(review): relies on module-level L, X_train, Y_train being defined
    # by the surrounding tutorial code — they are not defined in this file.
    return L(X_train, Y_train, W)
W = np.random.rand(10, 3073) * 0.001 # random weight vector
df = eval_numerical_gradient(CIFAR10_loss_fun, W) # get the gradient
loss_original = CIFAR10_loss_fun(W) # the original loss
print ('original loss: %f' % (loss_original, ))
# lets see the effect of multiple step sizes
for step_size_log in [-10, -9, -8, -7, -6, -5,-4,-3,-2,-1]:
step_size = 10 ** step_size_log
W_new = W - step_size * df # new position in the weight space
loss_new = CIFAR10_loss_fun(W_new)
print ('for step size %f new loss: %f' % (step_size, loss_new)) | StarcoderdataPython |
9754623 | import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from .base import Sampler
class VoxelgridSampler(Sampler):
    """Base class for samplers that operate on a precomputed voxelgrid."""

    def __init__(self, *, pyntcloud, voxelgrid_id):
        super().__init__(pyntcloud=pyntcloud)
        # Key into pyntcloud.structures identifying which voxelgrid to use.
        self.voxelgrid_id = voxelgrid_id

    def extract_info(self):
        # Resolve the id to the actual voxelgrid structure before compute().
        self.voxelgrid = self.pyntcloud.structures[self.voxelgrid_id]
class VoxelgridCentersSampler(VoxelgridSampler):
    """Returns the points that represent each occupied voxel's center."""

    def compute(self):
        occupied = np.unique(self.voxelgrid.voxel_n)
        centers = self.voxelgrid.voxel_centers[occupied]
        return pd.DataFrame(centers, columns=["x", "y", "z"])
class VoxelgridCentroidsSampler(VoxelgridSampler):
    """Returns the centroid of each group of points inside each occupied voxel."""

    def compute(self):
        df = pd.DataFrame(self.pyntcloud.xyz, columns=["x", "y", "z"])
        df["voxel_n"] = self.voxelgrid.voxel_n
        # Mean x/y/z per voxel index = centroid of that voxel's points.
        return df.groupby("voxel_n").mean()
class VoxelgridNearestSampler(VoxelgridSampler):
    """Returns the N closest points to each occupied voxel's center."""

    def __init__(self, *, pyntcloud, voxelgrid_id, n=1):
        super().__init__(pyntcloud=pyntcloud, voxelgrid_id=voxelgrid_id)
        # Number of points to keep per voxel.
        self.n = n

    def compute(self):
        voxel_n_id = "voxel_n({})".format(self.voxelgrid_id)
        # Cache the per-point voxel index as a column on the cloud's points.
        if voxel_n_id not in self.pyntcloud.points:
            self.pyntcloud.points[voxel_n_id] = self.voxelgrid.voxel_n
        nearests = []
        for voxel_n, x in self.pyntcloud.points.groupby(voxel_n_id, sort=False):
            xyz = x.loc[:, ["x", "y", "z"]].values
            center = self.voxelgrid.voxel_centers[voxel_n]
            # Positions (within this voxel's group) of the n closest points.
            voxel_nearest = cdist([center], xyz)[0].argsort()[:self.n]
            nearests.extend(x.index.values[voxel_nearest])
        return self.pyntcloud.points.iloc[nearests].reset_index(drop=True)
class VoxelgridHighestSampler(VoxelgridSampler):
    """Returns the highest points of each voxel."""

    def compute(self):
        voxel_n_id = "voxel_n({})".format(self.voxelgrid_id)
        # Cache the per-point voxel index as a column on the cloud's points.
        if voxel_n_id not in self.pyntcloud.points:
            self.pyntcloud.points[voxel_n_id] = self.voxelgrid.voxel_n
        # For each voxel, keep the single row with the maximal z value.
        return self.pyntcloud.points.iloc[
            self.pyntcloud.points.groupby(voxel_n_id)["z"].idxmax()].reset_index(drop=True)
| StarcoderdataPython |
1710477 | from tb_profil import Tb_profil
from tb_login import Tb_Login
from termcolor import colored
from Validation import Validations
from os import system
class Main:
    """Console menu application: handles login, then profile CRUD actions."""

    # Shared helper/data-access objects (class-level singletons).
    Valid = Validations()
    TbProfil = Tb_profil()
    Tblogin = Tb_Login()

    def __init__(self):
        self.User = ""
        # demo credentials: username "ardiansah" (password redacted in source)
        self.show_login()

    def show_login(self):
        # Block until login succeeds, then enter the main menu loop.
        self.User = self.Tblogin.login()
        print ("Login By : {}".format(self.User))
        self.show_menu()

    def show_menu(self):
        # Menu loop: 1=show, 2=add, 3=edit, 4=delete profiles, 5=quit.
        # (Menu labels are Indonesian and are part of the UI output.)
        while True:
            self.Menu = "Menu"
            print(self.Menu.center(20))
            print ("-------------------")
            print ("1. Tampilkan Profil")
            print ("2. Tambah Profil")
            print ("3. Edit Profil")
            print ("4. Hapus Profil")
            print ("5. Keluar")
            self.Inputan = self.Valid.validation_Menu("Pilih menu [1 - 5] : ",True,1,5)
            if (self.Inputan ==1):
                system("cls")
                self.TbProfil.show_Profil(self.Inputan)
                print("\n")
            elif (self.Inputan == 2):
                system("cls")
                self.TbProfil.insert_Profil()
                print("\n")
            elif (self.Inputan == 3):
                system("cls")
                self.TbProfil.edit_Profil()
                print("\n")
            elif (self.Inputan == 4):
                system("cls")
                self.TbProfil.delete_Profil()
                print("\n")
            else:
                # Option 5: say goodbye and exit the process.
                system("cls")
                print("Bye bye : {}".format(self.User))
                exit()
if __name__ == "__main__":
    try:
        # Clear the console before showing the login screen.
        system("cls")
        Main()
    except Exception as err:
        # Surface any unhandled error from the menu loop in red.
        print(colored("class menu -> {}".format(err),"red"))
1911860 | <gh_stars>0
""" After pressed enter, the program retrieve the position of the mouse."""
from gui import PyGuiAutomation
def main():
    """Wait for Enter, then continuously print the mouse position until Ctrl+C."""
    print('Press Ctrl+C to quit.')
    input()
    automation = PyGuiAutomation()
    try:
        while True:
            pos_x, pos_y = automation.position()
            print(f"X:{pos_x} Y:{pos_y}")
    except KeyboardInterrupt:
        print('\nDone')
if __name__ == '__main__':
main()
| StarcoderdataPython |
11283705 | <filename>upy/contrib/tree/views.py<gh_stars>1-10
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from upy.contrib.tree.utility import UPYRobotTXT, UPYSitemap
def upy_render(request, upy_context, vars_dictionary):
    """
    Render the template attached to the page found in ``upy_context``,
    with ``vars_dictionary`` as the template context.
    """
    page = upy_context['PAGE']
    return render_to_response(page.template.file_name, vars_dictionary, context_instance=RequestContext(request))
def view_404(request, url=None):
    """
    Render the 404 template for the requested path and return it with a
    404 status code.
    """
    res = render_to_response("404.html", {"PAGE_URL": request.get_full_path()},
                             context_instance=RequestContext(request))
    # render_to_response defaults to 200; force the not-found status.
    res.status_code = 404
    return res
def view_500(request, url=None):
    """
    Render the 500 template and return it with a 500 status code.
    """
    res = render_to_response("500.html", context_instance=RequestContext(request))
    # render_to_response defaults to 200; force the server-error status.
    res.status_code = 500
    return res
def sitemap(request):
    """
    Build and return the site's sitemap.xml as an XML HTTP response.
    """
    upysitemap = UPYSitemap(request)
    return HttpResponse(upysitemap._do_sitemap(), content_type="text/xml")
def robots(request):
    """
    Build and return the site's robots.txt as a plain-text HTTP response.
    """
    upyrobottxt = UPYRobotTXT(request)
    return HttpResponse(upyrobottxt._do_robotstxt(), content_type="text")
def favicon(request):
    """
    Redirect to the favicon: the default MetaSite's configured icon when the
    seo app is installed and a default site with a favicon exists, otherwise
    the bundled static fallback.
    """
    # Renamed from ``favicon`` — the local previously shadowed this function.
    fallback = "/upy_static/images/favicon.ico"
    try:
        # Imported lazily: the seo app is optional.
        from upy.contrib.seo.models import MetaSite
        site = MetaSite.objects.get(default=True)
        return HttpResponseRedirect(site.favicon.url)
    except Exception:
        # Idiom fix: was a bare ``except:``.  Any failure (app missing, no
        # default site, no favicon set) falls back to the static icon.
        return HttpResponseRedirect(fallback)
| StarcoderdataPython |
6617954 | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from django.conf import settings
from .forms import StudentUserCreationForm, StudentUserChangeForm
from .models import StudentUser
import os
class StudentUserAdmin(UserAdmin):
    """Django admin configuration for the custom StudentUser model."""

    add_form = StudentUserCreationForm  # form used on the "add user" page
    form = StudentUserChangeForm        # form used on the "change user" page
    model = StudentUser
    # Columns shown on the user list page.
    list_display = ['email', 'first_name', 'last_name', 'username', 'is_active', 'is_ta', 'is_superuser']
    # Field groupings on the change page.
    fieldsets = (
        ('User Information', {'fields': ('first_name', 'last_name', 'email', 'username')}),
        ('Permissions', {'fields': ('is_active', 'is_ta', 'is_superuser')}),
        ('Important Dates', {'fields': ('date_joined',)})
    )
    list_filter = ('is_superuser', 'is_ta', 'is_active')
admin.site.site_header = os.environ.get('COURSE_TITLE','') + ' Office Hours Admin'
admin.site.register(StudentUser, StudentUserAdmin)
admin.site.unregister(Group) | StarcoderdataPython |
6472033 | <reponame>0xflotus/voctomix<gh_stars>100-1000
from mock import ANY
from lib.response import NotifyResponse
from tests.commands.commands_test_base import CommandsTestBase
class SetVideoTest(CommandsTestBase):
    """Tests for the set_video_a / set_video_b mixer control commands."""

    def test_set_video_a(self):
        response = self.commands.set_video_a("cam2")
        # "cam2" maps to source index 1 in the test pipeline configuration.
        self.pipeline_mock.vmix.setVideoSourceA.assert_called_with(1)
        self.assertIsInstance(response, NotifyResponse)
        self.assertEqual(response.args, ('video_status', ANY, ANY))

    def test_cant_set_video_a_to_unknown_value(self):
        with self.assertRaises(ValueError):
            self.commands.set_video_a("foobar")
        self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()

    def test_cant_set_video_a_to_int(self):
        # Only source *names* are accepted, not raw indices.
        with self.assertRaises(ValueError):
            self.commands.set_video_a(1)
        self.pipeline_mock.vmix.setVideoSourceA.assert_not_called()

    def test_set_video_b(self):
        response = self.commands.set_video_b("grabber")
        # "grabber" maps to source index 2 in the test pipeline configuration.
        self.pipeline_mock.vmix.setVideoSourceB.assert_called_with(2)
        self.assertIsInstance(response, NotifyResponse)
        self.assertEqual(response.args, ('video_status', ANY, ANY))

    def test_cant_set_video_b_to_unknown_value(self):
        with self.assertRaises(ValueError):
            self.commands.set_video_b("moobar")
        self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()

    def test_cant_set_video_b_to_int(self):
        with self.assertRaises(ValueError):
            self.commands.set_video_b(2)
        self.pipeline_mock.vmix.setVideoSourceB.assert_not_called()
| StarcoderdataPython |
87489 | <reponame>ashishdhngr/baserow
from unittest.mock import patch, call, ANY
import pytest
from django.db import transaction
from baserow.contrib.database.api.constants import PUBLIC_PLACEHOLDER_ENTITY_ID
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.views.handler import ViewHandler
from baserow.core.trash.handler import TrashHandler
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_created_public_views_receive_restricted_row_created_ws_event(
    mock_broadcast_to_channel_group, data_fixture
):
    """Public grid views must broadcast row_created containing only the
    fields visible in that view; form views receive no public events."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_only_showing_one_field = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_showing_all_fields = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # No public events should be sent to this form view
    data_fixture.create_form_view(user, table=table, public=True)
    # Hide the second field in the restricted view only.
    data_fixture.create_grid_view_field_option(
        public_view_only_showing_one_field, hidden_field, hidden=True
    )
    row = RowHandler().create_row(
        user=user,
        table=table,
        values={
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    # Expect one broadcast to the table channel plus one per public grid view.
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_only_showing_one_field.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
            call(
                f"view-{public_view_showing_all_fields.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        f"field_{visible_field.id}": "Visible",
                        # This field is not hidden for this public view and so should be
                        # included
                        f"field_{hidden_field.id}": "Hidden",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_created_public_views_receive_row_created_only_when_filters_match(
    mock_broadcast_to_channel_group, data_fixture
):
    """row_created is only broadcast to public views whose filters match the new row."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_showing_row = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_hiding_row = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_showing_row, hidden_field, hidden=True
    )
    data_fixture.create_grid_view_field_option(
        public_view_hiding_row, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
    )
    row = RowHandler().create_row(
        user=user,
        table=table,
        values={
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_showing_row.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_deleted_public_views_receive_restricted_row_deleted_ws_event(
    mock_broadcast_to_channel_group, data_fixture
):
    """A deleted row is broadcast to every public grid view, with hidden fields stripped."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_only_showing_one_field = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_showing_all_fields = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_only_showing_one_field, hidden_field, hidden=True
    )
    model = table.get_model()
    row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    RowHandler().delete_row(user, table, row.id, model)
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_only_showing_one_field.slug}",
                {
                    "type": "row_deleted",
                    "row_id": row.id,
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                },
                None,
            ),
            call(
                f"view-{public_view_showing_all_fields.slug}",
                {
                    "type": "row_deleted",
                    "row_id": row.id,
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        f"field_{visible_field.id}": "Visible",
                        # This field is not hidden for this public view and so should be
                        # included
                        f"field_{hidden_field.id}": "Hidden",
                    },
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_deleted_public_views_receive_row_deleted_only_when_filters_match(
    mock_broadcast_to_channel_group, data_fixture
):
    """row_deleted is only broadcast to public views whose filters matched the row."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_showing_row = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_hiding_row = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_showing_row, hidden_field, hidden=True
    )
    data_fixture.create_grid_view_field_option(
        public_view_hiding_row, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
    )
    model = table.get_model()
    row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    RowHandler().delete_row(user, table, row.id, model)
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_showing_row.slug}",
                {
                    "type": "row_deleted",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row_id": row.id,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_not_visible_in_public_view_when_updated_to_be_visible_event_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """An update that makes a filtered-out row match a public view sends row_created."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_with_filters_initially_hiding_all_rows = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_with_filters_initially_hiding_all_rows, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_with_filters_initially_hiding_all_rows,
        field=visible_field,
        type="equal",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_with_filters_initially_hiding_all_rows,
        field=hidden_field,
        type="equal",
        value="ValueWhichMatchesFilter",
    )
    model = table.get_model()
    initially_hidden_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichDoesntMatchFilter",
        },
    )
    # Double check the row isn't visible in any views to begin with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(initially_hidden_row) == []
    RowHandler().update_row(
        user,
        table,
        initially_hidden_row.id,
        values={f"field_{hidden_field.id}": "ValueWhichMatchesFilter"},
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_with_filters_initially_hiding_all_rows.slug}",
                {
                    # The row should appear as a created event as for the public view
                    # it effectively has been created as it didn't exist before.
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": initially_hidden_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_updated_to_be_not_visible_event_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """An update that makes a visible row stop matching a public view sends row_deleted."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_with_row_showing = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_with_row_showing, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=visible_field,
        type="contains",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=hidden_field,
        type="equal",
        value="ValueWhichMatchesFilter",
    )
    model = table.get_model()
    initially_visible_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichMatchesFilter",
        },
    )
    # Double check the row is visible in the view to start with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(initially_visible_row) == [
        public_view_with_row_showing.view_ptr
    ]
    # Update the row so it is no longer visible
    RowHandler().update_row(
        user,
        table,
        initially_visible_row.id,
        values={
            f"field_{hidden_field.id}": "ValueWhichDoesNotMatchFilter",
            f"field_{visible_field.id}": "StillVisibleButNew",
        },
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_with_row_showing.slug}",
                {
                    # The row should appear as a deleted event as for the public view
                    # it effectively has been.
                    "type": "row_deleted",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row_id": initially_visible_row.id,
                    "row": {
                        "id": initially_visible_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent in its state before it
                        # was updated
                        f"field_{visible_field.id}": "Visible",
                    },
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_updated_to_still_be_visible_event_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """An update that keeps a row visible sends row_updated restricted to visible fields."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_with_row_showing = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_with_row_showing, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=visible_field,
        type="contains",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_with_row_showing,
        field=hidden_field,
        type="contains",
        value="e",
    )
    model = table.get_model()
    initially_visible_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "e",
        },
    )
    # Double check the row is visible in the view to start with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(initially_visible_row) == [
        public_view_with_row_showing.view_ptr
    ]
    # Update the row so it is still visible but changed
    RowHandler().update_row(
        user,
        table,
        initially_visible_row.id,
        values={
            f"field_{hidden_field.id}": "eee",
            f"field_{visible_field.id}": "StillVisibleButUpdated",
        },
    )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_with_row_showing.slug}",
                {
                    "type": "row_updated",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row_before_update": {
                        "id": initially_visible_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "row": {
                        "id": initially_visible_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "StillVisibleButUpdated",
                    },
                    "metadata": {},
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_restored_public_views_receive_restricted_row_created_ws_event(
    mock_broadcast_to_channel_group, data_fixture
):
    """A trash-restored row is broadcast as row_created with hidden fields stripped."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_only_showing_one_field = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_showing_all_fields = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_only_showing_one_field, hidden_field, hidden=True
    )
    model = table.get_model()
    row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    TrashHandler.trash(
        user, table.database.group, table.database, row, parent_id=table.id
    )
    TrashHandler.restore_item(user, "row", row.id, parent_trash_item_id=table.id)
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_only_showing_one_field.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
            call(
                f"view-{public_view_showing_all_fields.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        f"field_{visible_field.id}": "Visible",
                        # This field is not hidden for this public view and so should be
                        # included
                        f"field_{hidden_field.id}": "Hidden",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_when_row_restored_public_views_receive_row_created_only_when_filters_match(
    mock_broadcast_to_channel_group, data_fixture
):
    """A restored row only triggers row_created in public views whose filters match."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view_showing_row = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    public_view_hiding_row = data_fixture.create_grid_view(
        user, table=table, public=True, order=1
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(
        public_view_showing_row, hidden_field, hidden=True
    )
    data_fixture.create_grid_view_field_option(
        public_view_hiding_row, hidden_field, hidden=True
    )
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=visible_field, type="equal", value="Visible"
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view_hiding_row, field=hidden_field, type="equal", value="Not Match"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=visible_field, type="equal", value="Visible"
    )
    # Match
    data_fixture.create_view_filter(
        view=public_view_showing_row, field=hidden_field, type="equal", value="Hidden"
    )
    model = table.get_model()
    row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "Hidden",
        },
    )
    TrashHandler.trash(
        user, table.database.group, table.database, row, parent_id=table.id
    )
    TrashHandler.restore_item(user, "row", row.id, parent_trash_item_id=table.id)
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view_showing_row.slug}",
                {
                    "type": "row_created",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row": {
                        "id": row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                    "before_row_id": None,
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_visible_in_public_view_when_moved_row_updated_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """Moving a visible row broadcasts row_updated (new order) with only visible fields."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(public_view, hidden_field, hidden=True)
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view,
        field=visible_field,
        type="contains",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view,
        field=hidden_field,
        type="equal",
        value="ValueWhichMatchesFilter",
    )
    model = table.get_model()
    visible_moving_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichMatchesFilter",
        },
    )
    invisible_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichDoesNotMatchesFilter",
        },
    )
    # Double check the row is visible in the view to start with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(visible_moving_row) == [
        public_view.view_ptr
    ]
    # Move the visible row behind the invisible one
    with transaction.atomic():
        RowHandler().move_row(
            user, table, visible_moving_row.id, before=invisible_row, model=model
        )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
            call(
                f"view-{public_view.slug}",
                {
                    # The row should appear as a deleted event as for the public view
                    # it effectively has been.
                    "type": "row_updated",
                    "table_id": PUBLIC_PLACEHOLDER_ENTITY_ID,
                    "row_before_update": {
                        "id": visible_moving_row.id,
                        "order": "1.00000000000000000000",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "row": {
                        "id": visible_moving_row.id,
                        "order": "0.99999999999999999999",
                        # Only the visible field should be sent
                        f"field_{visible_field.id}": "Visible",
                    },
                    "metadata": {},
                },
                None,
            ),
        ]
    )
@pytest.mark.django_db(transaction=True)
@patch("baserow.ws.registries.broadcast_to_channel_group")
def test_given_row_invisible_in_public_view_when_moved_no_update_sent(
    mock_broadcast_to_channel_group, data_fixture
):
    """Moving a row that is filtered out of a public view sends no public view event."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    visible_field = data_fixture.create_text_field(table=table)
    hidden_field = data_fixture.create_text_field(table=table)
    public_view = data_fixture.create_grid_view(
        user, create_options=False, table=table, public=True, order=0
    )
    # Should not appear in any results
    data_fixture.create_form_view(user, table=table, public=True)
    data_fixture.create_grid_view_field_option(public_view, hidden_field, hidden=True)
    # Match the visible field
    data_fixture.create_view_filter(
        view=public_view,
        field=visible_field,
        type="contains",
        value="Visible",
    )
    # But filter out based on the hidden field
    data_fixture.create_view_filter(
        view=public_view,
        field=hidden_field,
        type="equal",
        value="ValueWhichMatchesFilter",
    )
    model = table.get_model()
    visible_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichMatchesFilter",
        },
    )
    invisible_moving_row = model.objects.create(
        **{
            f"field_{visible_field.id}": "Visible",
            f"field_{hidden_field.id}": "ValueWhichDoesNotMatchesFilter",
        },
    )
    # Double check the row is visible in the view to start with
    row_checker = ViewHandler().get_public_views_row_checker(
        table, model, only_include_views_which_want_realtime_events=True
    )
    assert row_checker.get_public_views_where_row_is_visible(invisible_moving_row) == []
    # Move the invisible row
    with transaction.atomic():
        RowHandler().move_row(
            user, table, invisible_moving_row.id, before=visible_row, model=model
        )
    assert mock_broadcast_to_channel_group.delay.mock_calls == (
        [
            call(f"table-{table.id}", ANY, ANY),
        ]
    )
| StarcoderdataPython |
6561437 | <reponame>Kipngetich33/Potrait-HAll
from django.test import TestCase
from .models import Category, Image , Location
class ImageTestClass(TestCase):
    """Unit tests for the Image model's save/delete/query helper methods."""

    def setUp(self):
        self.image = Image(image='imageurl', name='test_image', image_description='image test description')

    def test_instance(self):
        self.assertTrue(isinstance(self.image, Image))

    def test_save_images(self):
        self.image.save_images()
        images = Image.objects.all()
        self.assertTrue(len(images) > 0)

    def test_delete_images(self):
        self.image.save_images()
        self.image.delete_images()
        images = Image.objects.all()
        self.assertTrue(len(images) == 0)

    def test_all_images(self):
        self.image.save_images()
        retrieved_images = Image.get_images()
        saved_images = Image.objects.all()
        self.assertTrue(len(retrieved_images) == len(saved_images))

    def test_update_image(self):
        self.image.save_images()
        self.image.update_image('vincent', 'new_description', 'new_image_url')
        self.image.save_images()
        updated_name = self.image.name
        updated_description = self.image.image_description
        updated_url = self.image.image
        self.assertTrue(
            updated_name == 'vincent'
            and updated_description == 'new_description'
            and updated_url == 'new_image_url'
        )

    def test_get_image_by_id(self):
        self.image.save_images()
        # Look the image up by the id it was actually saved with instead of a
        # hard-coded primary key, which breaks on non-empty/reused databases.
        gotten_image = Image.get_image_by_id(self.image.id)
        # Bug fix: assertEqual was previously called with a single boolean
        # argument (`assertEqual(x == 5)`), which raises TypeError at runtime.
        self.assertEqual(gotten_image.id, self.image.id)
class LocationTestClass(TestCase):
    """Unit tests for the Location model's save and delete helpers."""

    def setUp(self):
        self.location = Location(name='test_location')

    def test_instance(self):
        """A fresh object is an instance of the Location model."""
        self.assertTrue(isinstance(self.location, Location))

    def test_save_locations(self):
        """Saving persists the location to the database."""
        self.location.save_locations()
        saved = Location.objects.all()
        self.assertTrue(len(saved) > 0)

    def test_delete_locations(self):
        """Deleting removes the previously saved location."""
        self.location.save_locations()
        self.location.delete_locations()
        remaining = Location.objects.all()
        self.assertTrue(len(remaining) == 0)
class CategoryTestClass(TestCase):
    """Unit tests for the Category model's save and delete helpers."""

    def setUp(self):
        self.category = Category(name='test_category')

    def test_instance(self):
        """A fresh object is an instance of the Category model."""
        self.assertTrue(isinstance(self.category, Category))

    def test_save_categories(self):
        """Saving persists the category to the database."""
        self.category.save_categories()
        saved = Category.objects.all()
        self.assertTrue(len(saved) > 0)

    def test_delete_categories(self):
        """Deleting removes the previously saved category."""
        self.category.save_categories()
        self.category.delete_categories()
        remaining = Category.objects.all()
        self.assertTrue(len(remaining) == 0)
4997728 | <reponame>hammad26/MedViLL
"""
MedViLL, pre-training model main run.py
"""
import os
import argparse
from datetime import datetime
from data.dataset_origin import CXRDataset
from torch.utils.data import DataLoader
from utils.utils import *
from models.train_origin import CXRBERT_Trainer # CXR-BERT
from transformers import BertTokenizer, AlbertTokenizer, AutoTokenizer
def train(args):
    """Run MedViLL pre-training: build datasets and loaders, then train for args.epochs."""
    set_seed(args.seed)

    # TODO: bert-base,small,tiny tokenizer
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True).tokenize

    print("Load Train dataset", args.train_dataset)
    train_dataset = CXRDataset(args.train_dataset, tokenizer, args)

    print("Load Test dataset", args.test_dataset)
    if args.test_dataset is not None:
        test_dataset = CXRDataset(args.test_dataset, tokenizer, args)
    else:
        test_dataset = None

    print("Create DataLoader")
    train_data_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=True,
    )
    if test_dataset is None:
        test_data_loader = None
    else:
        test_data_loader = DataLoader(
            test_dataset,
            batch_size=args.batch_size,
            num_workers=args.num_workers,
            shuffle=False,
        )

    print("Creating BERT Trainer")
    trainer = CXRBERT_Trainer(
        args, train_dataloader=train_data_loader, test_dataloader=test_data_loader
    )

    print("Training Start!")
    for epoch in range(args.epochs):
        trainer.train(epoch)
        # Checkpoint after every epoch so interrupted runs can resume.
        trainer.save(epoch, args.output_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_dataset", type=str,
                        default='data/mimic/Train.jsonl',
                        help="train dataset for training")
    parser.add_argument("--test_dataset", type=str,
                        default='data/mimic/Valid.jsonl',
                        help='test dataset for evaluating train set')
    # NOTE(review): str(datetime.now()) contains spaces and colons, which is
    # awkward on some filesystems (invalid on Windows) — confirm before porting.
    output_path = 'output/' + str(datetime.now())
    # Bug fix: os.mkdir fails with FileNotFoundError when the 'output/' parent
    # directory does not exist, and the exists()/mkdir() pair was racy.
    # makedirs with exist_ok=True creates all intermediate dirs atomically
    # enough for this use and never raises on an existing directory.
    os.makedirs(output_path, exist_ok=True)
    os.chmod(output_path, 0o777)
    parser.add_argument("--output_path", type=str, default=output_path, help="ex)path/to/save/model")
    parser.add_argument("--log_freq", type=int, default=10, help="printing loss every n inter: setting n")
    parser.add_argument("--with_cuda", type=bool, default=True, help="training with CUDA: True or False")
    parser.add_argument("--cuda_devices", type=int, nargs='+', default=None, help="CUDA device ids")
    parser.add_argument("--mlm_task", type=str, default=True,
                        help="The model will train only mlm task!! | True | False")
    parser.add_argument("--itm_task", type=str, default=True,
                        help="The model will train only itm task!! | True | False")
    parser.add_argument('--attn_1d', type=bool, default=False, help='choose 1d attn(True) or full attn(False)')
    parser.add_argument('--BAR_attn', default=True, type=bool, help="Bidirectional Auto Regressive attn mask")
    parser.add_argument('--Mixed', default=False, type=bool, help="Mixed attn mask")
    parser.add_argument('--s2s_prob', default=1.0, type=float, help="S2S attention prob.")
    parser.add_argument('--bi_prob', default=0.0, type=float, help="Full_attention prob.")
    parser.add_argument('--disturbing_mask', default=False, type=bool, help="Baseline attn mask(I-I, T-T)")
    parser.add_argument("--epochs", type=int, default=50, help='number of epochs')
    parser.add_argument("--batch_size", type=int, default=36, help="number of batch size")
    parser.add_argument("--num_workers", type=int, default=20, help="dataloader worker size")
    # TODO: init model
    parser.add_argument("--hidden_size", type=int, default=768, choices=[768, 512, 128])
    parser.add_argument("--embedding_size", type=int, default=768, choices=[768, 512, 128])
    ## pre_trained_model_path, weight_load
    parser.add_argument("--weight_load", type=bool, default=False, help='pre-trained_model_mid_epoch_load')
    parser.add_argument("--pre_trained_model_path", type=str, default='/home/cxr-bert/clinicalbert_vlp_re35_5')
    parser.add_argument("--bert_model", type=str, default="bert-base-scratch")  # pre-trained CXR-BERT
    parser.add_argument("--vocab_size", type=int, default=30522)
    parser.add_argument("--img_postion", default=True, help='img_postion use!')
    parser.add_argument("--seq_len", type=int, default=253, help="maximum sequence len")
    parser.add_argument("--max_seq_len", type=int, default=512, help="total sequence len")
    parser.add_argument("--img_hidden_sz", type=int, default=2048)
    parser.add_argument("--img_encoder", type=str, default='random-pixel', choices=['random-pixel', 'full-fiber', 'ViT'])
    parser.add_argument("--img_channel", type=int, default=3)
    parser.add_argument("--num_image_embeds", type=int, default=180, choices=[180, 256])
    parser.add_argument("--img_size", type=int, default=512)
    parser.add_argument("--img_embed_pool_type", type=str, default="max", choices=["max", "avg"])
    parser.add_argument("--lr", type=float, default=1e-5)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=4)  # loss, optimizer.step() slowly
    parser.add_argument("--warmup", type=float, default=0.1)  # optimizer = BertAdam(warmup=args.warmup)
    parser.add_argument("--seed", type=int, default=123)
    parser.add_argument("--warmup_steps", type=int, default=0)
    parser.add_argument("--dropout_prob", type=float, default=0.1)
    parser.add_argument("--beta1", type=float, default=0.9, help="adams first beta value")
    parser.add_argument("--beta2", type=float, default=0.999, help="adams first beta value")
    parser.add_argument("--eps", type=float, default=1e-6, help="adams epsilon")
    parser.add_argument("--weight_decay", type=float, default=0.01, help="weight_decay of AdamW")  # 0.01 , AdamW
    args = parser.parse_args()
    train(args)
| StarcoderdataPython |
6651156 | #!/usr/bin/python
'''
# TrueRNG Read - Simple Example
# <NAME>
# 8/21/2016
#
# Requires Python 2.7, pyserial
# On Linux - may need to be root or set /dev/tty port permissions to 666
#
# Python 2.7.xx is available here: https://www.python.org/
# Install Pyserial package with: python -m pip install pyserial
'''
import serial
import time
import numpy as np
from bitstring import BitArray
from serial.tools import list_ports
class TrueRNG(object):
""" TrueRNG object for reading random bits from the TrueRNG 3 USB device """
    def __init__(self, blocksize=1024, blocksize_type='bytes', return_type='string', port='default'):
        """Configure the generator through the validating setter methods.

        blocksize is interpreted in the unit named by blocksize_type
        (bytes/kilobytes/megabytes); port='default' auto-detects the first
        attached TrueRNG serial device.
        """
        self.set_blocksize(blocksize)
        self.set_blocksize_type(blocksize_type)
        self.set_return_type(return_type)
        self.set_port(port)
# sets #####################################################################
def set_blocksize(self, blocksize):
self.blocksize = blocksize
return self.blocksize
def set_blocksize_type(self, blocksize_type):
self.blocksize_type = blocksize_type
if blocksize_type.startswith('b'):
self.blocksize_modulator = 1
elif blocksize_type.startswith('k'):
self.blocksize_modulator = 1024
elif blocksize_type.startswith('m'):
self.blocksize_modulator = 1024*1024
else:
raise 'invalid blocksize_type. options include: [bytes, kilobytes, megabytes]'
return self.blocksize_type
def set_return_type(self, return_type='string'):
self.return_type = return_type
if not (return_type.startswith('s')
or blocksize_type.startswith('b')
or blocksize_type.startswith('a')):
raise 'invalid return_type. options include: [string, bits, array]'
return self.return_type
def set_port(self, rng_com_port='default'):
def _look_for_port(available_port, name="TrueRNG"):
rng_com_port = None
for temp in ports_avaiable:
if temp[1].startswith(name):
# always chooses the 1st TrueRNG found
if rng_com_port == None:
return str(temp[0]), str(temp[0])
return None, None
if rng_com_port == 'default':
ports=dict()
ports_avaiable = list(list_ports.comports())
rng_com_port = None
# Loop on all available ports to find TrueRNG
rng_com_port, self.port = _look_for_port(ports_avaiable, name="TrueRNG")
if rng_com_port == None:
rng_com_port, self.port = _look_for_port(ports_avaiable, name="USB Serial Device")
else:
self.port = rng_com_port
return self.port
# gets #####################################################################
def get_blocksize(self):
return self.blocksize
def get_blocksize_type(self):
return self.blocksize_type
def get_port(self):
return self.port
def get_return_type(self):
return self.return_type
# generate #################################################################
def generate(self, blocksize=None, return_type=None):
''' generate and return a bits as bits, string or array '''
blocksize = blocksize or self.blocksize
return_type = return_type or self.return_type
# Try to setup and open the comport
try:
# timeout set at 10 seconds in case the read fails
ser = serial.Serial(port=self.port, timeout=10)
except:
print('Port Not Usable!')
print(f'Do you have permissions set to read {self.port}?')
# Open the serial port if it isn't open
if(ser.isOpen() == False):
ser.open()
# Set Data Terminal Ready to start flow
ser.setDTR(True)
# This clears the receive buffer so we aren't using buffered data
ser.flushInput()
# Try to read the port
success = False
y = 0
while not success and y < 10:
try:
bits = ser.read(blocksize * self.blocksize_modulator)
success = True
except:
print('Read Failed!!!')
y += 1
ser.close()
# convery if required
if return_type == 'bits':
return bits
# If we were able to open the file, write to disk
bytes = BitArray(bits).bin
if return_type == 'array':
return self.convert_to_array(bytes)
# as string
return bytes
@staticmethod
def convert_to_array(bitstring: str) -> np.ndarray:
return np.fromstring(bitstring, 'u1') - ord('0')
| StarcoderdataPython |
4804845 | # -*- coding: utf-8 -*-
"""
A script that selects random users and add them as approved submitters
Written by /u/SmBe19
"""
import praw
import random
import time
import OAuth2Util
# ### USER CONFIGURATION ### #
# The bot's useragent. It should contain a short description of what it does and your username. e.g. "RSS Bot by /u/SmBe19"
USERAGENT = ""
# The name of the subreddit to post to. e.g. "funny"
SUBREDDIT = ""
# Number of users to select
USERS_COUNT = 10
# Number of comments from which the users are selected (max is 1000)
SAMPLE_SIZE = 1000
# Whether to check whether the selected user is already a contributor (does not work atm)
CHECK_CONTRIBUTOR = False
# Whether to add the selected users as approved submitters.
# Note that that running the script with this flag set to True is considered spam.
ADD_CONTRIBUTOR = False
# ### END USER CONFIGURATION ### #
try:
    # A file containing data for global constants.
    # Optional local overrides: any attribute of a local `bot` module whose
    # upper-cased name matches one of the configuration globals above
    # replaces it (e.g. bot.useragent -> USERAGENT).
    import bot
    for k in dir(bot):
        if k.upper() in globals():
            globals()[k.upper()] = getattr(bot, k)
except ImportError:
    # No local bot.py present; fall back to the constants defined above.
    pass
# main procedure
def run_bot():
r = praw.Reddit(USERAGENT)
if CHECK_CONTRIBUTOR or ADD_CONTRIBUTOR:
o = OAuth2Util.OAuth2Util(r)
o.refresh()
sub = r.get_subreddit(SUBREDDIT)
print("Start bot for subreddit", SUBREDDIT)
print("Select", USERS_COUNT, "users from", SAMPLE_SIZE, "comments")
sub = r.get_subreddit(SUBREDDIT)
if CHECK_CONTRIBUTOR:
contributors = list(sub.get_contributors())
comments = list(r.get_comments("all", limit=SAMPLE_SIZE))
added_users = []
for i in range(USERS_COUNT):
user = random.choice(comments).author
while (CHECK_CONTRIBUTOR and user in contributors) or user in added_users:
user = random.choice(comments).author
added_users.append(user)
print(user.name)
if ADD_CONTRIBUTOR:
sub.add_contributor(user)
if __name__ == "__main__":
    # Refuse to run without the mandatory configuration values.
    if not USERAGENT:
        print("missing useragent")
    elif not SUBREDDIT:
        print("missing subreddit")
    else:
        run_bot()
| StarcoderdataPython |
1625486 | from django.contrib import admin
from django.urls import path, include
from . import views
# /student/..
urlpatterns = [
    # Dashboard and ad management
    path('', views.studentDashboard, name="student_dashboard"),
    path('postad/<str:pk>/', views.postAd, name="post_ad"),
    path('ads/', views.Ads, name="ads"),
    path('wishlist/', views.wishList, name="wishlist"),
    path('<str:pk>/ads/', views.AdsDelete, name="ads_delete"),
    # Tutor browsing; like/wish-list endpoints are class-based API views
    path('tutors/', views.allTutors, name="all_tutors"),
    path('tutors/<int:id>', views.SpecificTutor, name="specific_tutor"),
    path('tutors/<int:id>/like/', views.PostLikeToggle.as_view(), name="post_like_std"),
    path('tutors/<int:id>/like/api/', views.PostLikeAPIToggle.as_view(), name="post_like_api_std"),
    path('tutors/<int:id>/wish-list/', views.WishlistApi.as_view(), name="wish_list"),
    # Demo invitations between students and tutors
    path('tutors/<int:id>/', views.inviteFordemo, name="tutor_invite"),
    path('tutors/invited/', views.invited, name="invited"),
    path('invitaions/', views.invitations, name="invitations_student"),
    path("confirminvite/<int:id>/", views.acceptInvitation , name="accept_invite"),
    path("rejectinvite/<int:id>/", views.rejectInvite , name="reject_invite_std"),
    # Account pages
    path("about/", views.aboutStudent , name="student_about"),
    path("delaccount/", views.del_account_student , name="del_account"),
    path("yourad/<int:id>/", views.view_your_ad, name="view_your_ad_std"),
    # Email-verification activation link
    path("activate/<uidb64>/<token>/", views.activate_view, name="activate"),
]
| StarcoderdataPython |
11304642 | <filename>mscience_cachetclient/cachet.py
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from decorator import decorator
import mscience_cachetclient.client as client
import mscience_cachetclient.exceptions as exceptions
@decorator
def api_token_required(f, *args, **kwargs):
    """
    Decorator helper function to ensure some methods aren't needlessly called
    without an api_token configured.

    Raises AttributeError when the wrapped instance has no api_token, or
    when it is None.
    """
    # FIX: getattr covers both a missing attribute and an explicit None,
    # replacing the original redundant raise/catch/re-raise of the same
    # AttributeError.
    if getattr(args[0], 'api_token', None) is None:
        raise AttributeError('Parameter api_token is required.')
    return f(*args, **kwargs)
def check_required_args(required_args, args):
    """
    Verify that every required argument name is present in *args*.

    :param required_args: list of required argument names
    :param args: kwargs dictionary to validate
    :return: True (if a KeyError isn't raised)
    :raises KeyError: for the first required name missing from *args*
    """
    missing = [name for name in required_args if name not in args]
    if missing:
        raise KeyError('Required argument: %s' % missing[0])
    return True
class Cachet(client.CachetClient):
    """
    Base class that extends CachetClient and defaults API methods to
    unimplemented.

    Endpoint subclasses override only the HTTP verbs their endpoint
    supports; anything left untouched raises UnimplementedException.
    """
    def __init__(self, **kwargs):
        super(Cachet, self).__init__(**kwargs)
    # Default to unimplemented methods
    def delete(self, **kwargs):
        raise exceptions.UnimplementedException
    def get(self, **kwargs):
        raise exceptions.UnimplementedException
    def post(self, **kwargs):
        raise exceptions.UnimplementedException
    def put(self, **kwargs):
        raise exceptions.UnimplementedException
class Factory(Cachet):
    """
    Factory for instantiating the concrete API endpoint wrappers,
    handing each one this factory's api_token and endpoint.
    """
    def __init__(self, **kwargs):
        # Credentials are mandatory here so they can be passed down to
        # every model the factory creates.
        required_args = ['api_token', 'endpoint']
        check_required_args(required_args, kwargs)
        super(Factory, self).__init__(**kwargs)

    def get(self, model):
        """
        Instantiate a specific model and return it.
        """
        endpoint_classes = {
            "Ping": Ping,
            "Version": Version,
            "Components": Components,
            "Groups": Groups,
            "Runs": Runs,
            "RunComments": RunComments,
            "Incidents": Incidents,
            "Metrics": Metrics,
            "Subscribers": Subscribers,
        }
        endpoint_cls = endpoint_classes.get(model)
        if endpoint_cls is None:
            raise Exception('Model %s Not in Cachet!' % model)
        return endpoint_cls(api_token=self.api_token, endpoint=self.endpoint)
class Ping(Cachet):
    """
    /ping API endpoint
    """
    def __init__(self, **kwargs):
        super(Ping, self).__init__(**kwargs)
    def get(self, **kwargs):
        """
        Health check for the Cachet API.
        https://docs.cachethq.io/docs/ping
        """
        return self._get('ping')
class Version(Cachet):
    """
    /version API endpoint
    """
    def __init__(self, **kwargs):
        super(Version, self).__init__(**kwargs)
    def get(self, **kwargs):
        """
        Return the Cachet server version.
        https://docs.cachethq.io/docs/version
        """
        return self._get('version')
class Components(Cachet):
    """
    /components API endpoint
    """
    def __init__(self, **kwargs):
        super(Components, self).__init__(**kwargs)

    @api_token_required
    def delete(self, id):
        """
        Delete a component.
        https://docs.cachethq.io/docs/delete-a-component
        """
        return self._delete('components/%s' % id)

    @api_token_required
    def get(self, id=None, **kwargs):
        """
        Fetch one component (by id) or list all components.
        https://docs.cachethq.io/docs/get-components
        https://docs.cachethq.io/docs/get-a-component
        """
        if id is not None:
            return self._get('components/%s' % id, data=kwargs)
        elif 'params' in kwargs:
            # 'params' is split out so it becomes the query string rather
            # than part of the request body.
            data = dict(kwargs)
            params = data.pop('params')
            return self._get('components', data=data, params=params)
        else:
            return self._get('components', data=kwargs)

    @api_token_required
    def post(self, **kwargs):
        """
        Create a component.
        https://docs.cachethq.io/docs/components
        """
        # FIX: setdefault already preserves a caller-supplied value, so the
        # original nested kwargs.get('enabled', True) lookup was redundant.
        kwargs.setdefault('enabled', True)
        required_args = ['name', 'status', 'enabled']
        check_required_args(required_args, kwargs)
        return self._post('components', data=kwargs)

    @api_token_required
    def put(self, **kwargs):
        """
        Update a component; 'id' selects which one.
        https://docs.cachethq.io/docs/update-a-component
        """
        required_args = ['id']
        check_required_args(required_args, kwargs)
        return self._put('components/%s' % kwargs['id'], data=kwargs)
class Runs(Cachet):
    """
    /Runs Endpoint
    """
    def __init__(self, **kwargs):
        super(Runs, self).__init__(**kwargs)
    @api_token_required
    def get(self, id=None, **kwargs):
        """Fetch one component run (by id) or list runs.

        A 'params' entry in kwargs is split out and sent as the query
        string rather than the request body.
        """
        if id is not None:
            return self._get('runs/%s' % id, data=kwargs)
        elif 'params' in kwargs:
            data = dict(kwargs)
            params = data.pop('params')
            return self._get('runs', data=data, params=params)
        else:
            return self._get('runs', data=kwargs)
    @api_token_required
    def post(self, id=None, **kwargs):
        """Create a run; requires 'name' and 'component_id' in kwargs."""
        required_args = ['name', 'component_id',]
        check_required_args(required_args, kwargs)
        return self._post('runs', data=kwargs)
    @api_token_required
    def put(self, **kwargs):
        """Update a run; 'id' in kwargs selects which one.
        """
        required_args = ['id']
        check_required_args(required_args, kwargs)
        return self._put('runs/%s' % kwargs['id'], data=kwargs)
class RunComments(Cachet):
    """
    /RunComments Endpoint
    """
    def __init__(self, **kwargs):
        super(RunComments, self).__init__(**kwargs)
    @api_token_required
    def get(self, id=None, **kwargs):
        """Fetch one run comment (by id) or list run comments.

        A 'params' entry in kwargs is split out and sent as the query
        string rather than the request body.
        """
        if id is not None:
            return self._get('runcomments/%s' % id, data=kwargs)
        elif 'params' in kwargs:
            data = dict(kwargs)
            params = data.pop('params')
            return self._get('runcomments', data=data, params=params)
        else:
            return self._get('runcomments', data=kwargs)
    @api_token_required
    def post(self, id=None, **kwargs):
        """Create a run comment; requires 'comment', 'component_run_id'
        and 'type' in kwargs."""
        required_args = ['comment', 'component_run_id', 'type',]
        check_required_args(required_args, kwargs)
        return self._post('runcomments', data=kwargs)
class Groups(Cachet):
    """
    /components/groups API endpoint
    """
    def __init__(self, **kwargs):
        super(Groups, self).__init__(**kwargs)
    @api_token_required
    def delete(self, id):
        """
        Delete a component group.
        https://docs.cachethq.io/docs/delete-component-group
        """
        return self._delete('components/groups/%s' % id)
    @api_token_required
    def get(self, id=None, **kwargs):
        """
        Fetch one component group (by id) or list all groups; a 'params'
        entry in kwargs is sent as the query string.
        https://docs.cachethq.io/docs/get-componentgroups
        https://docs.cachethq.io/docs/get-a-component-group
        """
        if id is not None:
            return self._get('components/groups/%s' % id, data=kwargs)
        elif 'params' in kwargs:
            data = dict(kwargs)
            params = data.pop('params')
            return self._get('components/groups', data=data, params=params)
        else:
            return self._get('components/groups', data=kwargs)
    @api_token_required
    def post(self, **kwargs):
        """
        Create a component group; requires 'name' in kwargs.
        https://docs.cachethq.io/docs/post-componentgroups
        """
        required_args = ['name']
        check_required_args(required_args, kwargs)
        return self._post('components/groups', data=kwargs)
    @api_token_required
    def put(self, **kwargs):
        """
        Update a component group; 'id' selects which one.
        https://docs.cachethq.io/docs/put-component-group
        """
        required_args = ['id']
        check_required_args(required_args, kwargs)
        return self._put('components/groups/%s' % kwargs['id'], data=kwargs)
class Incidents(Cachet):
    """
    /incidents API endpoint
    """
    def __init__(self, **kwargs):
        super(Incidents, self).__init__(**kwargs)

    @api_token_required
    def delete(self, id):
        """
        Delete an incident.
        https://docs.cachethq.io/docs/delete-an-incident
        """
        return self._delete('incidents/%s' % id)

    @api_token_required
    def get(self, id=None, **kwargs):
        """
        Fetch one incident (by id) or list all incidents.
        https://docs.cachethq.io/docs/get-incidents
        https://docs.cachethq.io/docs/get-an-incident
        """
        if id is not None:
            return self._get('incidents/%s' % id, data=kwargs)
        elif 'params' in kwargs:
            # 'params' is split out so it becomes the query string rather
            # than part of the request body.
            data = dict(kwargs)
            params = data.pop('params')
            return self._get('incidents', data=data, params=params)
        else:
            return self._get('incidents', data=kwargs)

    @api_token_required
    def post(self, **kwargs):
        """
        Create an incident.
        https://docs.cachethq.io/docs/incidents
        """
        # FIX: setdefault already preserves a caller-supplied value, so the
        # original nested kwargs.get(...) lookups were redundant.
        kwargs.setdefault('visible', True)
        kwargs.setdefault('notify', False)
        required_args = ['name', 'message', 'status', 'visible', 'notify']
        check_required_args(required_args, kwargs)
        return self._post('incidents', data=kwargs)

    @api_token_required
    def put(self, **kwargs):
        """
        Update an incident; 'id' selects which one.
        https://docs.cachethq.io/docs/update-an-incident
        """
        required_args = ['id']
        check_required_args(required_args, kwargs)
        return self._put('incidents/%s' % kwargs['id'], data=kwargs)
class Metrics(Cachet):
    """
    /metrics API endpoint
    """
    def __init__(self, **kwargs):
        super(Metrics, self).__init__(**kwargs)

    @api_token_required
    def delete(self, id):
        """
        Delete a metric.
        https://docs.cachethq.io/docs/delete-a-metric
        """
        return self._delete('metrics/%s' % id)

    @api_token_required
    def get(self, id=None, **kwargs):
        """
        Fetch one metric (by id) or list all metrics.
        https://docs.cachethq.io/docs/get-metrics
        https://docs.cachethq.io/docs/get-a-metric
        """
        if id is not None:
            return self._get('metrics/%s' % id, data=kwargs)
        else:
            return self._get('metrics', data=kwargs)

    @api_token_required
    def post(self, **kwargs):
        """
        Create a metric.
        https://docs.cachethq.io/docs/metrics
        """
        # FIX: setdefault already preserves a caller-supplied value, so the
        # original nested kwargs.get('default_value', 0) lookup was redundant.
        kwargs.setdefault('default_value', 0)
        required_args = ['name', 'suffix', 'description', 'default_value']
        check_required_args(required_args, kwargs)
        return self._post('metrics', data=kwargs)
class Points(Cachet):
    """
    /metrics/<metric>/points API endpoint

    NOTE(review): Points is not registered in Factory's model switcher —
    presumably an oversight; confirm whether it should be reachable there.
    """
    def __init__(self, **kwargs):
        super(Points, self).__init__(**kwargs)
    @api_token_required
    def delete(self, metric_id, point_id):
        """
        Delete a single point from a metric.
        https://docs.cachethq.io/docs/delete-a-metric-point
        """
        return self._delete('metrics/%s/points/%s' % (metric_id, point_id))
    @api_token_required
    def get(self, metric_id=None, **kwargs):
        """
        List the points of one metric; metric_id is mandatory.
        https://docs.cachethq.io/docs/get-metric-points
        """
        if metric_id is None:
            raise AttributeError('metric_id is required to get metric points.')
        return self._get('metrics/%s/points' % metric_id, data=kwargs)
    @api_token_required
    def post(self, **kwargs):
        """
        Add a point to a metric; 'id' is the metric id, 'value' the reading.
        https://docs.cachethq.io/docs/post-metric-points
        """
        required_args = ['id', 'value']
        check_required_args(required_args, kwargs)
        return self._post('metrics/%s/points' % kwargs['id'], data=kwargs)
class Subscribers(Cachet):
    """
    /subscribers API endpoint
    """
    def __init__(self, **kwargs):
        super(Subscribers, self).__init__(**kwargs)
    @api_token_required
    def delete(self, id):
        """
        Remove a subscriber.
        https://docs.cachethq.io/docs/delete-subscriber
        """
        return self._delete('subscribers/%s' % id)
    @api_token_required
    def get(self, **kwargs):
        """
        List subscribers.
        https://docs.cachethq.io/docs/get-subscribers
        """
        return self._get('subscribers', data=kwargs)
    @api_token_required
    def post(self, **kwargs):
        """
        Subscribe an email address; requires 'email' in kwargs.
        https://docs.cachethq.io/docs/subscribers
        """
        required_args = ['email']
        check_required_args(required_args, kwargs)
        return self._post('subscribers', data=kwargs)
| StarcoderdataPython |
4836470 | <gh_stars>1-10
import numpy as np
class MeanIonization:
    """ Compute the mean ionization state (<Z> or Zbar) for a given system.

    Parameters
    ----------
    Am : float or array_like
        Atomic mass of element (or isotope) in units of grams [g].
    mass_density : float or array_like
        Range of mass densities in units of grams per
        cubic centimeter [g/cc].
    T : float or array_like
        Temperature range in units of electron-volts [eV]
    Z : int or array_like
        Atomic number for each element
    """
    def __init__(self, Am, mass_density, T, Z):
        # Normalize scalar inputs to 1-element numpy arrays so downstream
        # code can index uniformly. (isinstance replaces the original
        # fragile str(type(...)) comparison.)
        self.Am = Am
        if not isinstance(self.Am, np.ndarray):
            self.Am = np.array([Am])
        self.Z = Z
        if not isinstance(self.Z, np.ndarray):
            self.Z = np.array([Z])
        try:
            # Number density [1/cc] for every (element, mass density) pair;
            # this path only works when both Am and mass_density support len().
            self.num_density = np.tile(mass_density, len(Am)) / np.repeat(Am, len(mass_density))
            self.num_density = np.reshape(self.num_density, (len(Am), len(mass_density)))
        except (TypeError, ValueError):
            # At least one input is scalar: fall back to a flat 1-D array.
            self.num_density = np.array([mass_density]) / Am
        self.T = T
        if not isinstance(self.T, np.ndarray):
            self.T = np.array([T])

    @staticmethod
    def _tf_zbar_single(num_density, Z, T):
        """Finite-temperature Thomas-Fermi Zbar for one element.

        Implements R.M. More's fit ("Pressure Ionization, Resonances, and
        the Continuity of Bound and Free States", Adv. in Atomic Mol.
        Phys., Vol. 21, Table IV) for a number density / atomic number
        pair against a temperature array.
        """
        # Fit coefficients from More (Table IV).
        alpha = 14.3139
        beta = 0.6624
        a1 = 0.003323
        a2 = 0.9718
        a3 = 9.26148e-5
        a4 = 3.10165
        b0 = -1.7630
        b1 = 1.43175
        b2 = 0.31546
        c1 = -0.366667
        c2 = 0.983333
        # Convert number density [1/cc] to mass density via the proton
        # mass [g], then form the scaled density variable R.
        convert = num_density * 1.6726219e-24
        R = convert / Z
        T0 = T / Z**(4. / 3.)
        Tf = T0 / (1 + T0)
        A = a1 * T0**a2 + a3 * T0**a4
        B = -np.exp(b0 + b1 * Tf + b2 * Tf**7)
        C = c1 * Tf + c2
        Q1 = A * R**B
        Q = (R**C + Q1**C)**(1 / C)
        x = alpha * Q**beta
        return Z * x / (1 + x + np.sqrt(1 + 2 * x))

    def tf_zbar(self):
        """ Thomas Fermi Zbar model.

        Returns Zbar for every (mass density, temperature, element)
        combination; the single-density/single-element case returns a
        plain broadcast array, the others a (density, T, element) array.

        References
        ----------
        Finite Temperature Thomas Fermi Charge State using
        R.M. More, "Pressure Ionization, Resonances, and the
        Continuity of Bound and Free States", Adv. in Atomic
        Mol. Phys., Vol. 21, p. 332 (Table IV).
        """
        # The formula itself lives in _tf_zbar_single; the original
        # duplicated it verbatim in all three branches below.
        # Single mass density, single element
        if len(self.num_density.shape) == 1 and len(self.Z) == 1:
            Zbar = self._tf_zbar_single(self.num_density, self.Z, self.T)
        # Single mass density, multiple elements
        elif len(self.num_density.shape) == 1 and len(self.Z) != 1:
            Zbar = np.zeros([1, len(self.T), len(self.Z)])
            for k in range(len(self.Z)):
                Zbar[0, :, k] = self._tf_zbar_single(
                    self.num_density[k], self.Z[k], self.T)
        # Multiple mass densities, multiple elements
        else:
            Zbar = np.zeros([self.num_density.shape[1], len(self.T), len(self.Z)])
            for k in range(len(self.Z)):
                for i in range(self.num_density.shape[1]):
                    Zbar[i, :, k] = self._tf_zbar_single(
                        self.num_density[k, i], self.Z[k], self.T)
        return Zbar
1696051 | import re
from django.conf import settings
from html import unescape
from urllib import parse as urlparse
from .constants import deprecated_page_redirects, FrontEndSection
from common.helpers.dictionaries import keys_omit
def args_dict_to_query_string(args_dict, urlencode=False):
    '''
    Takes argument dictionary and converts into query string
    :param args_dict: Dictionary of url arguments
    :param urlencode: Whether to url encode the dictionary values
    :return: Query string result (empty string for an empty dictionary)
    '''
    pieces = []
    for position, (key, value) in enumerate(args_dict.items()):
        # The first pair starts the query string; later pairs are joined.
        separator = '?' if position == 0 else '&'
        encoded_value = urlparse.quote(value) if urlencode else value
        pieces.append('{prefix}{key}={value}'.format(
            prefix=separator, key=key, value=encoded_value))
    return ''.join(pieces)
def section_url(section, args_dict=None):
    # Absolute URL: configured protocol+domain plus the section's path.
    return settings.PROTOCOL_DOMAIN + section_path(section, args_dict)
def _section_path_special_cases(section_string, args_dict=None):
    # TODO: Fix the url template generators to handle these
    # Returns None (implicitly) for any section without a special-case path.
    if section_string == FrontEndSection.CreateEventProject.value:
        return "/events/{event_id}/projects/create/{project_id}".format(
            event_id=args_dict['event_id'], project_id=args_dict['project_id'])
    if section_string == FrontEndSection.AboutEventProject.value:
        return "/events/{event_id}/projects/{project_id}".format(
            event_id=args_dict['event_id'], project_id=args_dict['project_id'])
def section_path(section, args_dict=None):
    """Build the site-relative path for a front-end section.

    Special-cased event-project sections are handled first; otherwise the
    path comes from the section's url generator template, with 'id' (when
    present in args_dict) substituted into the template and the remaining
    args appended as a query string.
    """
    from common.urls import url_generators  # local import — presumably avoids a circular import; confirm
    if args_dict is None:
        args_dict = {}
    section_string = section.value if hasattr(section, 'value') else section
    # 'id' is rendered into the path template, not the query string.
    id_arg = {'id': ''}
    if args_dict and 'id' in args_dict:
        id_arg = {'id': args_dict['id']}
        args_dict = keys_omit(args_dict, ['id'])
    section_path_url = _section_path_special_cases(section_string, args_dict)
    if section_path_url:
        return section_path_url
    section_path_url = '/' + url_generators[section_string]['generator'].format(**id_arg)
    section_path_url += args_dict_to_query_string(args_dict)
    return section_path_url
def get_page_section_generator(url):
    """Return the url_generators entry whose regex matches *url*, or
    None (implicitly) when nothing matches."""
    from common.urls import url_generators
    for generator_key in url_generators:
        candidate = url_generators[generator_key]
        if candidate['regex'].match(url) is not None:
            return candidate
def get_page_section(url):
    """Return the section name for *url*, or a falsy value when no
    generator matches."""
    generator = get_page_section_generator(url)
    return generator['section'] if generator else generator
def get_page_path_parameters(url, page_section_generator=None):
    """Extract the named regex groups of *url* as a dict, looking up the
    matching generator when none is supplied."""
    generator = page_section_generator or get_page_section_generator(url)
    return generator['regex'].search(url).groupdict()
def clean_invalid_args(url_args):
    """Filter out invalid query string arguments from old url system

    Parses the query string, drops the legacy 'section' and 'id' routing
    arguments, and rebuilds a url-encoded query string from the rest.

    Args:
        url_args(str) : URL query string arguments
    Returns:
        str: clean URL query string arguments
    """
    # Sanity check: nothing to clean.
    if url_args == "":
        return url_args
    from urllib import parse as urlparse
    # parse_qs yields {'a': ['1'], 'b': ['2']}; take the first value of each.
    parsed = urlparse.parse_qs(url_args, keep_blank_values=0, strict_parsing=0)
    for legacy_key in ('section', 'id'):
        parsed.pop(legacy_key, None)
    flattened = {key: values[0] for key, values in parsed.items()}
    return args_dict_to_query_string(flattened, urlencode=True)
def get_clean_url(url):
    """Return *url* with HTML entities (&amp;amp; etc.) decoded."""
    return unescape(url)
def redirect_from_deprecated_url(section_name):
    """Return the redirect target for a deprecated section name, or
    None (implicitly) when the section is not deprecated."""
    # Redirect deprecated Press section
    if section_name == FrontEndSection.Press.value:
        return settings.BLOG_URL
    if section_name in deprecated_page_redirects:
        return section_url(deprecated_page_redirects[section_name])
| StarcoderdataPython |
9698519 | from exp_base import *
# THIS FILE IS FOR STORING STANDARD EXPERIMENTS/BASELINES FOR CARLA_STA MODE
############## choose an experiment ##############
# current = 'builder'
# current = 'trainer_sb'
# current = 'builder'
current = 'res_trainer'
# current = 'vis_trainer'
# current = 'occvis_trainer'
# current = 'emb_trainer_sb'
# current = 'emb_trainer'
# current = 'emb_trainer_kitti'
# current = 'tow_trainer'
# (NO MODS HERE)
mod = '""'
############## define experiments ##############
exps['builder'] = ['carla_sta', # mode
'carla_sta10_data', # dataset
'3_iters',
'lr0',
'B2',
'no_shuf',
# 'no_backprop',
'train_feat',
'train_occ',
'train_view',
'train_emb',
'fastest_logging',
]
exps['trainer'] = ['carla_sta', # mode
'carla_sta_data', # dataset
'200k_iters',
'lr3',
'B4',
'train_feat',
'train_occ',
'train_view',
'train_emb',
# 'pretrained_carl_feat',
# 'pretrained_carl_occ',
# 'pretrained_carl_view',
# 'pretrained_carl_emb',
'faster_logging',
# 'resume'
]
exps['res_trainer'] = ['carla_sta', # mode
'carla_sta_data', # dataset
'200k_iters',
'lr3',
'B4',
'train_feat_res',
'train_occ',
'train_view',
'train_emb',
# 'pretrained_carl_feat',
# 'pretrained_carl_occ',
# 'pretrained_carl_view',
# 'pretrained_carl_emb',
'faster_logging',
# 'resume'
]
exps['emb_trainer'] = ['carla_sta', # mode
'carla_static_data', # dataset
'300k_iters',
'lr3',
'B1',
'train_feat',
'train_occ',
'train_emb_view',
'faster_logging',
]
exps['trainer_sb'] = ['carla_sta', # mode
'carla_sta_data', # dataset
'300k_iters',
'lr3',
'B4',
'train_feat_sb',
'train_occ_notcheap',
'train_view',
'train_emb',
'faster_logging',
#'fast_logging',
#'fastest_logging',
]
exps['emb_trainer_noocc'] = ['carla_sta', # mode
'carla_static_data', # dataset
'300k_iters',
'lr3',
'B2',
'train_feat',
'train_emb_view',
'resume',
'slow_logging',
]
exps['emb_trainer_kitti'] = ['carla_sta', # mode
'kitti_static_data', # dataset
'300k_iters',
'lr3',
'B2',
'train_feat',
'train_occ',
'train_emb_view',
'fast_logging',
# 'synth_rt',
# 'resume',
# 'pretrained_carl_feat',
# 'pretrained_carl_view',
# 'pretrained_carl_emb',
# 'pretrained_carl_occ',
]
exps['tow_trainer'] = ['carla_sta', # mode
'carla_static_data', # dataset
'100k_iters',
'lr4',
'B4',
'train_tow',
'fast_logging',
]
exps['vis_trainer'] = ['carla_sta', # mode
'carla_static_data', # dataset
'50k_iters',
'lr3',
'B2',
'pretrained_carl_occ',
'pretrained_carl_vis',
'frozen_occ',
'frozen_vis',
'train_feat',
'train_emb',
'slow_logging',
]
exps['occvis_trainer'] = ['carla_sta', # mode
'carla_static_data', # dataset
'200k_iters',
'lr3',
'B4',
'train_occ',
'train_vis',
'slow_logging',
]
############## net configs ##############
groups['train_box'] = ['do_box = True',
'box_sup_coeff = 0.01', # penalty for expanding the box min/max range
# 'box_cs_coeff = 1.0', # center-surround loss
]
groups['train_ort'] = ['do_ort = True',
# 'ort_coeff = 1.0', # sup loss (for debug)
'ort_warp_coeff = 1.0', # weight on 3D loss against the sta tensors
]
groups['train_inp'] = ['do_inp = True',
'inp_coeff = 1.0',
# 'inp_dim = 8', # size of bottleneck maybe; currently unused
]
groups['train_traj'] = ['do_traj = True',
'traj_dim = 8',
]
groups['train_feat'] = ['do_feat = True',
'feat_dim = 32',
'feat_do_rt = True',
'feat_do_flip = True',
# 'feat_dim = 16',
# 'feat_dim = 8',
]
groups['train_feat_res'] = ['do_feat = True',
'feat_dim = 32',
'feat_do_rt = True',
'feat_do_flip = True',
'feat_do_res = True',
# 'feat_dim = 16',
# 'feat_dim = 8',
]
groups['train_feat_sb'] = ['do_feat = True',
'feat_dim = 32',
'feat_do_sb = True',
'feat_do_res = True',
'feat_do_flip = True',
'feat_do_rt = True',
# 'feat_dim = 16',
# 'feat_dim = 8',
]
groups['train_feat_vae'] = ['do_feat = True',
'feat_dim = 32',
'feat_do_vae = True',
'feat_kl_coeff = 1.0',
]
groups['train_occ'] = ['do_occ = True',
'occ_do_cheap = True',
'occ_coeff = 1.0',
'occ_smooth_coeff = 1.0',
]
groups['train_view'] = ['do_view = True',
'view_depth = 32',
'view_l1_coeff = 1.0',
]
groups['train_occ_notcheap'] = ['do_occ = True',
'occ_coeff = 1.0',
'occ_do_cheap = False',
'occ_smooth_coeff = 0.1',
]
# 02_m32x128x128_p64x192_1e-3_F16_Oc_c1_s.1_Ve_d32_E16_c1_l.01_d.1_m.01_cals2c1o0t_cals2c1o0v_e17
# 02_m32x128x128_p64x192_1e-3_F32_Oc_c1_s.1_Ve_d32_E32_a1_i1_cals2c1o0t_cals2c1o0v_caos2c0o1v_i12
groups['train_emb_view'] = [
'do_view = True',
'do_emb = True',
'view_depth = 32',
'emb_2D_coeff = 1.0',
'emb_3D_coeff = 1.0',
'emb_samp = "rand"',
'emb_dim = 32',
'view_pred_embs = True',
'do_eval_recall = True',
]
groups['train_rgb_view'] = ['do_view = True',
'view_pred_rgb = True',
'view_use_halftanh = True',
# 'view_l1_coeff = 1.0', # 2d to rgb consistency
'view_ce_coeff = 1.0', # 2d to rgb consistency
'view_depth = 32',
'do_eval_recall = True',
]
groups['train_tow'] = ['do_tow = True',
'tow_view_coeff = 1.0',
'tow_kl_coeff = 1.0',
'do_eval_recall = True',
]
groups['train_vis'] = ['do_vis = True',
# 'vis_debug = True',
'vis_softmax_coeff = 1.0',
'vis_hard_coeff = 1.0',
'view_depth = 32',
]
groups['train_flow'] = ['do_flow = True',
'flow_huber_coeff = 1.0',
'flow_smooth_coeff = 0.01',
# 'flow_coeff = 10.0',
# 'flow_rgb_coeff = 1.0',
# 'flow_smooth_coeff = 40.0',
# 'flow_smooth_coeff = 30.0',
# 'flow_smooth_coeff = 20.0',
# 'flow_smooth_coeff = 10.0',
# 'flow_smooth_coeff = 5.0',
# 'flow_smooth_coeff = 2.0',
# 'snap_freq = 500',
]
groups['train_emb'] = ['do_emb = True',
'emb_smooth_coeff = 0.1',
'emb_2D_ml_coeff = 1.0',
'emb_2D_l2_coeff = 0.1',
'emb_3D_ml_coeff = 1.0',
'emb_3D_l2_coeff = 0.1',
]
############## datasets ##############
# DHW for mem stuff
SIZE = 32
Z = SIZE*4
Y = SIZE*1
X = SIZE*4
K = 2 # how many objects to consider
S = 2
H = 128
W = 384
# H and W for proj stuff
PH = int(H/2.0)
PW = int(W/2.0)
groups['carla_sta1_data'] = ['dataset_name = "carla"',
'H = %d' % H,
'W = %d' % W,
'trainset = "caus2i6c1o0one"',
'dataset_list_dir = "/projects/katefgroup/datasets/multistage_dyno/carla/tfrs"',
'dataset_location = "/projects/katefgroup/datasets/multistage_dyno/carla/tfrs"',
]
groups['carla_sta10_data'] = ['dataset_name = "carla"',
'H = %d' % H,
'W = %d' % W,
'trainset = "caus2i6c1o0ten"',
'dataset_list_dir = "/projects/katefgroup/datasets/multistage_dyno/carla/tfrs"',
'dataset_location = "/projects/katefgroup/datasets/multistage_dyno/carla/tfrs"',
]
groups['carla_sta_data'] = ['dataset_name = "carla"',
'H = %d' % H,
'W = %d' % W,
#'trainset = "caus2i6c1o0t"',
'trainset = "caas2i6c0o1t"',
#'dataset_list_dir = "/projects/katefgroup/datasets/multistage_dyno/carla/tfrs"',
'dataset_list_dir = "/projects/katefgroup/datasets/multistage_dyno/carla/npzrs"',
#'dataset_location = "/projects/katefgroup/datasets/multistage_dyno/carla/tfrs"',
'dataset_location = "/projects/katefgroup/datasets/multistage_dyno/carla/npzrs"',
'dataset_format = "npz"'
]
############## verify and execute ##############
def _verify_(s):
varname, eq, val = s.split(' ')
assert varname in globals()
assert eq == '='
assert type(s) is type('')
# Resolve the selected experiment: every group listed under exps[current]
# is expanded, each 'name = value' setting is validated and executed so it
# becomes a module-level variable.
print(current)
assert current in exps
for group in exps[current]:
    print(" " + group)
    assert group in groups
    for s in groups[group]:
        print(" " + s)
        _verify_(s)
        exec(s)
# Finally apply the experiment name suffix.
s = "mod = " + mod
_verify_(s)
exec(s)
| StarcoderdataPython |
5137423 | #!/usr/bin/env python
import sys#!/usr/bin/env python
import sys
import traceback
import os
import imp
import json
from multiprocessing import Process, Queue
import prometheus_client as prom
import boto3
mod_name = os.getenv('MOD_NAME')
func_handler = os.getenv('FUNC_HANDLER')
queue_name = os.getenv('QUEUE_NAME')
timeout = float(os.getenv('FUNC_TIMEOUT', 180))
aws_access_key = os.getenv('AWS_ACCESS_KEY_ID')
aws_secret_key = os.getenv('AWS_SECRET_ACCESS_KEY')
aws_region_name = os.getenv('AWS_REGION','us-east-1')
sqs = boto3.resource('sqs', region_name=aws_region_name)
group = mod_name + func_handler
queue = sqs.get_queue_by_name(QueueName=queue_name)
mod = imp.load_source('function', '/kubeless/%s.py' % mod_name)
func = getattr(mod, func_handler)
func_hist = prom.Histogram('function_duration_seconds',
'Duration of user function in seconds',
['queue'])
func_calls = prom.Counter('function_calls_total',
'Number of calls to user function',
['queue'])
func_errors = prom.Counter('function_failures_total',
'Number of exceptions in user function',
['queue'])
def funcWrap(q, payload):
q.put(func(payload))
def handle(msg):
    """Invoke the user function on *msg* in a child process with a timeout.

    Increments the call counter, counts exceptions, and records the duration
    in the Prometheus metrics declared above.  Raises Exception when the
    child does not finish within `timeout` seconds.
    """
    func_calls.labels(queue_name).inc()
    with func_errors.labels(queue_name).count_exceptions():
        with func_hist.labels(queue_name).time():
            q = Queue()
            p = Process(target=funcWrap, args=(q,msg,))
            p.start()
            # Wait at most `timeout` seconds for the child to finish.
            p.join(timeout)
            # If thread is still active
            if p.is_alive():
                p.terminate()
                p.join()
                raise Exception('Timeout while processing the function')
            else:
                # NOTE(review): if the child exited without putting a result
                # (e.g. the user function raised inside funcWrap), q.get()
                # blocks forever -- confirm this cannot happen here.
                return q.get()
if __name__ == '__main__':
    # Expose the Prometheus metrics, then poll the SQS queue forever.
    prom.start_http_server(8080)
    while True:
        for msg in queue.receive_messages():
            try:
                res = handle(msg)
                sys.stdout.write(str(res) + '\n')
                sys.stdout.flush()
            except Exception:
                # Log the failure and keep serving; the message is still
                # deleted below, so failed messages are not retried.
                traceback.print_exc()
            msg.delete()
| StarcoderdataPython |
246883 | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import inferModel.infer as Infer
app = FastAPI()
# Allow cross-origin GET requests from any origin (browser front-ends).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["GET"],
    allow_headers=["*"],
)
@app.get("/")
async def root(url: str = None):
if url == None:
return {"Error":"url is None!"}
else:
w2i = Infer.wordStr2IntVec()
result = 0 if Infer.infer(url, w2i)[0]== "good" else 1
return {"result": result}
# uvicorn main:app --reload --workers 1 --host 0.0.0.0 --port 8080
# http://localhost:8080 | StarcoderdataPython |
3403889 | <gh_stars>1-10
import os
def getFilesList(folder):
    """Return the names of the entries in *folder*.

    The original special-cased '.'; that branch was redundant because
    os.listdir('.') already lists the current directory.
    """
    return os.listdir(folder)
#print(getFilesList('.'))
#print(getFilesList('c://')) | StarcoderdataPython |
3535327 | import sys
import os
import numpy as np
import acl
import cv2
import glob
# Make the shared acllite helper modules importable relative to this sample.
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path, ".."))
sys.path.append(os.path.join(path, "../../../../common/"))
sys.path.append(os.path.join(path, "../../../../common/acllite"))
from acllite_resource import AclLiteResource
from acllite_model import AclLiteModel
from utils import display_time, check_ret
from constants import ACL_FLOAT, ACL_FORMAT_NCHW, ACL_MEM_MALLOC_NORMAL_ONLY, \
    ACL_MEM_MALLOC_HUGE_FIRST, ACL_MEMCPY_HOST_TO_DEVICE, ACL_MEMCPY_DEVICE_TO_HOST
currentPath = os.path.join(path, "..")
OUTPUT_DIR = os.path.join(currentPath, 'out/')             # inpainted results are written here
MODEL_PATH = os.path.join(currentPath, "model/hifill.om")  # offline HiFill inpainting model
OP_TYPE = "BatchMatMul"                                    # single-op model used for residual aggregation
MODEL_MATMUL_PATH =os.path.join(currentPath, "model")
print("MODEL_MATMUL_PATH", MODEL_MATMUL_PATH)
INPUT_SIZE = 512      # network input resolution (square)
ATTENTION_SIZE = 32   # attention map side length
MULTIPLE = 6          # large image side = MULTIPLE * INPUT_SIZE
NPTYPE_FLOAT32 = np.float32
def sort(str_lst):
    """Return a new list with the elements of *str_lst* in ascending order.

    The original wrapped sorted() in an identity comprehension; sorted()
    already returns a new list.
    """
    return sorted(str_lst)
def reconstruct_residual_from_patches(residual, multiple):
    """
    reconstruct residual from patches

    Inverse of extract_image_patches: folds a flat patch matrix back into a
    (ATTENTION_SIZE * multiple, ATTENTION_SIZE * multiple, 3) residual image.
    """
    residual = np.reshape(residual, [ATTENTION_SIZE, ATTENTION_SIZE, multiple, multiple, 3])
    # Interleave patch-grid and within-patch axes back into image order.
    residual = np.transpose(residual, [0, 2, 1, 3, 4])
    return np.reshape(residual, [ATTENTION_SIZE * multiple, ATTENTION_SIZE * multiple, 3])
def extract_image_patches(img, multiple):
    """Split an (h, w, c) image into a grid of (multiple x multiple) patches.

    Returns an array of shape (h // multiple, w // multiple, multiple,
    multiple, c), where the first two axes index the patch grid and the next
    two index pixels within a patch.
    """
    height, width, channels = img.shape
    grid = img.reshape(height // multiple, multiple, width // multiple, multiple, channels)
    return grid.transpose(0, 2, 1, 3, 4)
@display_time
def pre_process(raw_img, raw_mask):
    """
    process raw image & mask

    Returns (large_img, large_mask, small_img, mask_512_chw): the inputs
    resized to MULTIPLE*INPUT_SIZE for post-processing, plus the 512x512
    network image and a single-channel CHW mask (1. = hole, 0. = background).
    """
    raw_mask = raw_mask.astype(NPTYPE_FLOAT32) / 255.
    raw_img = raw_img.astype(NPTYPE_FLOAT32)
    # resize raw image & mask to the designated sizes
    large_img = cv2.resize(raw_img, (MULTIPLE * INPUT_SIZE, MULTIPLE * INPUT_SIZE), interpolation = cv2. INTER_LINEAR)
    large_mask = cv2.resize(raw_mask, (MULTIPLE * INPUT_SIZE, MULTIPLE * INPUT_SIZE), interpolation = cv2.INTER_NEAREST)
    small_img = cv2.resize(raw_img, (INPUT_SIZE, INPUT_SIZE), interpolation = cv2.INTER_NEAREST)
    small_mask = cv2.resize(raw_mask, (INPUT_SIZE, INPUT_SIZE), interpolation = cv2.INTER_NEAREST)
    # set hole region to 1. and background to 0.
    small_mask = 1. - small_mask
    # keep one channel and convert HWC -> CHW for the network input
    mask_512_hwc = small_mask[:,:,0:1]
    mask_512_chw = mask_512_hwc.transpose(2,0,1).copy()
    return (large_img, large_mask, small_img, mask_512_chw)
"""
get img list
"""
def get_imgs_masks_file_list(images, masks):
paths_img = glob.glob(images + '/*.*[gG]')
paths_mask = glob.glob(masks + '/*.*[gG]')
paths_img = sort(paths_img)
paths_mask = sort(paths_mask)
return paths_img, paths_mask
"""
create input databuffer
"""
def create_input(np_data, size):
ptr, data_out = acl.util.numpy_contiguous_to_ptr(np_data)
dev_ptr, ret = acl.rt.malloc(size, ACL_MEM_MALLOC_HUGE_FIRST)
ret = acl.rt.memcpy(dev_ptr,
size,
ptr,
size,
ACL_MEMCPY_HOST_TO_DEVICE)
check_ret("acl.rt.memcpy", ret)
return acl.create_data_buffer(dev_ptr, size)
def get_forward_result(dev_ptr, size):
    # Copy `size` bytes back from device memory to a host buffer and view it
    # as a (1024, 27648) array.  The trailing 11 is the ACL dtype code used
    # here (NOTE(review): presumably float32 -- confirm against constants).
    host_buffer, ret = acl.rt.malloc_host(size)
    check_ret("acl.rt.malloc_host", ret)
    ret = acl.rt.memcpy(host_buffer,
                        size,
                        dev_ptr,
                        size,
                        ACL_MEMCPY_DEVICE_TO_HOST)
    check_ret("acl.rt.memcpy", ret)
    return acl.util.ptr_to_numpy(host_buffer, (1024, 27648), 11)
def forward_op_batch_matmul(data, stream):
    # Execute the standalone BatchMatMul single-op model on-device:
    # data[0] is (1024, 1024), data[1] is (1024, 27648); the result has the
    # shape of data[1].  Returns the result as a host numpy array.
    ret = acl.op.set_model_dir(MODEL_MATMUL_PATH)
    check_ret("acl.op.set_model_dir", ret)
    # Neither input is transposed (adjoint) in the matmul.
    op_attr = acl.op.create_attr()
    ret = acl.op.set_attr_bool(op_attr, "adj_x1", False)
    check_ret("acl.op.set_attr_bool", ret)
    ret = acl.op.set_attr_bool(op_attr, "adj_x2", False)
    check_ret("acl.op.set_attr_bool", ret)
    # Describe the two inputs and the output tensor.
    input_desc_batch_matmul_x1 = \
        acl.create_tensor_desc(ACL_FLOAT,
                               [1, 1, 1024, 1024],
                               ACL_FORMAT_NCHW)
    input_desc_batch_matmul_x2 = \
        acl.create_tensor_desc(ACL_FLOAT,
                               [1, 1, 1024, 27648],
                               ACL_FORMAT_NCHW)
    output_desc_batch_matmul_y = \
        acl.create_tensor_desc(ACL_FLOAT,
                               [1, 1, 1024, 27648],
                               ACL_FORMAT_NCHW)
    tensor_size_batch_matmul_x1 = \
        acl.get_tensor_desc_size(input_desc_batch_matmul_x1)
    tensor_size_batch_matmul_x2 = \
        acl.get_tensor_desc_size(input_desc_batch_matmul_x2)
    tensor_size_batch_matmul_y = \
        acl.get_tensor_desc_size(output_desc_batch_matmul_y)
    # Upload both operands and allocate the device output buffer.
    input_buffer_x1 = create_input(data[0], tensor_size_batch_matmul_x1)
    input_buffer_x2 = create_input(data[1], tensor_size_batch_matmul_x2)
    dev_buffer_batch_matmul, ret = \
        acl.rt.malloc(tensor_size_batch_matmul_y,
                      ACL_MEM_MALLOC_NORMAL_ONLY)
    check_ret("acl.rt.malloc", ret)
    output_buffer_batch_matmul_y = \
        acl.create_data_buffer(dev_buffer_batch_matmul,
                               tensor_size_batch_matmul_y)
    # Launch the op asynchronously on `stream`, then wait for completion.
    ret = acl.op.execute_v2(
        OP_TYPE,
        [input_desc_batch_matmul_x1, input_desc_batch_matmul_x2],
        [input_buffer_x1, input_buffer_x2],
        [output_desc_batch_matmul_y],
        [output_buffer_batch_matmul_y],
        op_attr,
        stream)
    check_ret("acl.op.execute_v2", ret)
    ret = acl.rt.synchronize_stream(stream)
    check_ret("acl.rt.synchronize_stream", ret)
    print("[SingleOp] batch_matmul run success")
    return get_forward_result(dev_buffer_batch_matmul, tensor_size_batch_matmul_y)
@display_time
def matmul_om_large(attention, residual, stream):
    """
    matmul om large

    Multiply the (1024, 1024) attention matrix by the flattened patch matrix
    (1024 patches x 96*96*3 values each) via the on-device BatchMatMul op,
    and reshape the result back to the attention grid layout.
    """
    attention_reshape = attention.reshape(1024, 1024)
    residual_reshape = residual.reshape(1024, 96 * 96 * 3)
    matmul_ret = forward_op_batch_matmul([attention_reshape, residual_reshape], stream)
    return matmul_ret.reshape(ATTENTION_SIZE, ATTENTION_SIZE, 3072 * 9)
def residual_aggregate(residual, attention, stream):
    """
    MULTIPLE * INPUT_SIZE//ATTENTION_SIZE = 6*512/32 = 96

    Apply the attention map to the high-resolution residual patch-wise:
    split into 96x96 patches, matmul with attention on-device, then fold
    the patches back into an image.
    """
    residual = extract_image_patches(residual, MULTIPLE * INPUT_SIZE // ATTENTION_SIZE)
    residual = np.reshape(residual, [1, residual.shape[0] * residual.shape[1], -1])
    residual = matmul_om_large(attention, residual, stream)
    residual = reconstruct_residual_from_patches(residual, MULTIPLE * INPUT_SIZE // ATTENTION_SIZE)
    return residual
@display_time
def post_process(raw_img, large_img, large_mask, inpainted_512, img_512, mask_512, attention, stream):
    """
    Upsample the 512x512 inpainting result, add the attention-aggregated
    high-frequency residual, then paste the repaired hole region back into
    the original-resolution image.  Returns a uint8 image of raw_img's size.
    """
    h, w, c = raw_img.shape
    # Upsample both the inpainted result and the network input to large size.
    low_base = cv2.resize(inpainted_512.astype(NPTYPE_FLOAT32),
                          (INPUT_SIZE * MULTIPLE, INPUT_SIZE * MULTIPLE), interpolation = cv2.INTER_LINEAR)
    low_large = cv2.resize(img_512.astype(NPTYPE_FLOAT32),
                           (INPUT_SIZE * MULTIPLE, INPUT_SIZE * MULTIPLE), interpolation = cv2.INTER_LINEAR)
    # High-frequency detail lost by downsampling, restricted to the mask.
    residual = (large_img - low_large) * large_mask
    # reconstruct residual map using residual aggregation module
    residual = residual_aggregate(residual, attention, stream)
    # compute large inpainted result
    res_large = low_base + residual
    res_large = np.clip(res_large, 0., 255.)
    # resize large inpainted result to raw size
    res_raw = cv2.resize(res_large, (w, h), interpolation = cv2.INTER_LINEAR)
    # paste the hole region to the original raw image
    mask = cv2.resize(mask_512.astype(NPTYPE_FLOAT32), (w, h), interpolation = cv2.INTER_LINEAR)
    mask = np.expand_dims(mask, axis=2)
    res_raw = res_raw * mask + raw_img * (1. - mask)
    return res_raw.astype(np.uint8)
@display_time
def readimages(img_path, mask_path):
    """
    readimages

    Load the raw image and its mask from disk with OpenCV.
    """
    raw_img = cv2.imread(img_path)
    raw_mask = cv2.imread(mask_path)
    return raw_img, raw_mask
@display_time
def inference(model, input_data):
    """Run the inpainting model and unpack its three outputs:
    (inpainted 512x512 image, attention map, refined 512 mask)."""
    outputs = model.execute(input_data)
    return outputs[0], outputs[1], outputs[2]
@display_time
def main(image_dirs, masks_dirs):
    """Inpaint every image in *image_dirs* with its mask from *masks_dirs*
    and write the results into OUTPUT_DIR."""
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #acl init
    acl_resource = AclLiteResource()
    acl_resource.init()
    stream, ret = acl.rt.create_stream()
    check_ret("acl.rt.create_stream", ret)
    #load model
    model = AclLiteModel(MODEL_PATH)
    # Images and masks are paired by sorted order, index by index.
    paths_img, paths_mask = get_imgs_masks_file_list(image_dirs, masks_dirs)
    for i in range(len(paths_img)):
        print('==========')
        raw_img, raw_mask = readimages(paths_img[i], paths_mask[i])
        print("file: % s, shape= % s" % (paths_img[i], raw_img.shape))
        (img_large, mask_large, img_512, mask_512) = pre_process(raw_img, raw_mask)
        inpainted_512, attention, mask_512_new = inference(model,[img_512, mask_512,])
        # post-processing
        res_raw_size = post_process(raw_img, img_large, \
            mask_large, inpainted_512[0], img_512, mask_512_new[0], attention[0], stream)
        filename = os.path.join(OUTPUT_DIR, 'outpaint_' + os.path.basename(paths_img[i]))
        cv2.imwrite(filename, res_raw_size)
    print("Execute end")
if __name__ == '__main__':
    # Default sample locations: images in ./data, matching masks in ./mask.
    image_dir = os.path.join(currentPath, "data" )
    masks_dir = os.path.join(currentPath, "mask")
    main(image_dir, masks_dir)
| StarcoderdataPython |
3547512 | <filename>CS1/0310_maze_navigator/working_code/main10.py
import pygame, random, time
pygame.init()
#Initialize variables:
clock = pygame.time.Clock()
size = 50 #size in pixels of each tile
maze_file = '../maze_navigator_starter_code/mazes/trial00.txt'
fps = 3 #Frames per second
# Tile colors (RGB tuples).
green = 0,255,0
red = 255,0,0
yellow = 255,255,0
blue = 0,0,255
white = 255,255,255
# Heading constants used by MazeRunner.
north = 1
south = 2
east = 3
west = 4
victory = False  # set True by MazeRunner.moveAhead when the goal is reached
# NOTE(review): the second assignment overwrites the first, so the 25pt
# font is never used -- confirm whether that is intentional.
font = pygame.font.SysFont('Arial', 25)
font = pygame.font.SysFont('Arial', 64)
def draw():
    """Redraw every non-empty tile of the global `map`, flip the display,
    and tick the clock to hold the configured frame rate."""
    surface.fill((0,0,0)) #fill surface with black
    for row in map:
        for col in row:
            if col!=None:
                col.draw()
    pygame.display.flip()
    #Delay to get desired frames per second
    clock.tick(fps)
class Wall:
    """A static colored square tile drawn on the maze surface."""

    def __init__(self, surface, x, y, size, color):
        """Remember the target surface, position, color, and draw rect."""
        self.surface = surface
        self.x = x
        self.y = y
        self.color = color
        self.rect = pygame.Rect(x, y, size, size)

    def draw(self):
        """Paint this tile onto its surface."""
        pygame.draw.rect(self.surface, self.color, self.rect)
class MazeRunner:
    """The player tile.  Tracks its grid position, heading, and ten generic
    memory slots that the hybrid navigation strategies use as scratch state.
    Movement mutates the shared `maze` grid and the module-level `victory`
    flag, and redraws via the module-level draw()."""
    def __init__(self, surface, row, column, size, color):
        # NOTE(review): `size` is accepted but never stored; draw() below
        # uses the module-level `size` instead -- confirm intent.
        self.surface = surface
        self.col = column
        self.row = row
        self.color = color
        self.heading = north
        #Memory slots to use. For advanced navigation
        self.memory0 = 0
        self.memory1 = 0
        self.memory2 = 0
        self.memory3 = 0
        self.memory4 = 0
        self.memory5 = 0
        self.memory6 = 0
        self.memory7 = 0
        self.memory8 = 0
        self.memory9 = 0
    def wallAhead(self, maze):
        """Return True when the tile directly ahead (per heading) is a wall
        (a blue tile); the red goal tile does not count as a wall."""
        if self.heading == north:
            return maze[self.row-1][self.col] != None and maze[self.row-1][self.col].color==blue
        elif self.heading == south:
            return maze[self.row+1][self.col] != None and maze[self.row+1][self.col].color==blue
        elif self.heading == east:
            return maze[self.row][self.col+1] != None and maze[self.row][self.col+1].color==blue
        elif self.heading == west:
            return maze[self.row][self.col-1] != None and maze[self.row][self.col-1].color==blue
        else:
            print('ERROR in wallAhead')
            exit()
    def setHeading(self, heading):
        """Set the heading and redraw so the turn is visible."""
        self.heading = heading
        draw()
    def turnLeft(self):
        """Rotate the heading 90 degrees counter-clockwise."""
        if self.heading == north:
            self.setHeading(west)
        elif self.heading == south:
            self.setHeading(east)
        elif self.heading == east:
            self.setHeading(north)
        elif self.heading == west:
            self.setHeading(south)
    def turnRight(self):
        """Rotate the heading 90 degrees clockwise."""
        if self.heading == north:
            self.setHeading(east)
        elif self.heading == south:
            self.setHeading(west)
        elif self.heading == east:
            self.setHeading(south)
        elif self.heading == west:
            self.setHeading(north)
    def moveAhead(self, maze):
        '''Move ahead one space if there is no wall blocking your path.
        Sets the global `victory` flag when the destination is the red goal
        tile, and moves this runner within the shared maze grid.'''
        if not self.wallAhead(maze):
            global victory
            if self.heading == north:
                victory = maze[self.row-1][self.col] != None and maze[self.row-1][self.col].color == red
                maze[self.row-1][self.col] = self
                maze[self.row][self.col] = None
                self.row = self.row-1
            elif self.heading == south:
                victory = maze[self.row+1][self.col] != None and maze[self.row+1][self.col].color == red
                maze[self.row+1][self.col] = self
                maze[self.row][self.col] = None
                self.row = self.row+1
            elif self.heading == east:
                victory = maze[self.row][self.col+1] != None and maze[self.row][self.col+1].color == red
                maze[self.row][self.col+1] = self
                maze[self.row][self.col] = None
                self.col = self.col+1
            elif self.heading == west:
                victory = maze[self.row][self.col-1] != None and maze[self.row][self.col-1].color == red
                maze[self.row][self.col-1] = self
                maze[self.row][self.col] = None
                self.col = self.col-1
            draw()
    def draw(self):
        """Draw the runner's body plus a small green square indicating its
        current heading (uses the module-level `size`)."""
        rect = pygame.Rect(self.col*size,self.row*size,size,size)
        pygame.draw.rect(self.surface, self.color, rect)
        #Draw a heading indicator
        if self.heading == north:
            rect = pygame.Rect(self.col*size+size/4,self.row*size+size/4-size/2,size/2,size/2)
        elif self.heading == south:
            rect = pygame.Rect(self.col*size+size/4,self.row*size+size/4+size/2,size/2,size/2)
        elif self.heading == east:
            rect = pygame.Rect(self.col*size+size/4+size/2,self.row*size+size/4,size/2,size/2)
        elif self.heading == west:
            rect = pygame.Rect(self.col*size+size/4-size/2,self.row*size+size/4,size/2,size/2)
        pygame.draw.rect(self.surface, green, rect)
    def goToGoal(self, maze, goal_row, goal_col):
        """Take one navigation step toward the goal; the commented-out calls
        are the alternative strategies tried during development."""
        #self.openNavigation(maze, goal_row, goal_col)
        #self.randomNavigation(maze)
        #self.deadEndNavigation(maze)
        #self.handWallNavigation(maze)
        #self.randomHybridNavigation(maze, goal_row, goal_col)
        self.deterministicHybridNavigation(maze, goal_row, goal_col)
    def deterministicHybridNavigation(self, maze, goal_row, goal_col):
        '''This uses open navigation until it gets stuck at which
        point it remembers how far it was from the goal and then
        engages wall following until it gets closer than when it got
        stuck, at which point it reverts to wall following.
        This requires additional variables in order to remember the closest
        distance.
        There is one tiny randomness involved here and that's whether to
        use left or right handed wall following procedures. It makes a
        difference.

        Memory slots used: memory0 = open-navigation mode flag,
        memory1 = best (smallest) Manhattan distance seen, memory2/memory3 =
        last row/col (progress & loop detection), memory4 = handedness.'''
        #Set memory of best_distance
        if self.memory1 == 0:
            self.memory1 = 999999 #closest we've gotten to goal
        #Navigate to goal
        if self.memory0: #Whether or not we are open navigating
            #Use these variables to detect progress
            self.memory2 = self.row
            self.memory3 = self.col
            #Take one open navigation step
            self.openNavigation(maze, goal_row, goal_col)
            print('open nav step')
            #Detect if stuck and if so, revert to other navigation mode
            stuck = (self.memory2==self.row and self.memory3==self.col)
            if stuck:
                self.memory0 = False #Whether or not we are open navigating
                #Remember distance
                temp_distance = abs(self.col-goal_col)+abs(self.row-goal_row)
                if temp_distance<self.memory1: #closest we've gotten to goal
                    self.memory1 = temp_distance
                #Choose to be left or right handed
                self.memory4 = random.random()<0.5
                if self.memory4: #Left handed if true
                    self.turnRight() #To put the wall on our left
                else:
                    self.turnLeft() #To put the wall on our right
                #Remember where we started
                self.memory2=self.row
                self.memory3=self.col
        else:
            #Take one step along the wall
            if self.memory4: #Left handed if true
                print('left hand step')
                self.handWallNavigation(maze)
            else:
                self.handWallNavigationRight(maze)
                print('right hand step')
            #Check distance
            new_distance = abs(self.col-goal_col)+abs(self.row-goal_row)
            if new_distance <= self.memory1: #closest distance to goal
                print('best distance: '+str(self.memory1))
                self.memory1 = new_distance
                self.memory0 = True #Whether or not we are open navigating
            else:
                #If we're back where we started then kick over to the
                #other navigation
                self.memory0 = (self.memory2==self.row and self.memory3==self.col) #Whether or not we are open navigating
    def randomHybridNavigation(self, maze, goal_row, goal_col):
        '''This is a combination wall following and open maze navigation
        strategy that randomly switches between the behaviors. It is
        weighted towards wall following. This DOES succeed on all mazes,
        but it can take a long time.'''
        if random.random() < 0.8:
            self.handWallNavigation(maze)
        else:
            self.openNavigation(maze, goal_row, goal_col)
    def handWallNavigation(self, maze):
        '''This is your standard, put your hand on the wall and never
        take it off strategy for a typical corn maze.
        This strategy is stumped by trial05, when the goal is not against a
        wall, but does well up until then.
        This ASSUMES your left hand is on the wall. The version below
        assumes a right hand on the wall.'''
        self.turnLeft()
        if self.wallAhead(maze):
            self.turnRight()
            if self.wallAhead(maze):
                self.turnRight()
                self.moveAhead(maze)
            else:
                self.moveAhead(maze)
    def handWallNavigationRight(self, maze):
        '''This is your standard, put your hand on the wall and never
        take it off strategy for a typical corn maze.
        This strategy is stumped by trial05, when the goal is not against a
        wall, but does well up until then.
        This ASSUMES your right hand is on the wall. The version above
        is left handed.'''
        self.turnRight()
        if self.wallAhead(maze):
            self.turnLeft()
            if self.wallAhead(maze):
                self.turnLeft()
                self.moveAhead(maze)
            else:
                self.moveAhead(maze)
    def deadEndNavigation(self, maze):
        '''Moves straight until there is a wall then turns to next open
        direction and moves again. This will miss openings along the sides.
        This strategy is stumped by trial03, but does well up until then.'''
        if self.wallAhead(maze):
            self.turnLeft()
            if self.wallAhead(maze):
                self.turnRight()
                self.turnRight()
        self.moveAhead(maze)
    def openNavigation(self, maze, goal_row, goal_col):
        '''Navigation for open spaces.
        Calls to draw are inserted to better see what's going on.'''
        #Take one step in the best north/south direction
        if goal_row<self.row:
            self.setHeading(north)
            self.moveAhead(maze)
        elif goal_row>self.row:
            self.setHeading(south)
            self.moveAhead(maze)
        #Take one step in the best east/west direction
        if goal_col<self.col:
            self.setHeading(west)
            self.moveAhead(maze)
        elif goal_col>self.col:
            self.setHeading(east)
            self.moveAhead(maze)
    def randomNavigation(self, maze):
        '''Move by pure randomness'''
        r = random.randint(0,2)
        if r == 0:
            self.moveAhead(maze)
        elif r == 1:
            self.turnLeft()
        elif r == 2:
            self.turnRight()
#Open file to read in text representation of the maze
file_handle = open(maze_file, 'r')
line = file_handle.readline()
line = line.strip()
map_characters = [] #2d array
# NOTE(review): because lines are stripped first, reading stops at the first
# blank line in the maze file.
while line:
    map_characters.append(line)
    line = file_handle.readline()
    line = line.strip()
#Now map_characters contains the maze file read in as a 2d array of characters.
#Create a screen of the appropriate dimensions
map_width = len(map_characters[0]) #width in number of tiles
map_height = len(map_characters) #height in number of tiles
surface = pygame.display.set_mode((map_width*size,map_height*size))
#Coordinates of the start and goal tiles
start_row = start_col = goal_row = goal_col = 0
#Convert map_characters to a 2d array of sprites now.
map = []
for row in range(len(map_characters)):
    temp_row = []
    for col in range(len(map_characters[row])):
        if map_characters[row][col] == 'w':
            temp_row.append(Wall(surface, col*size, row*size, size, blue))
        elif map_characters[row][col] == 's':
            # 's' marks the start tile: remember it, leave the cell empty.
            temp_row.append(None)
            start_row = row
            start_col = col
        elif map_characters[row][col] == 'e':
            # 'e' marks the exit/goal, drawn as a red wall tile.
            temp_row.append(Wall(surface, col*size, row*size, size, red))
            goal_row = row
            goal_col = col
        else:
            temp_row.append(None)
    map.append(temp_row)
#Create maze runner
runner = MazeRunner(surface, start_row, start_col, size, yellow)
map[start_row][start_col] = runner
#Main program loop
done = False
while not done:
    # Handle quit/escape; otherwise either show the win banner or take one
    # navigation step per frame.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                done = True
    if victory:
        surface.blit(font.render('Winner!', True,white), (10,10))
        pygame.display.flip()
        #Delay to get desired frames per second
        clock.tick(fps)
    else:
        runner.goToGoal(map, goal_row, goal_col)
pygame.quit() | StarcoderdataPython |
3520356 | #!/usr/bin/env python3
import sys
import argparse
import rclpy
from rclpy.node import Node
from rmf_traffic_msgs.msg import ItineraryClear
def main(argv = sys.argv):
    '''
    Publish a single ItineraryClear message to the RMF traffic schedule.

    Example :
        - participant_name: 0
        - itinerary_version: 0
        - topic_name: rmf_traffic/itinerary_clear
    '''
    default_participant_name = 0
    default_itinerary_version = 0
    default_topic_name = 'rmf_traffic/itinerary_clear'
    parser = argparse.ArgumentParser()
    # type=int: without it, values supplied on the command line arrive as
    # str (only the defaults were int), and assigning a str to the integer
    # message fields below would fail.
    parser.add_argument('-p', '--participant_name', type=int, default=default_participant_name)
    parser.add_argument('-v', '--itinerary_version', type=int, default=default_itinerary_version)
    parser.add_argument('-t', '--topic_name', default=default_topic_name)
    args = parser.parse_args(argv[1:])
    print('participant_name: {}'.format(args.participant_name))
    print('itinerary_version: {}'.format(args.itinerary_version))
    print('topic_name: {}'.format(args.topic_name))
    rclpy.init()
    node = rclpy.create_node('test_node')
    pub = node.create_publisher(ItineraryClear, args.topic_name, 10)
    msg = ItineraryClear()
    msg.participant = args.participant_name
    msg.itinerary_version = args.itinerary_version
    # Spin briefly so the publisher can match subscribers before publishing.
    rclpy.spin_once(node, timeout_sec=1.0)
    pub.publish(msg)
    rclpy.spin_once(node, timeout_sec=0.5)
    print('all done!')
if __name__ == '__main__':
    # Pass full argv; main() strips the program name itself.
    main(sys.argv)
| StarcoderdataPython |
8144515 | import re
DURATION_REGEX = re.compile(r'PT(?P<minutes>\d+)M(?P<seconds>\d+)')
def minutes_and_seconds_from_duration(duration):
    """
    Returns a tuple of integers minutes, seconds from the YouTube duration format
    which is PT3M20S
    :param duration: string YouTube duration
    :return: tuple of integers
    """
    parsed = re.match(r'PT(?P<minutes>\d+)M(?P<seconds>\d+)', duration)
    return int(parsed.group('minutes')), int(parsed.group('seconds'))
def duration_in_seconds(duration):
    """Convert a 'PT#M#S' duration string to its total length in seconds."""
    mins, secs = minutes_and_seconds_from_duration(duration)
    return 60 * mins + secs
| StarcoderdataPython |
3472829 | """
N <NAME>
Monte 1 / 2 / 3 marche
"""
10
3 = 3 * 1 + 2 * 1 + 1 * 1
3 = 1 + 1 + 1
10 / 3
3 = 1 / 2
4
1 1 1 1
1 1 2
1 2 1
1 3
2 1 1
2 2
3 1
4
1 1 1 1 1
1 1 1 2
1 1 2 1
1 1 3
1 2 1 1
1 2 2
1 3 1
2 1 1 1
2 1 2
2 2 1
2 3
3 1 1
3 2
p = nombre de possibilité
p(5) = p(4) + p(3) + p(2)
p(4) = p(3) + p(2) + p(1)
p(3) = p(2) + p(1) + p(0)
p(n) = p(n - 1) + p(n - 2) + p(n - 3)
p(0) = 1
p(1) = 1
p(2) = 2
p(3) = 4
def p(n):
    """Number of ways to climb *n* steps taking 1, 2 or 3 steps at a time.

    Iterative tribonacci recurrence p(n) = p(n-1) + p(n-2) + p(n-3), with
    p(0) = 1 and p(n) = 0 for n < 0.  O(n) time instead of the exponential
    naive recursion, same values for every n.
    """
    if n < 0:
        return 0
    two_back, one_back, current = 0, 0, 1  # p(n-2), p(n-1), p(n) at n = 0
    for _ in range(n):
        two_back, one_back, current = one_back, current, two_back + one_back + current
    return current
| StarcoderdataPython |
4948819 | <reponame>nishaarya/LondonCrimes<filename>build_week_2_london_crimes_nisha_arya.py
# -*- coding: utf-8 -*-
"""Build Week 2 - London Crimes - <NAME>
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1z6aphy51MJe2yn47nWOL-SIv3Ou4O00V
"""
url = https://www.kaggle.com/jboysen/london-crime/download
import pandas as pd
# Full 2008-2016 London crime counts at LSOA level (from the Kaggle dataset).
df = pd.read_csv('london_crime_by_lsoa.csv')
df.head()
"""The dataset I have chosen is based on the freequency and the type of crimes that have occured between January 2008 to December 2016. It looks at the different boroughs in London and if the crime committed it considered 'major' or minor. It gives more information on the year and month that the crime occured, with the column 'value' telling us how many times it occured within that specific month. Looking at the data, I can start to brainstorm and explore if there are seasonal or time-of-week/day changes in crime occurrences? Or if there are particular crimes that mainly occur in a particular brorough and if these increase or decrease in a particular month."""
df.shape
#the number of rows and columns
df.tail()
"""Overall, my dataset is quite large, containing 13 million rows. It is a good dataset as I have a wide range of data to use, compare and analyse but I know it will have an impact on the accuracy of my data and it will be time consuming. With that, I have then decided to use a specific year, which is 2011. I chose this year due to the fact that I knew that the 2011 London Riots occured and I wanted to see if this had an effect on the frequency of crimes that occured during the year."""
from datetime import date
# Restrict the analysis to 2011 (the year of the London riots).
df2 = df[(df['year'] == 2011)]
df2.head()
df.isnull().sum()
# Unique Values - the number of times the crimes happen per month
import numpy as np
np.unique(df2["value"])
df2.describe()
# mean of value x count of value = baseline prediction
# baseline prediction is the minimum
# on average, there is 0.47 times a crime occurs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def function_name(item):
    """Binarise a crime count: positive counts become 1, zero stays 0
    (anything else falls through and returns None)."""
    if item > 0:
        return 1
    if item == 0:
        return 0
# Binary target: 1 if any crime was recorded in that row's month, else 0.
df2['crime_occured'] = df2['value'].apply(function_name)
"""Baseline - create predictions for a dataset
A baseline is a method that uses simple summary statistics which is creates predictions for a dataset. Baselines are used to measure accuracy and used as an indication to compare back to once you further analyse your data and do more accuracy testing.
"""
df3 = df2.drop(columns=['value'])
df3.head()
Total = df2['value'].sum()
print (Total)
"""The below interactive bar plot is showing me the different crimes that occured in the year 2011. The bar plot allows me to see which borough in London had the most crimes in the year, giving me the interaction of seeing which crimes occured (major or minor). Looking at this, I would like to analyse the column 'month' and see how it differs in the type of crime that occured."""
import plotly.express as px
data = px.data.gapminder()
df3_major = df3['crime_occured']
# Crime counts per borough, coloured by the binary target.
fig = px.bar(df2, x='borough', y='value',
             hover_data=['major_category', 'minor_category'], color='crime_occured',
             labels={'Month':'Sum of Crimes Occured'}, height=400)
#fig.set_title("The frequency and type of crime occured in different boroughs in London")
fig.show()
import plotly.express as px
data = px.data.gapminder()
df3_major = df3['crime_occured']
fig = px.bar(df3, x='borough', y='crime_occured',
             hover_data=['major_category', 'minor_category'], color='crime_occured',
             labels={'Month':'Sum of Crimes Occured'}, height=400)
#fig.set_title("The frequency and type of crime occured in different boroughs in London")
fig.show()
# Scatter plot for minor_category
fig = px.scatter_3d(df2, x='month', y='value', z='minor_category',
                    color='borough')
fig.show()
# Scatter plot for major_category
fig = px.scatter_3d(df2, x='month', y='value', z='major_category',
                    color='borough')
fig.show()
df2.value.value_counts().groupby(df3['crime_occured'])
"""I found my baseline by using value_counts and it stands at 74%. My target is 'crime_occured' which is a type of binary data which represents if a crime occured (1) or did not occur (0) in that given month. So the baseline is telling me that 74% of my data has no correlation between the time of the month and the frequency of crime committed."""
df3['crime_occured'].value_counts(normalize=True)
df3.crime_occured.value_counts().plot.bar()
df3.head()
# 2.Choose what data to hold out for your test set
#The training set contains a known output and the model learns on this data
#test data is used to evaluate its accuracy
from sklearn.model_selection import train_test_split
# 80/20 train/test split, then 80/20 train/validation from the train part.
train, test = train_test_split(df3, train_size=0.80, test_size=0.20, random_state=2)
train.shape
test.shape
train, val = train_test_split(train, train_size=0.80, test_size=0.20, random_state=42)
train.shape
val.shape
"""I am now going to focus on my target and features which will allow me to chose an evaluation metric and compare the different accuracy scores that I get."""
target = 'crime_occured'
features = ['lsoa_code' , 'borough' , 'major_category' , 'minor_category' , 'year' , 'month']
X_train = train[features]
X_val = val[features]
X_test = test[features]
y_train = train[target]
y_val = val[target]
y_test = test[target]
X_train.shape
X_val.shape
X_test.shape
"""# Objective 2: Define a regression or classification problem, choose an appropriate evaluation metric and begin with baselines"""
!pip install category_encoders
import category_encoders as ce
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import randint, uniform
from sklearn.model_selection import RandomizedSearchCV
"""Test Accuracy"""
X_test
X_train.head()
y_train.head()
# Classification problem
# Evaluation metric - accuracy score
# Encode categoricals ordinally, impute medians, then fit a random forest.
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train
pipeline.fit(X_train, y_train)
print('Test Accuracy', pipeline.score(X_test, y_test))
# This test accuracy is same as my baseline
"""# Objective 3: Student fits and evaluates any linear model for regression or classification"""
import category_encoders as ce
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
preprocessing= make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    StandardScaler()
)
X_train_transform = preprocessing.fit_transform(X_train)
X_test_transform = preprocessing.transform(X_test)
# NOTE(review): fit_transform on the validation split refits the encoder and
# scaler; transform() would keep the train-fitted state -- confirm intent.
X_val_transform = preprocessing.fit_transform(X_val)
model = RandomForestClassifier(bootstrap=True, class_weight=None,
                               criterion='gini', max_depth=100,
                               max_features='auto',
                               max_leaf_nodes=None,
                               min_impurity_decrease=0.0,
                               min_impurity_split=None,
                               min_samples_leaf=10,
                               min_samples_split=2,
                               min_weight_fraction_leaf=0.0,
                               n_estimators=106, n_jobs=-1,
                               oob_score=False, random_state=7,
                               verbose=0, warm_start=False)
model.fit(X_train_transform, y_train)
!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
# Permutation importance on the validation split, scored by accuracy.
permuter = PermutationImportance(
    model,
    scoring='accuracy',
    n_iter=2,
    random_state=42
)
permuter.fit(X_val_transform, y_val)
new_variable = X_val.columns.tolist()
pd.Series(permuter.feature_importances_, new_variable).sort_values(ascending=False)
eli5.show_weights(
    permuter,
    top=None, # show permutation importances for all features
    feature_names=new_variable)
# minor_category holds the most weight on its influence on my predictions
plt.figure(figsize=(8,8))
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)
importances.sort_values().plot.barh(color='grey');
"""1) Train/Test/Val Accuracy"""
from sklearn.metrics import accuracy_score
# Fit on train set
model.fit(X_train_transform, y_train)
# Get train accuracy
y_pred = model.predict(X_train_transform)
print('Train Accuracy', accuracy_score(y_train, y_pred))
# Get test accuracy
y_pred = model.predict(X_test_transform)
print('Test Accuracy', accuracy_score(y_test, y_pred))
# Get validation accuracy
y_pred = model.predict(X_val_transform)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
"""2) Train/Test/Val - Using Logistic Regression"""
#Logistic regression - binary
# train accuracy
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X_train_transform, y_train)
print('Train Accuracy', log_reg.score(X_train_transform, y_train))
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X_test_transform, y_test)
print('Test Accuracy', log_reg.score(X_test_transform, y_test))
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X_val_transform, y_val)
print('Validation Accuracy', log_reg.score(X_val_transform, y_val))
#solver is a hyperparameter - that looks into the data, coefficeent etc
#74.536% of the time were accurate, whilst our baseline was 74.583%
"""# Objective 4: Student fits and evaluates a decision tree, random forest, or gradient boosting model for regression or classification
3) Train/Test/Val - Using Gradient Boosting Model
"""
from xgboost import XGBClassifier
pipeline = make_pipeline(
ce.OrdinalEncoder(),
XGBClassifier(n_estimators=118, random_state=42, n_jobs=-1, max_depth = 5)
)
pipeline.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
y_pred = pipeline.predict(X_train)
print('Train Accuracy', accuracy_score(y_train, y_pred))
y_pred = pipeline.predict(X_test)
print('Test Accuracy', accuracy_score(y_test, y_pred))
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
"""Feature Importance"""
# Just an example to test out
from sklearn.impute import SimpleImputer
#drop-column year
column = 'year'
# Fit without column
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
pipeline.fit(X_train.drop(columns=column), y_train)
score_without = pipeline.score(X_test.drop(columns=column), y_test)
print(f'Validation Accuracy without {column}: {score_without}')
# Fit with column
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
score_with = pipeline.score(X_test, y_test)
print(f'Validation Accuracy with {column}: {score_with}')
# Compare the error with & without column
print(f'Drop-Column Importance for {column}: {score_with - score_without}')
"""# Objective 6: Student writes 300+ words (not including code).
# Student reports baseline score, validation scores from 2+ models, and test score from 1 selected mode
The dataset I have chosen is based on the frequency and the type of crimes that occurred between January 2008 and December 2016. It looks at the different boroughs in London and whether the crime committed is considered 'major' or 'minor'. It gives more information on the year and month that the crime occurred, with the column 'value' telling us how many times it occurred within that specific month. Looking at the data, I can start to brainstorm and explore whether there are seasonal or time-of-week/day changes in crime occurrences, or whether there are particular crimes that mainly occur in a particular borough and if these increase or decrease in a particular month.
Overall, my dataset is quite large, containing 13 million rows. It is a good dataset as I have a wide range of data to use, compare and analyse but I know it will have an impact on the accuracy of my data and it will be time consuming. With that, I have then decided to use a specific year, which is 2011. I chose this year due to the fact that I knew that the 2011 London Riots occured and I wanted to see if this had an effect on the frequency of crimes that occured during the year.
A baseline is a method that uses simple summary statistics to create predictions for a dataset. Baselines are used to measure accuracy and serve as a point of comparison once you further analyse your data and do more accuracy testing. I kicked off with getting my baseline, which is the starting point for creating predictions for my data set. I got my baseline by using value_counts (the number of occurrences of an element in a list) and it stands at 74%. My target is 'crime_occured', which is a type of binary data representing whether a crime occurred (1) or did not occur (0) in that given month. Binary data only has 2 outcomes: yes or no, true or false. Looking at the value of my baseline, it is telling me that 74% of my data has no correlation between the time of the month and the frequency of crime committed.
I initially started off with my Test Accuracy, using classification accuracy score. Accuracy score is a type of evaluation metric which looks at the number of correct predictions over the total number of predictions. My test accuracy is 0.73%, which is 1% lower than my baseline. This indicates to me that I may need to use other machine learning algorithms to try to beat my baseline of 74%.
I then moved onto the Random Forest Classifier which is considered as a highly accurate and robust method because of the number of decision trees (predictions)it outputs. It takes the average of all the predictions, cancelling out the biases, whilst handling missing values and being able to get the feature importance, which helps in selecting the most contributing features. I then used Eli5 which is a package used in Data Science which helps to debug machine learning classifiers and explain their predictions. This tells me that lsoa_code ( Lower Super Output Area code), month and borough hold the most weight on its influence on my predictions. This supports my research question that the time of the year and borough does have an affect on the frequency of crimes that occur. I also looked into the feature importance, which allowed me to explore which features had any significance with my research question.
# Objective 7:Student makes 2+ visualizations to explain their model
"""
!pip install shap
import shap
shap.initjs()
enc = ce.OrdinalEncoder()
enc.fit(X_train)
processed_X_train = enc.transform(X_train)
X_train_clean = processed_X_train.fillna(method='ffill')
model = RandomForestClassifier(n_estimators = 200, random_state = 6)
model.fit(X_train_clean, y_train)
row = X_train_clean.iloc[[0]]
explainerModel = shap.TreeExplainer(model)
shap_values_Model = explainerModel.shap_values(row)
shap.force_plot(base_value = explainerModel.expected_value[0]
,shap_values = explainerModel.shap_values(row)[0]
,features = row.iloc[[0]]
,link = 'logit'
)
# 0.71 is our accuracy
| StarcoderdataPython |
6680274 | <gh_stars>0
import dns.resolver
# DNS record types that resolve_domain() queries, in this order.
SUPPORTED_RECORDS = ['A', 'MX', 'NS', 'CERT', 'TXT']
class DNSRecord(object):
    """Container for the DNS lookup results of a single domain.

    Each supported record type gets a private slot (``_a``, ``_mx``, ...)
    that starts as ``None`` and is filled by :meth:`resolve_domain` with a
    list of record strings.
    """

    def __init__(self):
        # One slot per entry in SUPPORTED_RECORDS, populated lazily.
        self._a = None
        self._mx = None
        self._ns = None
        self._cert = None
        self._txt = None
        self._registered_at = None

    def resolve_domain(self, target: str):
        """Query every record type in SUPPORTED_RECORDS for *target*.

        Failures for individual record types are printed (best-effort) and
        do not abort the remaining lookups.
        """
        for record_type in SUPPORTED_RECORDS:
            try:
                answer = dns.resolver.resolve(target, record_type)
                values = [str(entry) for entry in answer]
                setattr(self, f"_{record_type.lower()}", values)
            except Exception as Ex:
                print(f"Target {target}")
                print(Ex)

    def __str__(self):
        return f"DNSRecord for IP {self._a}"
| StarcoderdataPython |
6670486 | from .propagators import Conditional, Cascade, PointMutation, MateUniform, SelectBest, SelectUniform, InitUniform, IntervalMutationNormal
def get_default_propagator(pop_size, limits, mate_prob, mut_prob, random_prob, sigma_factor=0.05):
    """Assemble the default breeding pipeline for a population.

    The pipeline selects the best *pop_size* individuals, draws two uniform
    parents, mates and mutates them, and occasionally re-initializes an
    individual at random. A Conditional wrapper falls back to uniform
    initialization until the population reaches *pop_size*.
    """
    stages = [
        SelectBest(pop_size),
        SelectUniform(2),
        MateUniform(mate_prob),
        PointMutation(limits, probability=mut_prob),
        IntervalMutationNormal(limits, sigma_factor=sigma_factor, probability=1),
        InitUniform(limits, parents=1, probability=random_prob),  # TODO this should be put in a "forked" propagator?
    ]
    pipeline = Cascade(stages)
    fallback = InitUniform(limits)
    return Conditional(pop_size, pipeline, fallback)
| StarcoderdataPython |
296595 | # ======================== Imports ======================== #
import argparse
import os
import warnings
from typing import Any, Callable, Dict, List
import higher
import torch
import torch.nn as nn
from numpy import number
from tqdm import tqdm
import wandb
from dataloader import ECGDataSetWrapper
from engine.helpers import (
do_ft_head,
do_pretrain,
eval_student,
inner_loop_finetune,
update_lossdict,
)
from engine.utils import model_saver
from hyperparam_utils import gather_flat_grad, hyper_step, zero_hypergrad
from loss import NTXentLoss
from nets.resnet import ecg_simclr_resnet18, ecg_simclr_resnet34
from nets.temporal_warp import RandWarpAugLearnExMag
from nets.wrappers import MultiTaskHead
from utils import set_seed
# Ignore certain warnings for aesthetic output
warnings.filterwarnings("ignore", category=UserWarning)
# Command-line interface: training schedule, architectures, and checkpoints.
parser = argparse.ArgumentParser(description="ECG SIMCLR IFT")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--pretrain_lr", type=float, default=1e-4)
parser.add_argument("--finetune_lr", type=float, default=1e-4)
parser.add_argument("--hyper_lr", type=float, default=1e-4)
parser.add_argument("--epochs", default=50, type=int)
parser.add_argument("--ex", default=500, type=int)
parser.add_argument("--warmup_epochs", type=int, default=1)
parser.add_argument("--pretrain_steps", type=int, default=10)
parser.add_argument("--finetune_steps", type=int, default=1)
parser.add_argument("--studentarch", type=str, default="resnet18")
parser.add_argument("--teacherarch", type=str, default="warpexmag")
parser.add_argument("--dataset", type=str, default="ecg")
parser.add_argument("--neumann", type=int, default=1)
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--savefol", type=str, default="checkpoints")
# NOTE(review): store_false means --save is True by default and passing the
# flag DISABLES saving — confirm the flag name matches the intent.
parser.add_argument("--save", action="store_false")
parser.add_argument("--no_probs", action="store_true")
parser.add_argument("--temperature", type=float, default=0.5)
parser.add_argument("--checkpoint", type=str)
parser.add_argument("--teach_checkpoint", type=str)
args = parser.parse_args()
# Create a directory to save model checkpoints
os.makedirs(args.savefol, exist_ok=True)
set_seed(args.seed)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Initialize Normalized Temperature-Scaled Cross-Entropy Loss
nt_xent_criterion = NTXentLoss(
    device, args.batch_size, args.temperature, use_cosine_similarity=True
)
class AverageMeter(object):
    """Tracks the most recent value and the running mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def train(args):
    """Run meta-parameterized SimCLR pre-training on ECG data.

    Alternates contrastive pre-training of the student encoder with
    fine-tuning of a multi-task head; after the warmup epochs (and when a
    teacher augmentation network exists) it also takes hypergradient steps
    on the teacher via implicit differentiation (higher + Neumann series).

    Returns the trained student, head, teacher, both optimizers, the
    pre-train scheduler, and the hyperparameter optimizer.
    """
    pt_meter = AverageMeter()
    ft_loss_meter = AverageMeter()
    ft_acc_meter = AverageMeter()
    # Create Save Path
    # NOTE(review): save_path is only bound when args.save is True; all later
    # uses are guarded by the same flag, so this is consistent but fragile.
    if args.save:
        save_path: str = args.savefol
    if args.teacherarch == "warpexmag":
        # Create the teacher architecture
        teacher: nn.Module = RandWarpAugLearnExMag(inshape=[1024]).to(device)
        # Initialize Hyperparameters, Hyperparameter Optimizer and Hyperparameter Scheduler
        hyp_params: List = list(teacher.parameters())
        hyp_optim: torch.optim.Optimizer = torch.optim.Adam(
            [
                {"params": teacher.net.parameters(), "lr": args.hyper_lr},
                {"params": teacher.flow_mag_layer.parameters(), "lr": args.hyper_lr},
                {"params": [teacher.flow_mag], "lr": 1},
            ]
        )
        hyp_scheduler = None
    else:
        # No teacher: plain SimCLR pre-training, no hypergradient machinery.
        args.teacherarch = None
        teacher = None
        hyp_params = None
        hyp_optim = None
        hyp_scheduler = None
    # Instantiate Datasets and Dataloader
    DSHandle: Callable = ECGDataSetWrapper(args.batch_size)
    pretrain_dl, train_dl, val_dl, test_dl, _, NUM_TASKS_FT = DSHandle.get_data_loaders(
        args
    )
    # Initialize Student and Head
    if args.studentarch == "resnet18":
        student: nn.Module = ecg_simclr_resnet18().to(device)
    elif args.studentarch == "resnet34":
        student: nn.Module = ecg_simclr_resnet34().to(device)
    # Good Error Handling
    else:
        raise NotImplementedError
    head: nn.Module = MultiTaskHead(256, NUM_TASKS_FT).to(device)
    # Initialize Optimizers and Schedule
    pretrain_optim: torch.optim.Optimizer = torch.optim.Adam(
        student.parameters(), lr=args.pretrain_lr
    )
    pretrain_scheduler: Any = torch.optim.lr_scheduler.CosineAnnealingLR(
        pretrain_optim, T_max=args.epochs, eta_min=0, last_epoch=-1
    )
    finetune_optim: torch.optim.Optimizer = torch.optim.Adam(
        head.parameters(), lr=args.finetune_lr
    )
    # Manage Checkpointing
    if args.checkpoint:
        ckpt: Any = torch.load(args.checkpoint)
        student.load_state_dict(ckpt["student_sd"])
        if teacher is not None and ckpt["teacher_sd"] is not None:
            teacher.load_state_dict(ckpt["teacher_sd"])
        head.load_state_dict(ckpt["head_sd"])
        pretrain_optim.load_state_dict(ckpt["pt_opt_state_dict"])
        pretrain_scheduler.load_state_dict(ckpt["pt_sched_state_dict"])
        finetune_optim.load_state_dict(ckpt["ft_opt_state_dict"])
        if teacher is not None and ckpt["hyp_opt_state_dict"] is not None:
            hyp_optim.load_state_dict(ckpt["hyp_opt_state_dict"])
        # Epoch number is parsed from the checkpoint filename — assumes the
        # naming convention produced by model_saver (TODO confirm offsets).
        load_ep = int(os.path.split(args.checkpoint)[-1][16:-3]) + 1
        print(f"Restored from epoch {load_ep}")
    else:
        print("Training from scratch")
        load_ep: int = 0
    if args.teach_checkpoint:
        print("LOADING PT AUG MODEL")
        ckpt: Any = torch.load(args.teach_checkpoint)
        teacher.load_state_dict(ckpt["aug_sd"])
        print("LOAD SUCCESSFUL")
    # Initialize Loss Dictionaries
    stud_pretrain_ld: Dict = {"loss": [], "acc": []}
    stud_finetune_train_ld: Dict = {"loss": [], "acc": []}
    stud_finetune_val_ld: Dict = {"loss": [], "acc": []}
    stud_finetune_test_ld: Dict = {}
    num_finetune_steps: int = args.finetune_steps
    num_neumann_steps: int = args.neumann
    # Start Training
    steps = 0
    print("Starting Training")
    for n in range(load_ep, args.epochs):
        # Create a Progress Bar for better visualization of training
        progress_bar: Any = tqdm(pretrain_dl)
        for _, (xis, xjs) in enumerate(progress_bar):
            # Customize Progress Bar
            progress_bar.set_description("Epoch " + str(n))
            # Zero out hyperparameters of teacher (if needed)
            if teacher is not None:
                zero_hypergrad(hyp_params)
            # Perform pretraining (if needed)
            if n < args.warmup_epochs or teacher is None:
                # Get PreTraining loss
                pt_loss: number = do_pretrain(
                    student,
                    teacher,
                    pretrain_optim,
                    nt_xent_criterion,
                    xis,
                    xjs,
                    device,
                )
                # Sync Metrics to Weights and Biases 🔥
                wandb.log({"PreTraining Loss": pt_loss})
                # Update the PreTraining Meter
                pt_meter.update(pt_loss)
                # Get FineTuning Metrics (if needed)
                if teacher is not None:
                    ft_train_loss, ft_train_acc = do_ft_head(
                        student, head, finetune_optim, train_dl, device
                    )
                else:
                    ft_train_loss, ft_train_acc = 0, 0
                # Sync Metrics to Weights and Biases 🔥
                wandb.log(
                    {
                        "FineTuning Training Loss": ft_train_loss,
                    }
                )
                # Update FineTuning Meter
                ft_loss_meter.update(ft_train_loss)
                ft_acc_meter.update(ft_train_acc)
                ft_val_loss, ft_val_acc, hypg = 0, 0, 0
            else:
                # Get PreTraining loss
                pt_loss = do_pretrain(
                    student,
                    teacher,
                    pretrain_optim,
                    nt_xent_criterion,
                    xis,
                    xjs,
                    device,
                )
                # Sync Metrics to Weights and Biases 🔥
                wandb.log({"PreTraining Loss": pt_loss})
                # Update PreTraining Meter
                pt_meter.update(pt_loss)
                # Every pretrain_steps, run the differentiable inner loop and
                # take one hypergradient step on the teacher.
                if steps % args.pretrain_steps == 0:
                    with higher.innerloop_ctx(
                        head, finetune_optim, copy_initial_weights=True
                    ) as (fnet, diffopt):
                        (
                            (ft_train_loss, ft_train_acc),
                            (ft_val_loss, ft_val_acc),
                            ft_grad,
                            fnet,
                        ) = inner_loop_finetune(
                            student,
                            fnet,
                            teacher,
                            diffopt,
                            train_dl,
                            val_dl,
                            num_finetune_steps,
                            device,
                        )
                        # Persist the functionally-updated head weights.
                        head.load_state_dict(fnet.state_dict())
                    # Update FineTuning Metrics
                    ft_loss_meter.update(ft_train_loss)
                    ft_acc_meter.update(ft_train_acc)
                    # Sync Metrics to Weights and Biases 🔥
                    wandb.log(
                        {
                            "FineTuning Training Loss": ft_train_loss,
                        }
                    )
                    # Get FineTuning Gradient
                    ft_grad: torch.Tensor = gather_flat_grad(ft_grad)
                    # Grab the current learning rate from the first param group.
                    for param_group in pretrain_optim.param_groups:
                        cur_lr = param_group["lr"]
                        break
                    # Take a "Hyper" Step
                    hypg = hyper_step(
                        student,
                        head,
                        teacher,
                        hyp_params,
                        pretrain_dl,
                        nt_xent_criterion,
                        pretrain_optim,
                        ft_grad,
                        cur_lr,
                        num_neumann_steps,
                        device,
                    )
                    hypg = hypg.norm().item()
                    hyp_optim.step()
                else:
                    # Pass through the FineTuning Head
                    ft_train_loss, ft_train_acc = do_ft_head(
                        student, head, finetune_optim, train_dl, device
                    )
                    # Sync Metrics to Weights and Biases 🔥
                    wandb.log(
                        {
                            "FineTuning Training Loss": ft_train_loss,
                        }
                    )
                    # Update FineTuning Metrics
                    ft_loss_meter.update(ft_train_loss)
                    ft_acc_meter.update(ft_train_acc)
                    ft_val_loss, ft_val_acc, hypg = 0, 0, 0
            steps += 1
            progress_bar.set_postfix(
                pretrain_loss="%.4f" % pt_meter.avg,
                finetune_train_loss="%.4f" % ft_loss_meter.avg,
                finetune_train_acc="%.4f" % ft_acc_meter.avg,
            )
            # Update Loss Dictionaries
            stud_pretrain_ld["loss"].append(pt_loss)
            stud_finetune_train_ld["loss"].append(ft_train_loss)
            stud_finetune_train_ld["acc"].append(ft_train_acc)
            stud_finetune_val_ld["loss"].append(ft_val_loss)
            stud_finetune_val_ld["acc"].append(ft_val_acc)
        # Evaluate Student
        if teacher is not None:
            ft_test_ld = eval_student(student, head, test_dl, device, split="Test")
            stud_finetune_test_ld = update_lossdict(stud_finetune_test_ld, ft_test_ld)
            ft_val_ld = eval_student(student, head, val_dl, device, split="Validation")
            stud_finetune_val_ld = update_lossdict(stud_finetune_val_ld, ft_val_ld)
            ft_train_ld = eval_student(student, head, train_dl, device, split="Train")
            stud_finetune_train_ld = update_lossdict(
                stud_finetune_train_ld, ft_train_ld
            )
        if hyp_scheduler is not None:
            hyp_scheduler.step()
        # Reset the Meters
        pt_meter.reset()
        ft_loss_meter.reset()
        ft_acc_meter.reset()
        # Save the Logs
        if args.save:
            tosave = {
                "pretrain_ld": stud_pretrain_ld,
                "finetune_train_ld": stud_finetune_train_ld,
                "finetune_val_ld": stud_finetune_val_ld,
                "finetune_test_ld": stud_finetune_test_ld,
            }
            torch.save(tosave, os.path.join(save_path, "logs.ckpt"))
            # On the final epoch, checkpoint everything and upload a W&B artifact.
            if n == args.epochs - 1:
                model_saver(
                    n,
                    student,
                    head,
                    teacher,
                    pretrain_optim,
                    pretrain_scheduler,
                    finetune_optim,
                    hyp_optim,
                    save_path,
                )
                print(f"Saved model at epoch {n}")
                trained_model_artifact = wandb.Artifact(
                    "{}-{}-{}-{}".format(
                        args.seed, args.warmup_epochs, args.epochs, args.ex
                    ),
                    type="{}".format(args.studentarch),
                    description="A {} trained on the PTB-XL ECG dataset using SimCLR using Meta-Parameterized Pre-Training for {} warmup epochs, {} Meta FT examples with random seed {}".format(
                        args.studentarch, args.warmup_epochs, args.ex, args.seed
                    ),
                    metadata=vars(args),
                )
                trained_model_artifact.add_dir(args.savefol)
                wandb.run.log_artifact(trained_model_artifact)
    return (
        student,
        head,
        teacher,
        pretrain_optim,
        pretrain_scheduler,
        finetune_optim,
        hyp_optim,
    )
if __name__ == "__main__":
    # Open a W&B run named after the key hyperparameters, train, then close
    # the run so metrics and artifacts are flushed.
    wandb.init(
        project="meta-parameterized-pre-training",
        name="{}-{}-{}-{}".format(args.seed, args.warmup_epochs, args.epochs, args.ex),
        entity="sauravmaheshkar",
        job_type="train",
        config=vars(args),
    )
    train(args)
    wandb.run.finish() # type: ignore
| StarcoderdataPython |
3575878 | <gh_stars>0
"""Holds custom exceptions."""
class CommandCancel(Exception):
    """Raised to signal that an in-progress command was cancelled."""
    pass
| StarcoderdataPython |
295715 | import typing
import flask
import werkzeug.datastructures
from nasse import config, exceptions, models, utils
_overwritten = {"nasse", "app", "nasse_endpoint",
"client_ip", "method", "headers", "values", "args", "form", "params", "cookies"}
class Request(object):
    def __init__(self, app, endpoint: models.Endpoint, dynamics: dict = {}) -> None:
        """
        A request object looking like the flask.Request one, but with the current endpoint in it and verification

        Attributes listed in the module-level ``_overwritten`` set are stored
        on this instance; any other attribute read or write is proxied to the
        active ``flask.request`` object.

        Example
        --------
        >>> from nasse import request
        # and then when processing the request #
        >>> request.nasse_endpoint
        Endpoint(path='/hello')
        >>> request.nasse_endpoint.name
        'Greeting Endpoint'
        Parameters
        ----------
        app: Nasse
            The Nasse application handling this request
        endpoint: Nasse.models.Endpoint
            The request's endpoint
        dynamics: dict
            Dynamic URL parameters extracted from the route.
            NOTE(review): mutable default argument — the dict is only read
            here, but confirm callers never mutate the shared default.
        """
        if not isinstance(endpoint, models.Endpoint):
            raise exceptions.request.MissingEndpoint(
                "The current request doesn't have any Nasse endpoint")
        self.nasse = app
        self.app = self.nasse
        self.nasse_endpoint = endpoint
        self.client_ip = utils.ip.get_ip()
        self.method = flask.request.method.upper()
        # sanitize — optionally escape every user-sent value before storing it
        if config.General.SANITIZE_USER_SENT:
            self.values = werkzeug.datastructures.MultiDict((key, utils.sanitize.sanitize_text(value))
                                                            for key, value in flask.request.values.items(multi=True))
            #values.append((key, value.replace("<", "<").replace(">", ">")))
        else:
            self.values = werkzeug.datastructures.MultiDict(
                flask.request.values.items(multi=True))
        self.params = self.values
        if config.General.SANITIZE_USER_SENT:
            self.args = werkzeug.datastructures.MultiDict((key, utils.sanitize.sanitize_text(value))
                                                          for key, value in flask.request.args.items(multi=True))
        else:
            self.args = werkzeug.datastructures.MultiDict(
                flask.request.args.items(multi=True))
        if config.General.SANITIZE_USER_SENT:
            self.form = werkzeug.datastructures.MultiDict((key, utils.sanitize.sanitize_text(value))
                                                          for key, value in flask.request.form.items(multi=True))
        else:
            self.form = werkzeug.datastructures.MultiDict(
                flask.request.form.items(multi=True))
        if config.General.SANITIZE_USER_SENT:
            self.dynamics = werkzeug.datastructures.MultiDict((key, utils.sanitize.sanitize_text(value))
                                                              for key, value in dynamics.items())
        else:
            self.dynamics = werkzeug.datastructures.MultiDict(dynamics.items())
        self.headers = werkzeug.datastructures.MultiDict(flask.request.headers)
        self.cookies = werkzeug.datastructures.MultiDict(flask.request.cookies)
        # verify if missing — for each declared param/header/cookie/dynamic:
        # raise when a required value is absent, otherwise coerce present
        # values through the declared type converter.
        for attr, exception, current_values in [("params", exceptions.request.MissingParam, self.values), ("headers", exceptions.request.MissingHeader, self.headers), ("cookies", exceptions.request.MissingCookie, self.cookies), ("dynamics", exceptions.request.MissingDynamic, self.dynamics)]:
            for value in self.nasse_endpoint[attr]:
                if value.name not in current_values:
                    if value.required and (value.all_methods or self.method in value.methods):
                        raise exception(name=value.name)
                else:
                    if value.type is not None:
                        results = []
                        for key, val in current_values.items(multi=True):
                            if key == value.name:
                                results.append(value.type(val))
                        current_values.setlist(value.name, results)
    def __setattr__(self, name: str, value: typing.Any) -> None:
        # Instance-local names are set normally; everything else is written
        # through to the flask request proxy.
        if name in _overwritten:
            return super().__setattr__(name, value)
        return flask.request.__setattr__(name, value)
    def __getattribute__(self, name: str) -> typing.Any:
        # Instance-local names are read normally; everything else is fetched
        # from the underlying flask request object.
        if name in _overwritten:
            return super().__getattribute__(name)
        return flask.request._get_current_object().__getattribute__(name)
| StarcoderdataPython |
3239471 | import os.path as p
def parse_username_and_password_file(path):
    """Read *path* and return its first two lines, stripped, as a tuple of bytes.

    The path may contain ``~``; it is expanded and made absolute before
    opening. The file is read in binary mode, so the returned username and
    password are ``bytes`` objects.
    """
    resolved = p.abspath(p.expanduser(path))
    with open(resolved, 'rb') as handle:
        lines = handle.readlines()
    return tuple(line.strip() for line in lines[:2])
| StarcoderdataPython |
9671557 | from kivy.vector import Vector
from .decorator import CallResult
class Movement:
    """Directional navigation over a 2D tile grid.

    Each direction method scans from *pos* until it finds the first tile
    whose ``selectable`` attribute is truthy, returning that tile (wrapped
    by the ``CallResult`` decorator) or falling off the grid edge.
    """
    def __init__(self, grid):
        self.grid = grid
        # size.x = number of rows, size.y = number of columns.
        self.size = Vector(len(self.grid), len(self.grid[0]))
    @CallResult
    def down(self, pos: Vector):
        """First selectable tile below *pos* in the same column."""
        row = pos.x + 1
        while row < self.size.x:
            candidate = self.grid[row][pos.y]
            if candidate.selectable:
                return candidate
            row += 1
    @CallResult
    def up(self, pos):
        """First selectable tile above *pos* in the same column."""
        row = pos.x - 1
        while row >= 0:
            candidate = self.grid[row][pos.y]
            if candidate.selectable:
                return candidate
            row -= 1
    @CallResult
    def left(self, pos):
        """First selectable tile left of *pos* in the same row."""
        col = pos.y - 1
        while col >= 0:
            candidate = self.grid[pos.x][col]
            if candidate.selectable:
                return candidate
            col -= 1
    @CallResult
    def right(self, pos):
        """First selectable tile right of *pos* in the same row."""
        col = pos.y + 1
        while col < self.size.y:
            candidate = self.grid[pos.x][col]
            if candidate.selectable:
                return candidate
            col += 1
| StarcoderdataPython |
4984631 | <filename>src/django_grainy/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-30 14:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_grainy.fields
class Migration(migrations.Migration):
    """Initial schema: per-group and per-user grainy permission tables.

    Auto-generated by Django 1.11.7 (``makemigrations``); avoid hand-editing
    beyond comments, since the migration graph depends on its contents.
    """
    initial = True
    dependencies = [
        ("auth", "0008_alter_user_username_max_length"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # One (namespace, permission) row per auth.Group.
        migrations.CreateModel(
            name="GroupPermission",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "namespace",
                    models.CharField(
                        help_text="Permission namespace (A '.' delimited list of keys",
                        max_length=255,
                    ),
                ),
                ("permission", django_grainy.fields.PermissionField(default=1)),
                (
                    "group",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="grainy_permissions",
                        to="auth.Group",
                    ),
                ),
            ],
            options={
                "verbose_name": "Group Permission",
                "verbose_name_plural": "Group Permissions",
                "base_manager_name": "objects",
            },
        ),
        # One (namespace, permission) row per user (swappable AUTH_USER_MODEL).
        migrations.CreateModel(
            name="UserPermission",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "namespace",
                    models.CharField(
                        help_text="Permission namespace (A '.' delimited list of keys",
                        max_length=255,
                    ),
                ),
                ("permission", django_grainy.fields.PermissionField(default=1)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="grainy_permissions",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "verbose_name": "User Permission",
                "verbose_name_plural": "User Permissions",
                "base_manager_name": "objects",
            },
        ),
    ]
| StarcoderdataPython |
318879 | <reponame>frikol3000/Amazon_Headset_Scraper<gh_stars>0
# Scrapy project settings for the Amazon headset scraper.
BOT_NAME = 'AmazonHeadSetScraping'
SPIDER_MODULES = ['AmazonHeadSetScraping.spiders']
NEWSPIDER_MODULE = 'AmazonHeadSetScraping.spiders'
# Fixed desktop Firefox UA (also randomized per-request by the
# scrapy_fake_useragent middleware configured below).
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'
# Politeness / throttling.
DOWNLOAD_DELAY = 3
DOWNLOAD_TIMEOUT = 30
RANDOMIZE_DOWNLOAD_DELAY = True
REACTOR_THREADPOOL_MAXSIZE = 8
CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 16
CONCURRENT_REQUESTS_PER_IP = 16
# AutoThrottle adapts the delay to observed latency.
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 1
AUTOTHROTTLE_MAX_DELAY = 3
AUTOTHROTTLE_TARGET_CONCURRENCY = 8
AUTOTHROTTLE_DEBUG = True
# Retry transient failures and throttling responses (429 etc.).
RETRY_ENABLED = True
RETRY_TIMES = 3
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 401, 403, 404, 405, 406, 407, 408, 409, 410, 429]
# Middleware stack: disable the stock UA middleware in favour of
# scrapy_fake_useragent's randomized user agents.
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'scrapy.spidermiddlewares.referer.RefererMiddleware': 80,
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': 90,
    'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 120,
    'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 130,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
    'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 900,
} | StarcoderdataPython |
366484 | """Allow for easily adding segments to lines."""
from functools import singledispatch
from typing import Optional
from cairo import ANTIALIAS_NONE
from gi.repository import Gtk
from gaphas.aspect import MoveType
from gaphas.aspect.handlemove import HandleMove, ItemHandleMove, item_at_point
from gaphas.connector import Handle, LinePort
from gaphas.geometry import distance_line_point, distance_point_point_fast
from gaphas.item import Item, Line, matrix_i2i
from gaphas.solver import WEAK
from gaphas.view import GtkView, Selection
from gaphas.view.model import Model
@singledispatch
class Segment:
    """Dispatch point for per-item-type segment editing.

    Concrete implementations are registered with ``Segment.register(ItemType)``
    (see ``LineSegment`` below); constructing a Segment for an unregistered
    item type raises TypeError.
    """
    def __init__(self, item, model):
        raise TypeError
    def split_segment(self, segment, count=2):
        ...
    def split(self, pos):
        ...
    def merge_segment(self, segment, count=2):
        ...
@Segment.register(Line)  # type: ignore
class LineSegment:
    """Segment editing (split/merge) for ``gaphas.item.Line`` items."""
    def __init__(self, item: Line, model: Model):
        self.item = item
        self.model = model
    def split(self, pos):
        """Split the segment whose midpoint lies within 4 units of *pos*.

        *pos* is in canvas coordinates. Returns the newly created handle,
        or None if no segment midpoint is close enough.
        """
        item = self.item
        handles = item.handles()
        x, y = item.matrix_i2c.inverse().transform_point(*pos)
        for h1, h2 in zip(handles, handles[1:]):
            xp = (h1.pos.x + h2.pos.x) / 2
            yp = (h1.pos.y + h2.pos.y) / 2
            if distance_point_point_fast((x, y), (xp, yp)) <= 4:
                segment = handles.index(h1)
                handles, ports = self.split_segment(segment)
                return handles and handles[0]
    def split_segment(self, segment, count=2):
        """Split one item segment into ``count`` equal pieces.

        Two lists are returned
        - list of created handles
        - list of created ports
        :Parameters:
         segment
            Segment number to split (starting from zero).
         count
            Amount of new segments to be created (minimum 2).
        """
        item = self.item
        if segment < 0 or segment >= len(item.ports()):
            raise ValueError("Incorrect segment")
        if count < 2:
            raise ValueError("Incorrect count of segments")
        def do_split(segment, count):
            # Insert one new handle 1/count of the way along the segment,
            # replace the old port by two ports, then recurse on the tail.
            handles = item.handles()
            p0 = handles[segment].pos
            p1 = handles[segment + 1].pos
            dx, dy = p1.x - p0.x, p1.y - p0.y
            new_h = Handle((p0.x + dx / count, p0.y + dy / count), strength=WEAK)
            item.insert_handle(segment + 1, new_h)
            p0 = LinePort(p0, new_h.pos)
            p1 = LinePort(new_h.pos, p1)
            item.remove_port(item.ports()[segment])
            item.insert_port(segment, p0)
            item.insert_port(segment + 1, p1)
            if count > 2:
                do_split(segment + 1, count - 1)
        do_split(segment, count)
        # force orthogonal constraints to be recreated
        item.update_orthogonal_constraints(item.orthogonal)
        self.recreate_constraints()
        self.model.request_update(item)
        handles = item.handles()[segment + 1 : segment + count]
        ports = item.ports()[segment : segment + count - 1]
        return handles, ports
    def merge_segment(self, segment, count=2):
        """Merge two (or more) item segments.
        Tuple of two lists is returned, list of deleted handles and
        list of deleted ports.
        :Parameters:
         segment
            Segment number to start merging from (starting from zero).
         count
            Amount of segments to be merged (minimum 2).
        """
        item = self.item
        if len(item.ports()) < 2:
            raise ValueError("Cannot merge line with one segment")
        if item.orthogonal and len(item.ports()) < 1 + count:
            raise ValueError("Cannot merge orthogonal line to one segment")
        if segment < 0 or segment >= len(item.ports()):
            raise ValueError("Incorrect segment")
        if count < 2 or segment + count > len(item.ports()):
            raise ValueError("Incorrect count of segments")
        # remove handle and ports which share position with handle
        deleted_handles = item.handles()[segment + 1 : segment + count]
        deleted_ports = item.ports()[segment : segment + count]
        for h in deleted_handles:
            item.remove_handle(h)
        for p in deleted_ports:
            item.remove_port(p)
        # create new port, which replaces old ports destroyed due to
        # deleted handle
        p1 = item.handles()[segment].pos
        p2 = item.handles()[segment + 1].pos
        port = LinePort(p1, p2)
        item.insert_port(segment, port)
        # force orthogonal constraints to be recreated
        item.update_orthogonal_constraints(item.orthogonal)
        self.recreate_constraints()
        self.model.request_update(item)
        return deleted_handles, deleted_ports
    def recreate_constraints(self):
        """Rebuild connection constraints for every line connected to
        ``self.item``, re-gluing each connected handle to the closest port
        (ports changed during split/merge)."""
        connected = self.item
        model = self.model
        def find_port(line, handle, item):
            """Glue to the closest item on the canvas.
            If the item can connect, it returns a port.
            """
            pos = matrix_i2i(line, item).transform_point(*handle.pos)
            port = None
            max_dist = 10e6
            for p in item.ports():
                pg, d = p.glue(pos)
                if d >= max_dist:
                    continue
                port = p
                max_dist = d
            return port
        for cinfo in list(model.connections.get_connections(connected=connected)):
            item, handle = cinfo.item, cinfo.handle
            port = find_port(item, handle, connected)
            constraint = port.constraint(item, handle, connected)
            model.connections.reconnect_item(item, handle, port, constraint=constraint)
class SegmentState:
    """Mutable state shared by the segment-drag gesture callbacks."""
    moving: Optional[MoveType]  # active handle-move operation, or None when idle
    def __init__(self):
        self.reset()
    def reset(self):
        # Forget any in-progress drag.
        self.moving = None
def segment_tool(view):
    """Create a Gtk drag gesture that splits/moves line segments on *view*.

    GTK 3 gestures are bound to a widget at construction time; GTK 4
    gestures are created standalone, hence the version check.
    """
    gesture = (
        Gtk.GestureDrag.new(view)
        if Gtk.get_major_version() == 3
        else Gtk.GestureDrag.new()
    )
    segment_state = SegmentState()
    gesture.connect("drag-begin", on_drag_begin, segment_state)
    gesture.connect("drag-update", on_drag_update, segment_state)
    gesture.connect("drag-end", on_drag_end, segment_state)
    return gesture
def on_drag_begin(gesture, start_x, start_y, segment_state):
    """Try to split a line segment under the pointer and start moving the
    new handle; claim the gesture on success, deny it otherwise."""
    view = gesture.get_widget()
    pos = (start_x, start_y)
    item = item_at_point(view, pos)
    handle = item and maybe_split_segment(view, item, pos)
    if handle:
        segment_state.moving = HandleMove(item, handle, view)
        gesture.set_state(Gtk.EventSequenceState.CLAIMED)
    else:
        gesture.set_state(Gtk.EventSequenceState.DENIED)
def on_drag_update(gesture, offset_x, offset_y, segment_state):
    """Forward drag motion (start point + offset) to the active handle move."""
    if segment_state.moving:
        _, x, y = gesture.get_start_point()
        segment_state.moving.move((x + offset_x, y + offset_y))
def on_drag_end(gesture, offset_x, offset_y, segment_state):
    """Finish the active handle move at the final position and reset state."""
    if segment_state.moving:
        _, x, y = gesture.get_start_point()
        segment_state.moving.stop_move((x + offset_x, y + offset_y))
        segment_state.reset()
class LineSegmentMergeMixin:
    """HandleMove mixin: after a handle is dropped, merge away segments
    that have become (nearly) collinear via maybe_merge_segments()."""
    view: GtkView
    item: Item
    handle: Handle
    def stop_move(self, pos):
        super().stop_move(pos)  # type: ignore[misc]
        maybe_merge_segments(self.view, self.item, self.handle)
# HandleMove aspect for Line items: standard handle movement plus automatic
# segment merging when the handle is dropped.
@HandleMove.register(Line)
class LineHandleMove(LineSegmentMergeMixin, ItemHandleMove):
    pass
def maybe_split_segment(view, item, pos):
    """Split the hovered line at *pos* (view coordinates) if it is focused.

    Returns the newly created handle, or None when nothing was split.
    """
    # NOTE(review): the *item* argument is immediately shadowed by the hovered
    # item, so the caller-supplied value is never used — confirm whether
    # operating on the hovered item is intentional.
    item = view.selection.hovered_item
    handle = None
    if item is view.selection.focused_item:
        try:
            # Raises TypeError for item types without a registered Segment.
            segment = Segment(item, view.model)
        except TypeError:
            pass
        else:
            cpos = view.matrix.inverse().transform_point(*pos)
            handle = segment.split(cpos)
    return handle
def maybe_merge_segments(view, item, handle):
    """Merge the two segments adjacent to ``handle`` when the handle lies
    (almost) on the straight line between its neighbouring handles.

    No-op for the first/last handle, for items that reject the merge
    (ValueError from merge_segment), or when the handle is more than two
    units away from the line between its neighbours.
    """
    handles = item.handles()
    # don't merge using first or last handle
    if handles[0] is handle or handles[-1] is handle:
        return
    handle_index = handles.index(handle)
    segment = handle_index - 1
    # cannot merge starting from the last segment: step back one segment.
    # BUG FIX: this previously assigned ``segment = -1``, which made the
    # assertion below fail whenever the branch was taken.
    if segment == len(item.ports()) - 1:
        segment -= 1
    assert 0 <= segment < len(item.ports()) - 1
    before = handles[handle_index - 1]
    after = handles[handle_index + 1]
    # Only merge when the handle is (nearly) collinear with its neighbours.
    d, _ = distance_line_point(before.pos, after.pos, handle.pos)
    if d > 2:
        return
    try:
        Segment(item, view.model).merge_segment(segment)
    except ValueError:
        # Item refused the merge; leave the handles as they are.
        pass
    else:
        view.model.request_update(item)
class LineSegmentPainter:
    """Draws pseudo-handles at the midpoint of every segment of the hovered
    (and focused) gaphas Line.  Dragging one of those midpoints splits the
    segment into two, creating a real handle; ConnectHandleTool performs the
    user interaction for this feature.
    """

    def __init__(self, selection: Selection):
        self.selection = selection

    def paint(self, _items, cairo):
        hovered = self.selection.hovered_item
        if not (isinstance(hovered, Line) and hovered is self.selection.focused_item):
            return
        handles = hovered.handles()
        # One marker per segment, at the midpoint between consecutive handles.
        for first, second in zip(handles[:-1], handles[1:]):
            mid_x = (first.pos.x + second.pos.x) / 2
            mid_y = (first.pos.y + second.pos.y) / 2
            dev_x, dev_y = cairo.user_to_device(
                *hovered.matrix_i2c.transform_point(mid_x, mid_y)
            )
            # Draw in device space so the marker size is zoom-independent.
            cairo.save()
            cairo.set_antialias(ANTIALIAS_NONE)
            cairo.identity_matrix()
            cairo.translate(dev_x, dev_y)
            cairo.rectangle(-3, -3, 6, 6)
            cairo.set_source_rgba(0, 0.5, 0, 0.4)
            cairo.fill_preserve()
            cairo.set_source_rgba(0.25, 0.25, 0.25, 0.6)
            cairo.set_line_width(1)
            cairo.stroke()
            cairo.restore()
| StarcoderdataPython |
9797408 | <filename>ovl/partials/keyword_partial.py
import functools
import warnings
from .reverse_partial import ReversePartial
def keyword_partial(target_function):
    """Decorator turning ``target_function`` into a two-stage call.

    The decorated function is first called with every argument *except* the
    primary input (contours, image, ...); that call returns an "activator"
    (a ReversePartial wrapper) which is later applied to the input data:

    .. code-block:: python

        activator = area_filter(min_area=200, max_area=5000)
        final_value = activator(list_of_contours)

    Activators built this way can be handed directly to Vision objects, e.g.
    ``Vision(..., contours_filters=[some_filter(parameter1=5)], ...)``.
    Passing the preloaded parameters positionally is discouraged and raises
    a SyntaxWarning, since keywords make the intent explicit.

    :param target_function: the function to be preloaded
    :return: an argument loader that preloads target_function's parameters
    """
    def argument_loader(*args, **kwargs):
        # Warn on positional use: keywords make preloaded parameters explicit.
        if args:
            warning_message = ("When passing parameters it is recommended to pass everything as keywords "
                               "in order to make it clear what parameters are passed."
                               "(Do: {0}(parameter1=value2, parameter2=value2) not {0}(value, value2))"
                               .format(target_function.__name__))
            warnings.warn(warning_message, SyntaxWarning)
        preloaded = ReversePartial(target_function, *args, **kwargs)
        return functools.update_wrapper(preloaded, target_function)

    return functools.update_wrapper(argument_loader, target_function)
| StarcoderdataPython |
1823644 | import re
# Precompiled patterns used for text cleanup.
BREAK_PATTERN = re.compile(r'\n')        # line breaks
EMOJI_PATTERN = re.compile(r'\\uf\w+')   # literal "\uf..." escaped private-use codepoints
SPACES_PATTERN = re.compile(r' +')       # runs of spaces
# BUG FIX: the dot before the TLD was unescaped ('.' matches any character),
# so e.g. "exampleXcom" matched; escape it so only a literal '.' matches.
WEB_PATTERN = re.compile(r'[\w\-_\d]*\.(com|net)', re.I)
NUM_PATTERN = re.compile(r'\d+')         # digit runs
| StarcoderdataPython |
3566549 | import os
import random
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision.transforms as transforms
from data_aug.gaussian_blur import GaussianBlur
from data_aug.cutout import Cutout
from data_aug.outpainting import Outpainting
from data_aug.nonlin_trans import NonlinearTrans
from data_aug.sharpen import Sharpen
np.random.seed(0)  # fix the global numpy RNG at import time for reproducible sampling
class USDataset_video(Dataset):
    """Self-supervised ultrasound dataset: each sub-directory is one video;
    positive pairs/triplets are frames sampled from the same video."""

    def __init__(self, data_dir, transform=None, LabelList=None, DataList=None, Checkpoint_Num=None):
        """
        Ultrasound self-supervised training Dataset, choose 2 different images from a video
        :param data_dir: str
        :param transform: torch.transform

        LabelList: indices of videos whose class label should be exposed;
                   all other samples get the sentinel label 9999 (unlabeled).
        DataList:  indices of videos allowed to be sampled; any other index
                   is replaced by a random element of this list.
        """
        # Label maps for the other supported datasets, keyed by a fixed
        # 3-character slice of the frame path (kept for reference):
        # self.label_name = {"Rb1": 0, "Rb2": 1, "Rb3": 2, "Rb4": 3, "Rb5": 4, "F0_": 5, "F1_": 6, "F2_": 7, "F3_": 8, "F4_": 9,
        #                    "Reg": 10, "Cov": 11, "Ali": 10, "Bli": 11, "Ple": 11, "Oth": 11} # US-4
        # self.label_name = {"Rb1": 0, "Rb2": 1, "Rb3": 2, "Rb4": 3, "Rb5": 4} # CLUST
        # self.label_name = {"F0_": 0, "F1_": 1, "F2_": 2, "F3_": 3, "F4_": 4} # Liver Forbrosis
        self.label_name = {"Reg": 0, "Cov": 1} # Butterfly
        # self.label_name = {"Ali": 0, "Bli": 1, "Ple": 2, "Oth": 3} # COVID19-LUSMS
        self.data_info = self.get_img_info(data_dir)  # list of per-video frame-path lists
        self.transform = transform
        self.LabelList = LabelList
        self.DataList = DataList
        self.Checkpoint_Num = Checkpoint_Num

    def __getitem__(self, index):
        # ## Different data rate: only indices in DataList may be used.
        if index not in self.DataList:
            index = random.sample(self.DataList, 1)[0] # index in data set
        path_imgs = self.data_info[index]
        if len(path_imgs) >= 3: # at least 3 images in this video
            path_img = random.sample(path_imgs, 3) # random choose 3 images
            img1 = Image.open(path_img[0]).convert('RGB') # 0~255
            img2 = Image.open(path_img[1]).convert('RGB') # 0~255
            img3 = Image.open(path_img[2]).convert('RGB') # 0~255
            if index in self.LabelList:
                # The class is encoded at fixed character positions of the
                # frame path, e.g. path_imgs[0][35:38] == 'Cov' for
                # '/home/zhangchunhui/MedicalAI/Butte/Cov-...'.  This offset
                # is specific to that root — verify when the root changes.
                # NOTE(review): labels are read from path_imgs (the full
                # list), not the sampled path_img; harmless only because all
                # frames of one video share the same label.
                label1 = self.label_name[path_imgs[0][35:38]]
                label2 = self.label_name[path_imgs[1][35:38]]
                label3 = self.label_name[path_imgs[2][35:38]]
            else:
                label1, label2, label3 = 9999, 9999, 9999 # unlabel data = 9999
            if self.transform is not None:
                img1, img2, img3 = self.transform((img1, img2, img3)) # transform
            ##########################################################################
            ### frame mixup: blend the outer frames with the middle (anchor)
            ### frame using a Beta-distributed coefficient.
            # alpha, beta = 2, 5
            alpha, beta = 0.5, 0.5
            lam = np.random.beta(alpha, beta)
            # img2 as anchor
            mixupimg1 = lam * img1 + (1.0 - lam) * img2
            mixupimg2 = lam * img3 + (1.0 - lam) * img2
            return mixupimg1, label1, mixupimg2, label2, img1, img2
        elif len(path_imgs) == 2:
            path_img = random.sample(path_imgs, 2) # random choose 2 images
            img1 = Image.open(path_img[0]).convert('RGB') # 0~255
            img2 = Image.open(path_img[1]).convert('RGB') # 0~255
            if index in self.LabelList:
                label1 = self.label_name[path_imgs[0][35:38]]
                label2 = self.label_name[path_imgs[1][35:38]]
            else:
                label1, label2 = 9999, 9999 # unlabel data = 9999
            if self.transform is not None:
                img1, img2 = self.transform((img1, img2)) # transform
            return img1, label1, img2, label2, img1, img2
        else: # one image in the video, using augmentation to obtain two positive samples
            img1 = Image.open(path_imgs[0]).convert('RGB') # 0~255
            img2 = Image.open(path_imgs[0]).convert('RGB') # 0~255
            if index in self.LabelList:
                label1 = self.label_name[path_imgs[0][35:38]]
                label2 = self.label_name[path_imgs[0][35:38]]
            else:
                label1, label2 = 9999, 9999 # unlabel data = 9999
            if self.transform is not None:
                img1, img2 = self.transform((img1, img2)) # transform
            return img1, label1, img2, label2, img1, img2
        # if self.transform is not None:
        #     img1, img2 = self.transform((img1, img2)) # transform
        # return img1, label1, img2, label2

    def __len__(self): # number of videos, not frames
        return len(self.data_info)

    @staticmethod
    def get_img_info(data_dir):
        # Walk data_dir; every sub-directory (one video) contributes the
        # list of absolute paths of its .jpg/.png frames.
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            for sub_dir in dirs: # one video as one class
                img_names = os.listdir(os.path.join(root, sub_dir))
                img_names = list(filter(lambda x: x.endswith('.jpg') or x.endswith('.png'), img_names))
                path_imgs = [] # list
                for i in range(len(img_names)):
                    img_name = img_names[i]
                    path_img = os.path.join(root, sub_dir, img_name)
                    path_imgs.append(path_img)
                data_info.append(path_imgs)
        return data_info
class USDataset_image(Dataset):
    """Self-supervised ultrasound dataset that loads a single frame per
    video; both views of the positive pair come from the same frame."""

    def __init__(self, data_dir, transform=None, LabelList=None, DataList=None):
        """
        Ultrasound self-supervised training Dataset, only choose one image from a video
        :param data_dir: str
        :param transform: torch.transform
        """
        self.data_info = self.get_img_info(data_dir)  # list of per-video frame-path lists
        self.transform = transform
        self.LabelList = LabelList
        self.DataList = DataList

    def __getitem__(self, index):
        path_imgs = self.data_info[index] # list of frame paths for this video
        path_img = random.sample(path_imgs, 1) # random choose one image
        # Same frame loaded twice; the transform produces two augmented views.
        img1 = Image.open(path_img[0]).convert('RGB') # 0~255
        img2 = Image.open(path_img[0]).convert('RGB') # 0~255
        # Label from keywords in the path tail (chars 64+): "cov" -> 0,
        # "pneu" -> 1, otherwise 2.  The fixed offset assumes a specific
        # dataset root — verify when the root changes.
        label1 = 0 if path_img[0].lower()[64:].find("cov") > -1 else (1 if path_img[0].lower()[64:].find("pneu") > -1 else 2)
        if self.transform is not None:
            img1, img2 = self.transform((img1, img2)) # transform
        return img1, label1, img2, label1

    def __len__(self): # number of videos
        return len(self.data_info)

    @staticmethod
    def get_img_info(data_dir):
        # Walk data_dir; every sub-directory (one video) contributes the
        # list of its .jpg/.png frame paths.
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            for sub_dir in dirs: # one video as one class
                img_names = os.listdir(os.path.join(root, sub_dir))
                img_names = list(filter(lambda x: x.endswith('.jpg') or x.endswith('.png'), img_names))
                path_imgs = []
                for i in range(len(img_names)):
                    img_name = img_names[i]
                    path_img = os.path.join(root, sub_dir, img_name)
                    path_imgs.append(path_img)
                data_info.append(path_imgs)
        return data_info
class DataSetWrapper(object):
    """Builds train/validation DataLoaders with the SimCLR-style
    augmentation pipeline over the ultrasound datasets defined above."""

    def __init__(self, batch_size, LabelList, DataList, Checkpoint_Num, num_workers, valid_size, input_shape, s):
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.valid_size = valid_size # leave out ratio, e.g. 0.20
        self.s = s # color-jitter strength multiplier
        # input_shape arrives as a string such as "(224,224,3)".
        # NOTE: eval assumes a trusted configuration value.
        self.input_shape = eval(input_shape) # (H, W, C) shape of input image
        self.LabelList = LabelList
        self.DataList = DataList
        self.Checkpoint_Num = Checkpoint_Num

    def get_data_loaders(self):
        ''' Get dataloader for target dataset, this function will be called before the training process '''
        data_augment = self._get_simclr_pipeline_transform()
        print('\nData augmentation:')
        print(data_augment)
        use_video = True  # hard-coded switch between the video and image datasets
        if use_video:
            print('\nUse video augmentation!')
            # Alternative dataset roots (kept for reference):
            # US-4
            # train_dataset = USDataset_video("/home/zhangchunhui/WorkSpace/SSL/Ultrasound_Datasets_train/Video/",
            #                                 transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList) # augmented from 2 images
            # 1 video-CLUST
            # train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Ultrasound_Datasets_train/CLUST/",
            #                                 transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList) # augmented from 2 images
            # 1 video-Liver
            # train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Ultrasound_Datasets_train/Liver/",
            #                                 transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList) # augmented from 2 images
            # 1 video-COVID
            # train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Ultrasound_Datasets_train/COVID/",
            #                                 transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList) # augmented from 2 images
            # 1 video-Butte
            train_dataset = USDataset_video("/home/zhangchunhui/MedicalAI/Butte/",
                                            transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList) # augmented from 2 images
        else:
            print('\nDo not use video augmentation!')
            # Images
            train_dataset = USDataset_image("/home/zhangchunhui/MedicalAI/Butte/",
                                            transform=SimCLRDataTransform(data_augment), LabelList=self.LabelList, DataList=self.DataList) # augmented from 1 image
        train_loader, valid_loader = self.get_train_validation_data_loaders(train_dataset)
        # train_loader = self.get_train_validation_data_loaders(train_dataset)
        return train_loader, valid_loader
        # return train_loader

    def __len__(self): # NOTE(review): returns the batch size, not a dataset length — confirm callers' expectation
        return self.batch_size

    def _get_simclr_pipeline_transform(self):
        '''
        Get a set of data augmentation transformations as described in the SimCLR paper.
        Random Crop (resize to original size) + Random color distortion + Gaussian Blur
        '''
        # Jitter strengths scale with self.s, as in SimCLR.
        color_jitter = transforms.ColorJitter(0.8 * self.s, 0.8 * self.s, 0.8 * self.s, 0.2 * self.s)
        data_transforms = transforms.Compose([Sharpen(degree=0),
                                              transforms.Resize((self.input_shape[0], self.input_shape[1])),
                                              transforms.RandomResizedCrop(size=self.input_shape[0], scale=(0.8, 1.0), ratio=(0.8, 1.25)),
                                              transforms.RandomHorizontalFlip(),
                                              # transforms.RandomRotation(10),
                                              color_jitter,
                                              # transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
                                              # GaussianBlur(kernel_size=int(0.05 * self.input_shape[0])),
                                              transforms.ToTensor(),
                                              # NonlinearTrans(prob=0.9), # 0-1
                                              # transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.25,0.25,0.25]),
                                              # Cutout(n_holes=3, length=32),
                                              # Outpainting(n_holes=5),
                                              transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25]),
                                              ])
        return data_transforms

    def get_train_validation_data_loaders(self, train_dataset):
        # obtain indices that will be used for training / validation
        num_train = len(train_dataset)
        indices = list(range(num_train))
        np.random.shuffle(indices)
        split = int(np.floor(self.valid_size * num_train))
        train_idx, valid_idx = indices[split:], indices[:split]
        train_idx= indices[split:]  # NOTE(review): redundant — train_idx already assigned on the previous line
        # define samplers for obtaining training and validation batches
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        # data loaders for training and validation, drop_last should be False to avoid data shortage of valid_loader
        train_loader = DataLoader(train_dataset, batch_size=self.batch_size, sampler=train_sampler,
                                  num_workers=self.num_workers, drop_last=False, shuffle=False)
        valid_loader = DataLoader(train_dataset, batch_size=self.batch_size, sampler=valid_sampler,
                                  num_workers=self.num_workers, drop_last=False)
        return train_loader, valid_loader
        # return train_loader
class SimCLRDataTransform(object):
    """Apply the wrapped augmentation pipeline independently to each frame
    of a sample, producing a positive triplet (3+ frames) or pair (2)."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, sample):
        if len(sample) > 2:
            # Three-frame sample: augment the first three frames.
            return (self.transform(sample[0]),
                    self.transform(sample[1]),
                    self.transform(sample[2]))
        # Two-frame sample: augment both frames.
        return self.transform(sample[0]), self.transform(sample[1])
| StarcoderdataPython |
287147 | # Compute the sum of perfect squares up to h
import math


def perfect_squares_sum(h):
    """Return (squares, total): the perfect squares 1**2..h**2 and their sum.

    BUG FIX: the original collected the plain integers 1..h in a list named
    ``perfectSquares`` and only squared them for the sum; the list now holds
    the actual squares.  Integer arithmetic replaces int(math.pow(...)).
    """
    squares = [i * i for i in range(1, h + 1)]
    return squares, sum(squares)


def main():
    # Read the upper bound interactively, then show the squares and their sum.
    h = int(input("Enter number:"))
    squares, total = perfect_squares_sum(h)
    print(squares)
    print(f"Sum: {total}")


if __name__ == "__main__":
    # Guarding the interactive part makes the module importable (and testable).
    main()
11305838 | import sys
if sys.argv[0].endswith("__main__.py"):
    # When run via "python -m tktable", argv[0] is the path to __main__.py;
    # rewrite it so usage/help text shows the module-invocation form instead.
    sys.argv[0] = "python -m tktable"
# Delegate to the package's self-test entry point.  NOTE(review): ``main``
# here is whatever the package exposes as ``_test`` — presumably a callable;
# verify in the tktable package itself.
from . import _test as main
main()
4999363 | <filename>cliapp/main_cli.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import click
from .core.mymodule import MyClass
HERE = os.path.abspath(os.path.dirname(__file__))
CMD_LINE_EXAMPLES = """SOME EXAMPLES HERE:
$ cliapp
=> returns some nice text
"""
@click.command()
@click.argument('args', nargs=-1)
@click.option('--examples', is_flag=True, help='Show some examples')
@click.pass_context
def main_cli(ctx, args=None, examples=False):
    """Main CLI."""
    # --examples short-circuits everything else.
    if examples:
        click.secho(CMD_LINE_EXAMPLES, fg="green")
        return
    # No positional arguments: show the command help and stop.
    if not args:
        click.echo(ctx.get_help())
        return
    # Echo each argument and let MyClass announce itself for it.
    for argument in args:
        print('passed argument :: {}'.format(argument))
        MyClass(argument).say_name()
if __name__ == '__main__':
    # Allow running the module directly; click parses sys.argv itself.
    main_cli()
| StarcoderdataPython |
9784298 | <gh_stars>0
#
# @lc app=leetcode.cn id=51 lang=python3
#
# [51] N 皇后
#
# https://leetcode-cn.com/problems/n-queens/description/
#
# algorithms
# Hard (73.36%)
# Likes: 676
# Dislikes: 0
# Total Accepted: 89.5K
# Total Submissions: 122K
# Testcase Example: '4'
#
# n 皇后问题研究的是如何将 n 个皇后放置在 n×n 的棋盘上,并且使皇后彼此之间不能相互攻击。
#
#
#
# 上图为 8 皇后问题的一种解法。
#
# 给定一个整数 n,返回所有不同的 n 皇后问题的解决方案。
#
# 每一种解法包含一个明确的 n 皇后问题的棋子放置方案,该方案中 'Q' 和 '.' 分别代表了皇后和空位。
#
#
#
# 示例:
#
# 输入:4
# 输出:[
# [".Q..", // 解法 1
# "...Q",
# "Q...",
# "..Q."],
#
# ["..Q.", // 解法 2
# "Q...",
# "...Q",
# ".Q.."]
# ]
# 解释: 4 皇后问题存在两个不同的解法。
#
#
#
#
# 提示:
#
#
# 皇后彼此不能相互攻击,也就是说:任何两个皇后都不能处于同一条横行、纵行或斜线上。
#
#
#
# @lc code=start
class Solution:
    def solveNQueens(self, n: int) -> List[List[str]]:
        """Return every distinct placement of n non-attacking queens.

        Each solution is a list of n strings of length n, with 'Q' marking
        a queen and '.' an empty square.

        Uses bitmask backtracking: ``col`` marks attacked columns, ``main``
        the attacked main diagonals (indexed by row - col + n - 1) and
        ``sub`` the attacked anti-diagonals (indexed by row + col).

        BUG FIX: the original never invoked its ``dfs`` helper, called
        ``res.add`` on a list (AttributeError), and left ``convert2board``
        unfinished, so it always returned an empty/failed result.
        """
        res: List[List[str]] = []
        if n == 0:
            return res
        path: List[int] = []  # path[row] == column of the queen in that row

        def dfs(row: int, col: int, main: int, sub: int) -> None:
            if row == n:
                # A full placement: render each row as a '.'/'Q' string.
                res.append(["." * c + "Q" + "." * (n - c - 1) for c in path])
                return
            for i in range(n):
                if ((col >> i) & 1) == 0 and ((main >> (row - i + n - 1)) & 1) == 0 and ((sub >> (row + i)) & 1) == 0:
                    path.append(i)
                    dfs(row + 1,
                        col | (1 << i),
                        main | (1 << (row - i + n - 1)),
                        sub | (1 << (row + i)))
                    path.pop()

        dfs(0, 0, 0, 0)
        return res
# @lc code=end
# def solveNQueens(self, n: int) -> List[List[str]]:
# board = [["." for _ in range(n)] for _ in range(n)]
# res = []
# def isValid(board,row,col):
# for i in range(row):
# if board[i][col] == "Q":
# return False
# i,j = row-1,col-1
# while i >= 0 and j >= 0:
# if board[i][j] == "Q":
# return False
# i -= 1
# j -= 1
# i,j = row-1,col+1
# while i >= 0 and j < n:
# if board[i][j] == "Q":
# return False
# i -= 1
# j += 1
# return True
# def backtrack(board,row):
# if row == len(board):
# res.append(["".join(board[i]) for i in range(row)])
# return
# for col in range(len(board[0])):
# if not isValid(board,row,col):
# continue
# board[row][col] = "Q"
# backtrack(board,row+1)
# board[row][col] = "."
# backtrack(board,0)
# return res
| StarcoderdataPython |
3489469 | from models.models import Historial | StarcoderdataPython |
199776 | <gh_stars>1-10
from datetime import datetime, timedelta
import pytest
from releasy.miner.vcs.miner import Miner
from tests.miner.vcs.mock import DifferentReleaseNameVcsMock
# Exercise release mining against a mock VCS whose tags use mixed naming:
# accept "v"-prefixed or bare release names and ignore a "Final" suffix.
miner = Miner(vcs=DifferentReleaseNameVcsMock(), release_prefixes=["v",""], ignored_suffixes=["Final"])
project = miner.mine_releases()
print(project.release_suffixes)
9714996 | <filename>shortner/views.py<gh_stars>0
from django.shortcuts import render
import pyshorteners
# Create your views here.
def index(request):
    """Render the URL-shortener landing page."""
    return render(request, 'shortner/index.html')
def create(request):
    """Shorten the submitted link and render the page with the result.

    On a POST carrying a ``link`` form field, shortens it via TinyURL and
    re-renders the index template with the short URL in the context.

    BUG FIX: non-POST requests (and POSTs without a ``link`` field) used to
    fall off the end, returning None — which Django rejects with an error —
    or raise KeyError; they now render the plain index page.  A leftover
    debug print of the shortened URL was removed.
    """
    if request.method == 'POST' and request.POST.get('link'):
        link = request.POST['link']
        shortener = pyshorteners.Shortener()
        shorten_url = shortener.tinyurl.short(link)
        context = {
            "url": shorten_url
        }
        return render(request, 'shortner/index.html', context)
    return render(request, 'shortner/index.html')
| StarcoderdataPython |
1762225 | import numpy as np
import theano
import theano.tensor as T
import theano.tensor.nnet.bn as bn
# float32 constants shared by the BN helpers below
eps = np.float32(1e-6)   # numerical-stability epsilon added to the variance
zero = np.float32(0.)
one = np.float32(1.)
def bn_shared(params, outFilters, index):
    ''' Setup BN shared variables.

    Creates per-feature shared variables for layer ``index``:
    running mean/var (moving averages used at evaluation time), the last
    batch mean/var (needed by the exact test-BN computation in update_bn),
    and an iteration counter driving the moving-average weight.
    Returns (normParam dict, flat list of the shared variables).
    '''
    normParam = {}
    template = np.ones((outFilters,), dtype=theano.config.floatX)
    normParam['mean'] = theano.shared(value=0.*template, name='mean_%d' % (index), borrow=True)
    normParam['var'] = theano.shared(value=1.*template, name='var_%d' % (index), borrow=True)
    normParam['mean_batch'] = theano.shared(value=0.*template, name='mean_batch_%d' % (index), borrow=True) # need for exact
    normParam['var_batch'] = theano.shared(value=1.*template, name='var_batch_%d' % (index), borrow=True) # need for exact
    normParam['iter'] = theano.shared(np.float32(1.), name='iter')
    paramsBN = [normParam['mean'], normParam['var'], normParam['mean_batch'], normParam['var_batch'], normParam['iter']]
    return normParam, paramsBN
def bn_layer(x, a, b, normParam, params, phase):
    ''' Apply BN.

    x         : pre-activation tensor to normalize
    a, b      : learned scale (gamma) and shift (beta)
    normParam : shared moving-average statistics (see bn_shared)
    phase = 0 : evaluation -- normalize with the moving-average statistics,
                and update the moving averages
    phase = 1 : training -- normalize with the current batch statistics,
                no moving-average update
    Returns (normalized tensor, list of BN shared-variable updates).
    '''
    minAlpha = params.movingAvMin
    iterStep = params.movingAvStep
    # compute batch mean & variance (per feature map for convnets)
    if params.model == 'convnet':
        mean1 = T.mean(x, axis = (0, 2, 3))
        var1 = T.var(x, axis = (0, 2, 3))
    else:
        mean1 = T.mean(x, axis = 0)
        var1 = T.var(x, axis = 0)
    # moving average as a proxy for validation model
    alpha = (1.-phase)*T.maximum(minAlpha, 1./normParam['iter'])
    mean2 = (1.-alpha)*normParam['mean'] + alpha*mean1
    var2 = (1.-alpha)*normParam['var'] + alpha*var1
    # select the statistics according to the phase
    mean = (1.-phase)*mean2 + phase*mean1
    # BUG FIX: both terms previously used var1, so evaluation (phase=0)
    # normalized with the *batch* variance and the moving average var2 was
    # computed but never applied; mirror the mean computation above.
    var = (1.-phase)*var2 + phase*var1
    std = T.sqrt(var+eps)
    # apply transformation:
    if params.model == 'convnet':
        x = bn.batch_normalization(x, a.dimshuffle('x', 0, 'x', 'x'), b.dimshuffle('x', 0, 'x', 'x'),
                                   mean.dimshuffle('x', 0, 'x', 'x'), std.dimshuffle('x', 0, 'x', 'x'), mode='high_mem')
    else:
        x = bn.batch_normalization(x, a, b, mean, std)
    updateBN = [mean2, var2, mean1, var1, normParam['iter']+iterStep]
    return x, updateBN
def update_bn(model, params, evaluateBN, t1Data, t1Label):
''' Computation of exact batch normalization parameters for the trained model (referred to test-BN).
Implemented are three ways to compute the BN parameters:
'lazy' test-BN are approximated by a running average during training
'default' test-BN are computed by averaging over activations of params.m samples from training set
'proper' test-BN of k-th layer are computed as in 'default',
however the activations are recomputed by rerunning with test-BN params on all previous layers
If the setting is 'lazy', this function will not be called, since running average test-BN
are computed automatically during training.
'''
oldBN, newBN = [{}, {}]
nSamples1 = t1Data.shape[0]
batchSizeBN = nSamples1/params.m
trainPermBN = range(0, nSamples1)
np.random.shuffle(trainPermBN)
# list of layers which utilize BN
if params.model == 'convnet':
allLayers = params.convLayers
loopOver = filter(lambda i: allLayers[i].bn, range(len(allLayers)))
print loopOver
else:
loopOver = range(params.nLayers-1)
# extract old test-BN parameters, reset new
oldBN['mean'] = map(lambda i: model.h[i].normParam['mean'].get_value(), loopOver)
oldBN['var'] = map(lambda i: model.h[i].normParam['var'].get_value(), loopOver)
newBN['mean'] = map(lambda i: 0.*oldBN['mean'][i], range(len(loopOver)))
newBN['var'] = map(lambda i: 0.*oldBN['var'][i], range(len(loopOver)))
# CASE: 'proper'
if params.testBN == 'proper':
# loop over layers, loop over examples
for i in len(range(loopOver)):
layer = loopOver[i]
for k in range(0, params.m):
sampleIndexBN = trainPermBN[(k * batchSizeBN):((k + 1) * (batchSizeBN))]
evaluateBN(t1Data[sampleIndexBN], 0, 1)
newBN['mean'][i] = model.h[layer].normParam['mean_batch'].get_value() + newBN['mean'][i]
newBN['var'][i] = model.h[layer].normParam['var_batch'].get_value() + newBN['var'][i]
np.random.shuffle(trainPermBN)
biasCorr = batchSizeBN / (batchSizeBN-1)
# compute mean, adjust for biases
newBN['mean'][i] /= params.m
newBN['var'][i] *= biasCorr/params.m
model.h[layer].normParam['mean'].set_value(newBN['mean'][i])
model.h[layer].normParam['var'].set_value(newBN['var'][i])
# CASE: 'default'
elif params.testBN == 'default':
# loop over examples
for k in range(0, params.m):
sampleIndexBN = trainPermBN[(k * batchSizeBN):((k + 1) * (batchSizeBN))]
evaluateBN(t1Data[sampleIndexBN], 0, 0)
newBN['mean'] = map(lambda (i, j): model.h[i].normParam['mean_batch'].get_value() + newBN['mean'][j], zip(loopOver, range(len(loopOver))))
newBN['var'] = map(lambda (i, j): model.h[i].normParam['var_batch'].get_value() + newBN['var'][j], zip(loopOver, range(len(loopOver))))
# compute mean, adjust for biases
biasCorr = batchSizeBN / (batchSizeBN-1)
newBN['var'] = map(lambda i: newBN['var'][i]*biasCorr/params.m, range(len(loopOver)))
newBN['mean'] = map(lambda i: newBN['mean'][i]/params.m, range(len(loopOver)))
# updating test-BN parameters, update shared
map(lambda (i,j): model.h[i].normParam['mean'].set_value(newBN['mean'][j]), zip(loopOver, range(len(loopOver))))
map(lambda (i,j): model.h[i].normParam['var'].set_value(newBN['var'][j]), zip(loopOver, range(len(loopOver))))
# printing an example of previous and updated versions of test-BN
print 'BN samples: '
print 'mean low', oldBN['mean'][1][0], newBN['mean'][1][0]
print 'var low', oldBN['var'][1][0], newBN['var'][1][0]
print 'mean up', oldBN['mean'][-1][0], newBN['mean'][-1][0]
print 'var up', oldBN['var'][-1][0], newBN['var'][-1][0]
return model
| StarcoderdataPython |
6438286 | <filename>thespian/system/transport/MultiprocessQueueTransport.py
"""Uses the python multiprocess.Queue as the transport mechanism.
Queues are multi-producer/multi-consumer objects. In this usage,
there will be only one consumer (the current Actor) although there may
be multiple producers (any other actor sending to this actor); this
actor has a single Queue for all incoming messages this actor will
handle.
However, Queues can only be passed by inheritance, not by pickling, so
only the Parent Actor has the proper queue handle to talk to the Child
Actor. Any other Actor wishing to talk to the Child Actor will have
received the Child Actor's address from the Parent Actor, and that
address will indicate that the message must be forwarded through the
Parent Actor. By extension, passing an Actor address itself must be
forwarded through the parent, so this means that a message passed from
one Actor to another must be passed up from the sender to it's Parent
and the message will recurse up to the Parent that is common to both
the Sender and the Receiver, where it will then traverse down the
chain of children to the destination Actor.
This also requires Parent Actors to learn about actors their Children
have created. Each Actor therefore will maintain a table of known
addresses. Any time an Actor creates a new child Actor, it will send
the Address of that Child Actor up to it's Parent, which will record
the new Address and the creating Child for future forwarding to that
Address. The Parent will then recursively send the address upwards;
the Admin will end up having a table of all known Actors.
Worst case is when an actor address has been passed inside a message,
so a particular Actor has no idea how to route an address. If this
routing deferral propagates up to the Admin, then the message will be
sent to EACH AND EVERY child (on the assumption that it will be
discarded by all leaves except the ultimate recipient). Very
inefficient.
This transport is therefore not the most efficient transport, but it
is used as a semi-academic exercise to ensure that the Thespian system
remains flexible to handle non-socket transport mechanisms.
2014-Nov-01 NOTE: this actor seems to be prone to deadlock; multiple
actors telling messages (or external telling internal) appears to
deadlock often. This includes testLoad non-asking tests (deadlock
100% of the time) and
testActorAdder.py:test07_LotsOfActorsEveryTenWithBackground (deadlocks
25% of the time).
"""
import logging
from thespian.actors import *
from thespian.system.utilis import thesplog, partition, foldl, AssocList
from thespian.system.timing import timePeriodSeconds
from thespian.system.transport import *
from thespian.system.transport.asyncTransportBase import asyncTransportBase
from thespian.system.transport.wakeupTransportBase import wakeupTransportBase
from thespian.system.messages.multiproc import ChildMayHaveDied
from thespian.system.addressManager import ActorLocalAddress
from multiprocessing import Queue
import threading
try:
import Queue as Q # Python 2
except ImportError:
import queue as Q # type: ignore # Python 3
from datetime import datetime
try:
import cPickle as pickle
except Exception:
import pickle # type: ignore
# Tuning constants for the queue transport.
# NOTE(review): ``timedelta`` is not imported by the explicit datetime import
# above; presumably it arrives via the ``from thespian.system.transport
# import *`` — verify.
MAX_ADMIN_QUEUESIZE=40 # depth of Admin queue
MAX_ACTOR_QUEUESIZE=10 # depth of Actor queue
MAX_QUEUE_TRANSMIT_PERIOD = timedelta(seconds=20) # always local, so shorter times are appropriate
QUEUE_CHECK_PERIOD = 2 # maximum sleep time in seconds on Q get
class QueueActorAddress(object):  # internal use by this module only
    """Opaque name identifying an Actor reachable via multiprocess Queues."""

    def __init__(self, name):
        self._qaddr = name

    def __str__(self):
        return 'Q.' + str(self._qaddr)

    def __eq__(self, other):
        return isinstance(other, QueueActorAddress) and other._qaddr == self._qaddr

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Equal addresses stringify identically, so hashing the string keeps
        # __eq__/__hash__ consistent.
        return hash(str(self))

    def subAddr(self, inst):
        """Derive a child address string by base-52 encoding ``inst`` with
        the ASCII letters as digits (a..zA..Z)."""
        import string
        alphabet = string.ascii_letters
        encoded = ''
        value = inst
        while True:
            value, remainder = divmod(value, len(alphabet))
            encoded = alphabet[remainder] + encoded
            if value == 0:
                break
        return self._qaddr + '.' + encoded
class ReturnTargetAddressWithEnvelope(object):
    # Marker class with no behavior; the name suggests it requests that the
    # target address accompany the message envelope — confirm against users
    # of this class elsewhere in the transport.
    pass
class ExternalQTransportCopy(object):
    # Marker class with no behavior; the name suggests it tags a copied
    # external transport instance — confirm against users elsewhere.
    pass
class MpQTEndpoint(TransportInit__Base): # internal use by this module only
    """Carries the positional arguments needed to initialize a child's
    transport; args[0] is the address instance number."""
    def __init__(self, *args): self.args = args
    @property
    def addrInst(self): return self.args[0]
class MultiprocessQueueTCore_Common(object):
    def __init__(self, myQueue, parentQ, adminQ, adminAddr):
        """Shared core state for the multiprocess-Queue transport.

        myQueue  : this process's own input Queue
        parentQ  : Queue of the parent Actor (None at the Admin/top level)
        adminQ   : Queue of the Admin
        adminAddr: address of the Admin
        """
        self._myInputQ = myQueue
        self._parentQ = parentQ
        self._adminQ = adminQ
        self._adminAddr = adminAddr
        # _queues is a map of direct child ActorAddresses to Queue instance. Note
        # that there will be multiple keys mapping to the same Queue
        # instance because routing is only either to the Parent or to
        # an immediate Child.
        self._queues = AssocList() # addr -> queue
        # _fwdvia represents routing for other than immediate parent
        # or child (there may be multiple target addresses mapping to
        # the same forward address.
        self._fwdvia = AssocList() # targetAddress -> fwdViaAddress
        self._deadaddrs = []  # addresses known dead; no further routing attempted
        # Signals can set these to true; they should be checked and
        # reset by the main processing loop. There is a small window
        # where they could be missed because signals are not queued,
        # but this should handle the majority of situations. Note
        # that the Queue object is NOT signal-safe, so don't try to
        # queue signals that way.
        self._checkChildren = False
        self._shutdownSignalled = False
def mainLocalInputQueueEndpoint(self): return self._myInputQ
def adminQueueEndpoint(self): return self._adminQ
@property
def adminAddr(self): return self._adminAddr
    def protectedFileNumList(self):
        """File descriptors (reader and writer pipe ends) of every Queue this
        process holds — own, parent, admin, and all child queues — that must
        be protected from closure."""
        return foldl(lambda a, b: a+[b._reader.fileno(), b._writer.fileno()],
                     [self._myInputQ, self._parentQ, self._adminQ] +
                     list(self._queues.values()), [])
    def childResetFileNumList(self):
        """File descriptors of the parent Queue and all child queues (i.e.
        everything except our own input and admin queues); used when a child
        process resets its inherited descriptors."""
        return foldl(lambda a, b: a+[b._reader.fileno(), b._writer.fileno()],
                     [self._parentQ] +
                     list(self._queues.values()), [])
    def add_endpoint(self, child_addr, child_queue):
        """Register the Queue used to reach a direct child Actor."""
        self._queues.add(child_addr, child_queue)
    def set_address_to_dead(self, child_addr):
        """Forget all routing for ``child_addr`` and record it as dead."""
        self._queues.rmv(child_addr)
        self._fwdvia.rmv(child_addr)        # no longer a forwarding target...
        self._fwdvia.rmv_value(child_addr)  # ...nor a relay for other targets
        self._deadaddrs.append(child_addr)
    def abort_core_run(self):
        """Request that the core receive loop terminate at its next check."""
        self._aborting_run = Thespian__Run_Terminated()
    def core_common_transmit(self, transmit_intent, from_addr):
        """Put the intent's serialized message on the appropriate Queue:
        our own queue for self-sends, a direct child's queue when known,
        otherwise upward to the parent (or the admin at the top) for
        further routing.  Reports Sent/Failed/DeadTarget via tx_done."""
        try:
            if self.isMyAddress(transmit_intent.targetAddr):
                if transmit_intent.message:
                    self._myInputQ.put( (from_addr, transmit_intent.serMsg),
                                        True,
                                        timePeriodSeconds(transmit_intent.delay()))
            else:
                tgtQ = self._queues.find(transmit_intent.targetAddr)
                if tgtQ:
                    tgtQ.put((from_addr, transmit_intent.serMsg), True,
                             timePeriodSeconds(transmit_intent.delay()))
                else:
                    # None means sent by parent, so don't send BACK to parent if unknown
                    topOrFromBelow = from_addr if self._parentQ else None
                    (self._parentQ or self._adminQ).put(
                        (topOrFromBelow, transmit_intent.serMsg),
                        True,
                        timePeriodSeconds(transmit_intent.delay()))
            transmit_intent.tx_done(SendStatus.Sent)
            return
        except Q.Full:
            # Timed out waiting for queue space; fall through to failure.
            pass
        # Exit-related messages report Failed; everything else DeadTarget.
        transmit_intent.tx_done(SendStatus.DeadTarget if not isinstance(
            transmit_intent._message,
            (ChildActorExited, ActorExitRequest)) else SendStatus.Failed)
    def core_common_receive(self, incoming_handler, local_routing_addr, run_time_f):
        """Core scheduling method; called by the current Actor process when
           idle to await new messages (or to do background
           processing).

           incoming_handler is one of: TransmitOnly (return
           immediately), None (return the first envelope), a
           ReturnTargetAddressWithEnvelope marker (return resolved
           address and envelope), or a callable invoked for each
           envelope until it returns a falsy value.  Returns an
           (address, result) pair.
        """
        if incoming_handler == TransmitOnly or \
           isinstance(incoming_handler, TransmitOnly):
            # transmits are not queued/multistage in this transport, no waiting
            return local_routing_addr, 0
        self._aborting_run = None
        while self._aborting_run is None:
            ct = currentTime()
            if run_time_f().view(ct).expired():
                break
            try:
                # Unfortunately, the Queue object is not signal-safe,
                # so a frequent wakeup is needed to check
                # _checkChildren and _shutdownSignalled.
                rcvd = self._myInputQ.get(True,
                                          min(run_time_f().view(ct).remainingSeconds() or
                                              QUEUE_CHECK_PERIOD,
                                              QUEUE_CHECK_PERIOD))
            except Q.Empty:
                if not self._checkChildren and not self._shutdownSignalled:
                    # Probably a timeout, but let the while loop decide for sure
                    continue
                rcvd = 'BuMP'
            if rcvd == 'BuMP':
                # Internal wakeup marker (see interrupt_run), not a real
                # message: synthesize the appropriate internal message.
                relayAddr = sendAddr = destAddr = local_routing_addr
                if self._checkChildren:
                    self._checkChildren = False
                    msg = ChildMayHaveDied()
                elif self._shutdownSignalled:
                    self._shutdownSignalled = False
                    msg = ActorExitRequest()
                else:
                    return local_routing_addr, Thespian__UpdateWork()
            else:
                relayAddr, (sendAddr, destAddr, msg) = rcvd
                if not self._queues.find(sendAddr):
                    # We don't directly know about this sender, so
                    # remember what path this arrived on to know where to
                    # direct future messages for this sender.
                    if relayAddr and self._queues.find(relayAddr) and \
                       not self._fwdvia.find(sendAddr):
                        # relayAddr might be None if it's our parent, which is OK because
                        # the default message forwarding is to the parent. If it's not
                        # none, it should be in self._queues though!
                        self._fwdvia.add(sendAddr, relayAddr)
            if hasattr(self, '_addressMgr'):
                destAddr,msg = self._addressMgr.prepMessageSend(destAddr, msg)
            if destAddr is None:
                thesplog('Unexpected target inaccessibility for %s', msg,
                         level = logging.WARNING)
                raise CannotPickleAddress(destAddr)
            if msg is SendStatus.DeadTarget:
                thesplog('Faking message "sent" because target is dead and recursion avoided.')
                continue
            if self.isMyAddress(destAddr):
                # Message is for this process: hand to the caller/handler.
                if isinstance(incoming_handler, ReturnTargetAddressWithEnvelope):
                    return destAddr, ReceiveEnvelope(sendAddr, msg)
                if incoming_handler is None:
                    return destAddr, ReceiveEnvelope(sendAddr, msg)
                r = Thespian__Run_HandlerResult(
                    incoming_handler(ReceiveEnvelope(sendAddr, msg)))
                if not r:
                    # handler returned False-ish, indicating run() should exit
                    return destAddr, r
            else:
                # Message is for another Actor: route it onward.
                # Note: the following code has implicit knowledge of serialize() and xmit
                putQValue = lambda relayer: (relayer, (sendAddr, destAddr, msg))
                deadQValue = lambda relayer: (relayer, (sendAddr,
                                                        self._adminAddr,
                                                        DeadEnvelope(destAddr, msg)))
                # Must forward this packet via a known forwarder or our parent.
                send_dead = False
                tgtQ = self._queues.find(destAddr)
                if tgtQ:
                    sendArgs = putQValue(local_routing_addr), True
                if not tgtQ:
                    # Not a direct child; try the learned forwarding path,
                    # otherwise check if the destination is known dead.
                    tgtA = self._fwdvia.find(destAddr)
                    if tgtA:
                        tgtQ = self._queues.find(tgtA)
                        sendArgs = putQValue(None),
                    else:
                        for each in self._deadaddrs:
                            if destAddr == each:
                                send_dead = True
                if tgtQ:
                    try:
                        tgtQ.put(*sendArgs,
                                 timeout=timePeriodSeconds(MAX_QUEUE_TRANSMIT_PERIOD))
                        continue
                    except Q.Full:
                        thesplog('Unable to send msg %s to dest %s; dead lettering',
                                 msg, destAddr)
                        send_dead = True
                if send_dead:
                    # Destination known dead (or its queue is stuck full):
                    # wrap in a DeadEnvelope addressed to the admin.
                    try:
                        (self._parentQ or self._adminQ).put(
                            deadQValue(local_routing_addr if self._parentQ else None),
                            True,
                            timePeriodSeconds(MAX_QUEUE_TRANSMIT_PERIOD))
                    except Q.Full:
                        thesplog('Unable to send deadmsg %s to %s or admin; discarding',
                                 msg, destAddr)
                    continue
                # Not sure how to route this message yet.  It
                # could be a heretofore silent child of one of our
                # children, it could be our parent (whose address
                # we don't know), or it could be elsewhere in the
                # tree.
                #
                # Try sending it to the parent first.  If the
                # parent can't determine the routing, it will be
                # sent back down (relayAddr will be None in that
                # case) and it must be sprayed out to all children
                # in case the target lives somewhere beneath us.
                # Note that _parentQ will be None for top-level
                # actors, which send up to the Admin instead.
                #
                # As a special case, the external system is the
                # parent of the admin, but the admin is the
                # penultimate parent of all others, so this code
                # must keep the admin and the parent from playing
                # ping-pong with the message.  But... the message
                # might be directed to the external system, which
                # is the parent of the Admin, so we need to check
                # with it first.
                #   parentQ == None but adminQ good --> external
                #   parentQ and adminQ and myAddress == adminAddr --> Admin
                #   parentQ and adminQ and myAddress != adminAddr --> other Actor
                if relayAddr:
                    # Send message up to the parent to see if the
                    # parent knows how to forward it
                    try:
                        (self._parentQ or self._adminQ).put(
                            putQValue(local_routing_addr if self._parentQ else None),
                            True,
                            timePeriodSeconds(MAX_QUEUE_TRANSMIT_PERIOD))
                    except Q.Full:
                        thesplog('Unable to send dead msg %s to %s or admin; discarding',
                                 msg, destAddr)
                else:
                    # Sent by parent or we are an external, so this
                    # may be some grandchild not currently known.
                    # Do the worst case and just send this message
                    # to ALL immediate children, hoping it will
                    # get there via some path.
                    for A,AQ in self._queues.items():
                        if A not in [self._adminAddr, str(self._adminAddr)]:
                            # None means sent by Parent, so don't
                            # send BACK to parent if unknown
                            try:
                                AQ.put(putQValue(None),
                                       True,
                                       timePeriodSeconds(MAX_QUEUE_TRANSMIT_PERIOD))
                            except Q.Full:
                                pass
        # Loop exited: either aborted (return the abort result) or the
        # allowed run time expired.
        if self._aborting_run is not None:
            return local_routing_addr, self._aborting_run
        return local_routing_addr, Thespian__Run_Expired()
    def interrupt_run(self, signal_shutdown=False, check_children=False):
        # Wake up core_common_receive.  May be invoked from a signal
        # handler context, so only flag-setting is always safe here; the
        # receive loop polls these flags on its periodic Queue timeout.
        self._shutdownSignalled |= signal_shutdown
        self._checkChildren |= check_children
        # Do not put anything on the Queue if running in the context
        # of a signal handler, because Queues are not signal-context
        # safe.  Instead, those will just have to depend on the short
        # maximum Queue get wait time.
        if not signal_shutdown and not check_children:
            try:
                self._myInputQ.put_nowait('BuMP')
            except Q.Full:
                # if the queue is full, it should be reading something
                # off soon which will accomplish the same interrupt
                # effect, so nothing else needs to be done here.
                pass
class MultiprocessQueueTCore_Actor(MultiprocessQueueTCore_Common):
    # Transport core for an Actor.  All access here is
    # single-threaded, and there is only ever one address, so this is
    # a simple interface that delegates directly to the common core.
    def __init__(self, myQueue, parentQ, adminQ, adminAddr, myAddr):
        super(MultiprocessQueueTCore_Actor, self).__init__(myQueue, parentQ,
                                                           adminQ, adminAddr)
        self._myAddr = myAddr  # the single address this Actor answers to
    def isMyAddress(self, addr):
        # True iff addr is this Actor's (only) address.
        return addr == self._myAddr
    def core_transmit(self, transmit_intent, my_address):
        # n.b. my_address == self._myAddr
        return self.core_common_transmit(transmit_intent, my_address)
    def core_receive(self, incoming_handler, my_address, run_time_f):
        # n.b. my_address == self._myAddr; discard the address element of
        # the (addr, result) pair -- only the result matters for a
        # single-address Actor.
        return self.core_common_receive(incoming_handler, my_address, run_time_f)[1]
    def core_close(self, _addr):
        # Nothing to release; Queues are torn down with the process.
        pass
class MultiprocessQueueTCore_External(MultiprocessQueueTCore_Common):
    # Transport core for an External interface.  There may be multiple
    # External interfaces (generated by the ActorSystem.private()
    # context generator), each represented by a
    # MultiprocessQueueTransport object, but all sharing this single
    # core object.  There can be only one input multiprocess.Queue for
    # this process, which is managed by this object.  Only one thread
    # can be waiting at a time, but each MultiprocessQueueTransport
    # has a local-unique address, along with a threading.Queue.  This
    # common module hosts the multiprocess.Queue endpoint and
    # demultiplexes incoming messages to the correct threading.Queue.
    def __init__(self, myQueue, parentQ, adminQ, adminAddr, my_address):
        super(MultiprocessQueueTCore_External, self).__init__(myQueue, parentQ,
                                                              adminQ, adminAddr)
        self._my_address = my_address
        # Start in "simple" single-threaded mode; make_external_clone()
        # swaps these bindings to the thread-aware tsafe_* variants on
        # first clone.
        self.isMyAddress = self.simple_isMyAddress
        self.core_transmit = self.simple_core_transmit
        self.core_receive = self.simple_core_receive
        self.core_close = self.simple_core_close
        self.abort_run = self.abort_core_run
        self.clone_lock = threading.Lock()  # guards clone_count
        self.clone_count = 0  # mints process-unique clone addresses
    def new_clone_id(self):
        # Return a process-unique id for a new clone.
        with self.clone_lock:
            self.clone_count += 1
            return self.clone_count
    # Initially, the following simple methods will be used (for better
    # performance) when there are no multi-threaded contexts declared.
    def simple_isMyAddress(self, addr):
        return addr == self._my_address
    def simple_core_transmit(self, transmit_intent, my_address):
        # n.b. my_address == self._myAddr
        return self.core_common_transmit(transmit_intent, my_address)
    def simple_core_receive(self, incoming_handler, my_address, run_time_f):
        # n.b. my_address == self._myAddr; only the result element of the
        # (addr, result) pair is meaningful in single-address mode.
        return self.core_common_receive(incoming_handler, my_address, run_time_f)[1]
    def simple_core_close(self, _addr):
        pass
    # The following establishes additional external entrypoints, as
    # well as switching from the simple direct calls to the underlying
    # thread core over to thread-aware regulated calls.
    def make_external_clone(self):
        # Create a new local-unique address backed by its own
        # threading.Queue; on the first clone, also start the demux
        # subthread and switch to the tsafe_* method bindings.
        with self.clone_lock:
            self.clone_count += 1
            new_addr = ActorAddress(QueueActorAddress('~%d' % self.clone_count))
            if self.isMyAddress == self.simple_isMyAddress:
                self._my_tqueues = {str(self._my_address): Q.Queue()}
                self._subthread = threading.Thread(target=self.subcontext,
                                                   name='subcontext')
                self._subthread.daemon = True
                self._subthread.start()
                self.isMyAddress = self.tsafe_isMyAddress
                self.core_transmit = self.tsafe_core_transmit
                self.core_receive = self.tsafe_core_receive
                self.core_close = self.tsafe_core_close
                self.abort_run = self.tsafe_abort_run
            self._my_tqueues[str(new_addr)] = Q.Queue()
            return new_addr
    def tsafe_isMyAddress(self, addr):
        # Any address with a local threading.Queue (even a closed one,
        # whose entry is None) counts as "mine".
        return str(addr) in self._my_tqueues
    def tsafe_core_close(self, address):
        # Set to None so that this is recognized as a local address,
        # but no longer valid.
        self._my_tqueues[str(address)] = None
        if address != self._my_address:
            return
        # Closing the primary address shuts down the demux subthread.
        self._full_close = True
        self.abort_core_run()
        self.interrupt_run()
        self._subthread.join()
    def tsafe_core_transmit(self, transmit_intent, my_address):
        # Transmit is already single-threaded in the asyncTransport
        # portion, so it's sufficient to simply call-through to the
        # lower layer.
        return self.core_common_transmit(transmit_intent, my_address)
    def tsafe_core_receive(self, incoming_handler, my_address, run_time_f):
        # Only one thread should be allowed to run the lower-level
        # receive; other threads will just wait on their local tqueue;
        # when the lower-level receive obtains input for this process
        # but a different thread, it is placed on the tqueue.
        if incoming_handler == TransmitOnly or \
           isinstance(incoming_handler, TransmitOnly):
            # transmits are not queued/multistage in this transport, no waiting
            return 0
        self._abort_my_run = False
        while not self._abort_my_run:
            ct = currentTime()
            if run_time_f().view(ct).expired():
                break
            try:
                rcv_envelope = self._my_tqueues[str(my_address)].get(
                    True,
                    run_time_f().view(ct).remainingSeconds() or QUEUE_CHECK_PERIOD)
            except Q.Empty:
                # probably a timeout
                continue
            if rcv_envelope != None:
                if incoming_handler is None:
                    return rcv_envelope
                r = incoming_handler(rcv_envelope)
                if not r:
                    return r
            # else loop
        return None
    def tsafe_abort_run(self):
        # Signal tsafe_core_receive's loop (in whatever thread runs it)
        # to exit.
        self._abort_my_run = True
    def subcontext(self):
        # Demux thread body: pull (addr, envelope) pairs off the shared
        # multiprocess queue forever and route each to the per-address
        # threading.Queue; exits when core_close()/abort sets the flags.
        while not getattr(self, '_full_close', False):
            rcv_addr_and_envelope = self.core_common_receive(
                ReturnTargetAddressWithEnvelope(),
                self._my_address,
                lambda t=ExpirationTimer(): t)  # forever
            if self._aborting_run:
                # Exit from this core thread
                return
            if rcv_addr_and_envelope is None:
                continue
            addr, env = rcv_addr_and_envelope
            if env is None:  # or isinstance(env, Thespian__UpdateWork):
                continue
            # Only deliver to addresses whose tqueue is still open
            # (closed addresses have a None entry).
            if str(addr) in self._my_tqueues and self._my_tqueues[str(addr)]:
                self._my_tqueues[str(addr)].put(env)
class MultiprocessQueueTransport(asyncTransportBase, wakeupTransportBase):
    """A transport designed to use a multiprocess.Queue instance to send
       and receive messages with other multiprocess Process actors.
       There is one instance of this object in each Actor.  This
       object maintains a single input queue (used by its parent and
       any children it creates) and a table of all known sub-Actor
       addresses and their queues (being the most immediate child
       Actor queue that moves the message closer to the child target,
       or the Parent actor queue to which the message should be
       forwarded if no child is identified).
    """
    def __init__(self, initType, *args):
        super(MultiprocessQueueTransport, self).__init__()
        if isinstance(initType, ExternalInterfaceTransportInit):
            # External process that's going to talk "in". There is no
            # parent, and the child is the systemAdmin.
            capabilities, logDefs, self._concontext = args
            NewQ = self._concontext.Queue if self._concontext else Queue
            self._adminQ = NewQ(MAX_ADMIN_QUEUESIZE)
            self._adminAddr = self.getAdminAddr(capabilities)
            self._myQAddress = ActorAddress(QueueActorAddress('~'))
            self._myInputQ = NewQ(MAX_ACTOR_QUEUESIZE)
            self._QCore = MultiprocessQueueTCore_External(self._myInputQ, None, self._adminQ, self._adminAddr, self._myQAddress)
        elif isinstance(initType, MpQTEndpoint):
            # Child Actor: endpoint details were prepared by the parent
            # via prepEndpoint().
            _addrInst, myAddr, myQueue, parentQ, adminQ, adminAddr, ccon = initType.args
            self._concontext = ccon
            self._adminQ = adminQ
            self._adminAddr = adminAddr
            self._myQAddress = myAddr
            self._QCore = MultiprocessQueueTCore_Actor(myQueue, parentQ, adminQ, myAddr, myAddr)
        elif isinstance(initType, ExternalQTransportCopy):
            # Thread-local clone of an existing External transport; the
            # underlying core object is shared.
            self._QCore, = args
            self._myQAddress = self._QCore.make_external_clone()
        else:
            thesplog('MultiprocessQueueTransport init of type %s unsupported!', str(initType),
                     level=logging.ERROR)
        self._nextSubInstance = 0
    def close(self):
        # Release this transport's address in the shared core.
        self._QCore.core_close(self._myQAddress)
    def external_transport_clone(self):
        # Return a unique context for actor communication from external
        return MultiprocessQueueTransport(ExternalQTransportCopy(),
                                          self._QCore)
    def protectedFileNumList(self):
        return self._QCore.protectedFileNumList()
    def childResetFileNumList(self):
        return self._QCore.childResetFileNumList()
    @property
    def myAddress(self): return self._myQAddress
    @staticmethod
    def getAddressFromString(addrspec):
        # addrspec is assumed to be a valid address string
        return ActorAddress(QueueActorAddress(addrspec))
    @staticmethod
    def getAdminAddr(capabilities):
        # Admin address defaults to 'ThespianQ' unless overridden in
        # capabilities.
        return MultiprocessQueueTransport.getAddressFromString(
            capabilities.get('Admin Address', 'ThespianQ'))
    @staticmethod
    def probeAdmin(addr):
        """Called to see if there might be an admin running already at the
           specified addr.  This is called from the systemBase, so
           simple blocking operations are fine.  This only needs to
           check for a responder; higher level logic will verify that
           it's actually an ActorAdmin suitable for use.
        """
        # never reconnectable; Queue objects are only available from
        # the constructor and cannot be synthesized or passed.
        return False
    def _updateStatusResponse(self, resp):
        "Called to update a Thespian_SystemStatus or Thespian_ActorStatus with common information"
        asyncTransportBase._updateStatusResponse(self, resp)
        wakeupTransportBase._updateStatusResponse(self, resp)
    def _nextSubAddress(self):
        # Mint the next sub-address under this Actor's address.
        subAddrStr = self._myQAddress.addressDetails.subAddr(self._nextSubInstance)
        self._nextSubInstance = self._nextSubInstance + 1
        return ActorAddress(QueueActorAddress(subAddrStr))
    def prepEndpoint(self, assignedLocalAddr, capabilities):
        """In the parent, prepare to establish a new communications endpoint
           with a new Child Actor.  The result of this call will be
           passed to a created child process to use when initializing
           the Transport object for that class; the result of this
           call will also be kept by the parent to finalize the
           communications after creation of the Child by calling
           connectEndpoint() with this returned object.
        """
        NewQ = self._concontext.Queue if self._concontext else Queue
        if isinstance(assignedLocalAddr.addressDetails, ActorLocalAddress):
            # Normal child Actor: gets a fresh input queue.
            return MpQTEndpoint(assignedLocalAddr.addressDetails.addressInstanceNum,
                                self._nextSubAddress(),
                                NewQ(MAX_ACTOR_QUEUESIZE),
                                self._QCore.mainLocalInputQueueEndpoint(),
                                self._QCore.adminQueueEndpoint(),
                                self._QCore.adminAddr,
                                self._concontext)
        # Non-local address (presumably the Admin endpoint): its input
        # queue is the admin queue itself rather than a new queue --
        # TODO(review): confirm the third argument is intentional here.
        return MpQTEndpoint(None,
                            assignedLocalAddr,
                            self._QCore.adminQueueEndpoint(),
                            self._QCore.mainLocalInputQueueEndpoint(),
                            self._QCore.adminQueueEndpoint(),
                            self._adminAddr,
                            self._concontext)
    def connectEndpoint(self, endPoint):
        """Called by the Parent after creating the Child to fully connect the
           endpoint to the Child for ongoing communications."""
        (_addrInst, childAddr, childQueue, _myQ,
         _adminQ, _adminAddr, _concurrency_context) = endPoint.args
        self._QCore.add_endpoint(childAddr, childQueue)
    def deadAddress(self, addressManager, childAddr):
        # Can no longer send to this Queue object.  Delete the
        # entry; this will cause forwarding of messages, although
        # the addressManager is also aware of the dead address and
        # will cause DeadEnvelope forwarding.  Deleting here
        # prevents hanging on queue full to dead children.
        thesplog('deadAddress %s', childAddr)
        addressManager.deadAddress(childAddr)
        self._QCore.set_address_to_dead(childAddr)
        super(MultiprocessQueueTransport, self).deadAddress(addressManager, childAddr)
    def _runWithExpiry(self, incomingHandler):
        # wakeupTransportBase hook: run the core receive loop for the
        # currently-configured run_time.
        return self._QCore.core_receive(incomingHandler, self.myAddress,
                                        lambda s=self: s.run_time)
    def abort_run(self, drain=False):
        # Queue transmits immediately, so no draining needed
        self._QCore.abort_core_run()
    def serializer(self, intent):
        wrappedMsg = self._myQAddress, intent.targetAddr, intent.message
        # For multiprocess Queues, the serialization (pickling) of the
        # outbound message happens in a separate process.  This is
        # unfortunate because if the message is not pickle-able, the
        # exception is thrown (and not handled) in the other process,
        # and this process has no indication of the issue.  The
        # unfortunate solution is that pickling must be tried in the
        # current process first to detect these errors (unfortunate
        # because that means each message gets pickled twice,
        # impacting performance).
        discard = pickle.dumps(wrappedMsg)
        return wrappedMsg
    def interrupt_wait(self, signal_shutdown=False, check_children=False):
        # Wake the core receive loop (safe to call from signal context).
        self._QCore.interrupt_run(signal_shutdown, check_children)
    def _scheduleTransmitActual(self, transmitIntent):
        # asyncTransportBase hook: queue sends complete immediately.
        self._QCore.core_transmit(transmitIntent, self.myAddress)
| StarcoderdataPython |
212049 | #!/usr/bin/env python
"""
Styled just like an apt-get installation.
"""
import time
import quo
from quo.progress import formatters
# Colour scheme mimicking apt-get output: black-on-yellow label and
# percentage, green "current" text, default bar styling.
style = quo.styles.Style.add(
    {
        "label": "bg:#ffff00 #000000",
        "percentage": "bg:#ffff00 #000000",
        "current": "#448844",
        "bar": "",
    }
)
def main():
    """Render an apt-get styled progress bar for a simulated install."""
    # Layout produces lines like: "Installing: [ 42%] ######......"
    bar_layout = [
        formatters.Label(),
        formatters.Text(": [", style="class:percentage"),
        formatters.Percentage(),
        formatters.Text("]", style="class:percentage"),
        formatters.Text(" "),
        formatters.Bar(sym_a="#", sym_b="#", sym_c="."),
        formatters.Text(" "),
    ]
    with quo.ProgressBar(style=style, formatters=bar_layout) as progress:
        for _ in progress(range(1600), label="Installing"):
            time.sleep(0.01)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
5138745 | <reponame>anschwa/savetheyak<filename>savetheyak/views.py
# -*- coding: utf-8 -*-
import datetime as dt
try: # PY3
from urllib.parse import urljoin
except ImportError: # PY2
from urlparse import urljoin
from collections import Counter
from flask import render_template, request
from werkzeug.contrib.atom import AtomFeed
from flask_flatpages import pygments_style_defs
from .app import app, pages, freezer
def _ensure_datetime(val):
"""Ensure ``val`` is a datetime.datetime object, converting it
from a datetime.date if necessary.
"""
ret = None
if isinstance(val, dt.date):
ret = dt.datetime.combine(val, dt.datetime.min.time())
elif isinstance(val, dt.datetime):
ret = val
else:
raise ValueError('Could not convert {} to a datetime.datetime')
return ret
def _sort_by_updated(pages):
    """Return *pages* ordered newest-first by their 'updated' metadata,
    with any page listed in EXCLUDE_PAGES filtered out.
    """
    ordered = sorted(pages,
                     key=lambda page: _ensure_datetime(page.meta['updated']),
                     reverse=True)
    return [page for page in ordered if page.path not in EXCLUDE_PAGES]
# Flatpage paths that must never appear in listings or feeds.
EXCLUDE_PAGES = ['contribute', 'template', 'example-dep', "README"]
ALL_PAGES = [p for p in pages if p.path not in EXCLUDE_PAGES]
# All guide pages, newest first.
ALL_SORTED = _sort_by_updated(ALL_PAGES)

# Collect every tag used by any guide so they can be ranked by frequency.
tags = []
for page in ALL_PAGES:
    tags.extend(page.meta.get('tags', []))
tag_counter = Counter(tags) # Dict of tag frequency

# List of tags sorted by frequency
SORTED_TAGS = sorted(tag_counter, reverse=True, key=lambda t: tag_counter[t])
# Operating systems the site can filter by (see the 'os' view).
ALL_OS = ['macosx', 'linux', 'windows']
@freezer.register_generator
def pages_url_generator():
    # Tell Frozen-Flask which 'page' URLs to freeze: every flatpage
    # except the repository README.
    pages_no_readme = [p for p in pages if p.path != "README"]
    for page in pages_no_readme:
        yield 'page', {'path': page.path}
@app.route('/')
def home():
    # Landing page: the full newest-first guide index; home=True lets
    # the template distinguish it from filtered index views.
    return render_template('index.html', pages=ALL_SORTED,
                           all_tags=SORTED_TAGS, all_os=ALL_OS, home=True)
# @app.route('/guides/')
# def guides():
# return render_template('index.html', pages=ALL_SORTED,
# all_tags=SORTED_TAGS, all_os=ALL_OS)
@app.route('/<path:path>/')
def page(path):
    # Render a single guide.  The 'deps' metadata lists paths of
    # prerequisite guides, resolved here to page objects for the
    # template; 'template' metadata may override the default layout.
    page = pages.get_or_404(path)
    dep_list = page.meta.get('deps', [])
    dep_pages = [pages.get(dep) for dep in dep_list]
    template = page.meta.get('template', 'page.html')
    return render_template(template, page=page, deps=dep_pages)
@app.route('/contribute/')
def contribute():
    # Static 'contribute' flatpage (excluded from the regular listings).
    page = pages.get_or_404('contribute')
    return render_template('page.html', page=page)
@app.route('/os/<string:os>/')
def os(os):
    """Render the guide index filtered to a single operating system.

    NOTE: the view and its parameter shadow the builtin ``os`` module
    name; renaming would change the Flask endpoint name (used in URL
    building), so it is kept.
    """
    filtered = [p for p in pages if os in p.meta.get('os', [])]
    # Fix capitalization of MacOSX; other OS names are displayed as-is.
    # (The original had a no-op 'else: os = os' branch, removed here.)
    if os.lower() == 'macosx':
        os = 'MacOSX'
    latest = _sort_by_updated(filtered)
    return render_template('index.html', pages=latest, os=os,
                           all_tags=SORTED_TAGS, all_os=ALL_OS)
@app.route('/tag/<string:tag>/')
def tag(tag):
    # Guide index filtered to pages carrying the given tag.
    filtered = [p for p in pages if tag in p.meta.get('tags', [])]
    latest = _sort_by_updated(filtered)
    return render_template('index.html', pages=latest, tag=tag,
                           all_tags=SORTED_TAGS, all_os=ALL_OS)
def make_external(url):
    # Resolve a site-relative path against the configured BASE_URL to
    # produce an absolute URL (needed for feed entries).
    return urljoin(app.config["BASE_URL"], url)
@app.route('/feed/recent.atom')
def recent_feed():
    # Atom feed of the (up to) 15 most recently updated guides.
    feed = AtomFeed('Save The Yak - Recent Guides', feed_url=request.url,
                    url=request.url_root)
    all_pages = [p for p in pages if p.path not in EXCLUDE_PAGES]
    if len(all_pages) >= 15:
        latest = _sort_by_updated(all_pages)[:15]
    else:
        latest = _sort_by_updated(all_pages)
    for page in latest:
        # published is set to the 'updated' date as well: the page
        # metadata visible here records only a single date per guide.
        feed.add(page.meta['title'],
                 page.meta.get('description', make_external(page.path)),
                 content_type="html",
                 author=page.meta.get('contributors', ['Anonymous'])[0],
                 url=make_external(page.path),
                 updated=_ensure_datetime(page.meta['updated']),
                 published=_ensure_datetime(page.meta['updated']))
    return feed.get_response()
@app.errorhandler(404)
def page_not_found(e):
    # Custom 404 page.
    return render_template('404.html'), 404
@app.route('/pygments.css')
def pygments_css():
    # Serve the Pygments 'manni' theme stylesheet for code highlighting.
    return pygments_style_defs('manni'), 200, {'Content-Type': 'text/css'}
| StarcoderdataPython |
3318438 | <reponame>alliance-genome/ontobio
#!/usr/bin/env python
"""
Command line wrapper to ontobio.golr library.
Type:
qbiogolr -h
For instructions
"""
import argparse
from ontobio.golr.golr_associations import search_associations
from ontobio.ontol_factory import OntologyFactory
from ontobio.io.ontol_renderers import *
import networkx as nx
from networkx.algorithms.dag import ancestors, descendants
from networkx.drawing.nx_pydot import write_dot
from prefixcommons.curie_util import expand_uri
from ontobio.slimmer import get_minimal_subgraph
#from ontobio.golr.golr_associations import search_associations, search_associations_compact, GolrFields, select_distinct_subjects, get_objects_for_subject, get_subjects_for_object
import logging
def main():
    """
    Wrapper for OGR: parse CLI options, query golr for associations of
    each supplied id (in both subject and object directions), print
    them, and optionally render the relevant ontology subgraph.
    """
    parser = argparse.ArgumentParser(
        description='Command line interface to python-ontobio.golr library'
        """
        Provides command line interface onto the ontobio.golr python library, a high level
        abstraction layer over Monarch and GO solr indices.
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-r', '--resource', type=str, required=False,
                        help='Name of ontology')
    parser.add_argument('-d', '--display', type=str, default='o', required=False,
                        help='What to display: some combination of o, s, r. o=object ancestors, s=subject ancestors. If r present, draws s<->o relations ')
    parser.add_argument('-o', '--outfile', type=str, required=False,
                        help='Path to output file')
    parser.add_argument('-t', '--to', type=str, required=False,
                        help='Output to (tree, dot, ...)')
    parser.add_argument('-C', '--category', type=str, required=False,
                        help='Category')
    parser.add_argument('-c', '--container_properties', nargs='*', type=str, required=False,
                        help='Properties to nest in graph')
    parser.add_argument('-s', '--species', type=str, required=False,
                        help='NCBITaxon ID')
    parser.add_argument('-e', '--evidence', type=str, required=False,
                        help='ECO ID')
    parser.add_argument('-G', '--graph', type=str, default='', required=False,
                        help='Graph type. m=minimal')
    parser.add_argument('-S', '--slim', nargs='*', type=str, required=False,
                        help='Slim IDs')
    parser.add_argument('-M', '--mapids', type=str, required=False,
                        help='Map identifiers to this ID space, e.g. ENSEMBL')
    parser.add_argument('-p', '--properties', nargs='*', type=str, required=False,
                        help='Properties')
    parser.add_argument('-v', '--verbosity', default=0, action='count',
                        help='Increase output verbosity')

    parser.add_argument('ids',nargs='*')

    # ontology
    args = parser.parse_args()

    # -v/-vv select INFO/DEBUG logging; default is WARNING.
    if args.verbosity >= 2:
        logging.basicConfig(level=logging.DEBUG)
    elif args.verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.WARNING)

    logging.info("Welcome!")

    # Load the ontology (and its relation-filtered graph) only if a
    # resource handle was supplied.
    ont = None
    g = None
    handle = args.resource
    if handle is not None:
        logging.info("Handle: {}".format(handle))
        factory = OntologyFactory()
        logging.info("Factory: {}".format(factory))
        ont = factory.create(handle)
        logging.info("Created ont: {}".format(ont))
        g = ont.get_filtered_graph(relations=args.properties)
        w = GraphRenderer.create(args.to)

    nodes = set()
    display = args.display

    # query all IDs, gathering associations
    assocs = []
    for id in args.ids:
        this_assocs, facets = search_golr_wrap(id,
                                               args.category,
                                               subject_taxon=args.species,
                                               rows=1000,
                                               slim=args.slim,
                                               evidence=args.evidence,
                                               map_identifiers=args.mapids)
        assocs += this_assocs

    logging.info("Num assocs: {}".format(len(assocs)))
    # Tab-separated report: subject, subject label, relation, objects.
    for a in assocs:
        print("{}\t{}\t{}\t{}".format(a['subject'],
                                      a['subject_label'],
                                      a['relation'],
                                      ";".join(a['objects'])))

    if ont is not None:
        # gather all ontology classes used ('o' adds objects and their
        # ancestors, 's' adds subjects and their ancestors)
        for a in assocs:
            objs = a['objects']
            if display.find('r') > -1:
                pass
            if display.find('o') > -1:
                for obj in objs:
                    nodes.add(obj)
                    if ont is not None:
                        nodes.update(ont.ancestors(obj))
            if display.find('s') > -1:
                sub = a['subject']
                nodes.add(sub)
                if ont is not None:
                    nodes.update(ont.ancestors(sub))

        # create a subgraph
        subg = g.subgraph(nodes)

        # optionally add edges between subj and obj nodes
        if display.find('r') > -1:
            for a in assocs:
                rel = a['relation']
                sub = a['subject']
                objs = a['objects']
                if rel is None:
                    rel = 'rdfs:seeAlso'
                for obj in objs:
                    logging.info("Adding assoc rel {} {} {}".format(sub,obj,rel))
                    subg.add_edge(obj,sub,pred=rel)

        # display tree/graph
        # NOTE(review): 'objs' here is whatever value the last loop
        # iteration left behind, and is unbound (NameError) when there
        # are no associations -- confirm whether the full set of object
        # ids was intended.
        show_graph(subg, nodes, objs, args)
# TODO
def cmd_map2slim(ont, args):
    # NOTE(review): unfinished (see TODO above).  References names not
    # defined or imported in this module ('g', 'resolve_ids', 'pp_node')
    # and will raise NameError if called; kept for future work.
    subset_term_ids = ont.extract_subset(args.slim)
    nodes = set()
    for id in resolve_ids(g, args.ids, args):
        nodes.add(id)
        assocs = search_associations(object=id,
                                     subject_taxon=args.species,
                                     slim=subset_term_ids,
                                     rows=0,
                                     subject_category=args.category)
        for a in assocs:
            print(a)
            for x in a['objects']:
                print(' '+pp_node(g,x,args))
def show_graph(g, nodes, query_ids, args):
    """
    Writes graph
    """
    # 'm' in --graph requests a minimal subgraph: prune to the parts
    # relevant to the query ids before rendering.
    if args.graph.find('m') > -1:
        logging.info("SLIMMING")
        g = get_minimal_subgraph(g, query_ids)
    # Render in the format selected by --to, optionally to --outfile.
    w = GraphRenderer.create(args.to)
    if args.outfile is not None:
        w.outfile = args.outfile
    logging.info("Writing subg from "+str(g))
    w.write(g, query_ids=query_ids, container_predicates=args.container_properties)
def search_golr_wrap(id, category, **args):
    """Search golr for associations of *id* in both directions (as
    object and as subject), returning (combined_assocs, facets).
    """
    fwd_assocs, fwd_facets = search_compact_wrap(object=id, subject_category=category, **args)
    rev_assocs, rev_facets = search_compact_wrap(subject=id, object_category=category, **args)
    # Prefer the facets from the subject-direction query when it
    # produced any associations; otherwise keep the object-direction
    # facets.
    chosen_facets = rev_facets if rev_assocs else fwd_facets
    return fwd_assocs + rev_assocs, chosen_facets
def search_compact_wrap(**args):
    # Thin wrapper over search_associations that requests compact
    # results and returns (compact_associations, facet_counts).
    searchresult = search_associations(use_compact_associations=True,
                                       facet_fields=[],
                                       **args
    )
    return searchresult['compact_associations'], searchresult['facet_counts']
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3414891 | """
Functions for two particles
"""
import numpy as np
from .phys import pt
class TwoParticles():
    """Container for two-particle event data loaded from text or rows."""
    def __init__(self, data, is_signal=1):
        # Column labels for the 8 kinematic features: (px, py, pz, E)
        # for each of the two particles.
        self.var_labels = ["px1", "py1", "pz1", "e1",
                           "px2", "py2", "pz2", "e2"]
        # data: filename or iterable of lines/rows; is_signal: default
        # label used when a row does not carry its own (9th column).
        self.data = self.get_data(data, is_signal)

    def get_data(self, data, is_signal):
        # Build a numpy array of events.  Each row is reordered so the
        # particle with the larger pt (presumably transverse momentum,
        # from .phys -- confirm) comes first, with the signal label
        # appended last.
        if type(data) == str:
            # A string is treated as a path to a whitespace-delimited file.
            with open(data) as f:
                lines = f.readlines()
        else:
            lines = data
        output = []
        for line in lines:
            if type(line) == str:
                line = line.strip()
                line = [float(x) for x in line.split()]
            if len(line) == 9:
                # 9-column input carries its own signal label in column 8.
                is_signal = line[8]
            # NOTE(review): for 9-column rows the two branches below
            # produce rows of different lengths (10 vs 9), because only
            # the swap branch drops the embedded label -- confirm
            # whether the first branch should append line[0:8] instead.
            if pt(line[0], line[1]) > pt(line[4], line[5]):
                output.append(line + [is_signal])
            else:
                output.append(line[4:8] + line[0:4] + [is_signal])
        return np.array(output)
| StarcoderdataPython |
5108940 | <gh_stars>0
from django.apps import AppConfig
class PilotsConfig(AppConfig):
    """Django application configuration for the 'pilots' app."""
    name = 'pilots'
| StarcoderdataPython |
8078477 | <gh_stars>1-10
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils import helpers
def split_url(value, query='', alias='urlsplit'):
    """Split *value* into its URL components.

    Returns the full component mapping, or just the component named by
    *query*; raises AnsibleFilterError for an unknown component name.
    """
    components = helpers.object_to_dict(
        urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
    # No specific component requested: hand back the whole mapping.
    if not query:
        return components
    if query not in components:
        raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
    return components[query]
# ---- Ansible filters ----
class FilterModule(object):
    ''' URI filter '''

    def filters(self):
        # Ansible filter-plugin entry point: map filter names to callables.
        return {
            'urlsplit': split_url
        }
| StarcoderdataPython |
# Numeric sentinels for comparisons/initialization.
INF = float('inf')
NEG_INF = float('-inf')
# String sentinel; its meaning is defined by the modules that import it.
PASSIVE = "PASSIVE"
# Public API of this constants module.
__all__ = ["INF", "NEG_INF", "PASSIVE"]
| StarcoderdataPython |
8053410 | <filename>parsing/tracking_logs/generate_course_tracking_logs.py
'''
This module will extract tracking logs for a given course and date range
between when course enrollment start and when the course ended. For each log,
the parent_data and meta_data from the course_structure collection will be
appended to the log based on the event key in the log
'''
import pymongo
import sys
from datetime import datetime
import json
def connect_to_db_collection(db_name, collection_name):
    '''
    Return collection of a given database name and collection name
    (connecting to the local MongoDB on the default port)
    '''
    # NOTE(review): pymongo.Connection is the legacy (pre-3.x) client
    # class -- confirm the deployed pymongo version still provides it.
    connection = pymongo.Connection('localhost', 27017)
    db = connection[db_name]
    collection = db[collection_name]
    return collection
def load_config(config_file):
    '''
    Return course ids and ranges of dates from which course specific tracking
    logs will be extracted
    '''
    with open(config_file) as handle:
        config = json.load(handle)

    course_ids = config['course_ids']
    if not isinstance(course_ids, list):
        raise ValueError('Expecting list of course ids')

    def parse_day(key):
        # Parse a YYYY-MM-DD config string into a datetime.date.
        return datetime.strptime(config[key], '%Y-%m-%d').date()

    try:
        enrollment_date = parse_day('date_of_course_enrollment')
        completion_date = parse_day('date_of_course_completion')
    except ValueError:
        raise ValueError('Incorrect data format, should be YYYY-MM-DD')
    return course_ids, enrollment_date, completion_date
def append_course_structure_data(course_structure_collection, _id, document):
    '''
    Append parent_data and metadata (if they exist) from the course
    structure record identified by *_id* onto the tracking-log *document*.

    Best-effort: when the structure record is missing or the lookup fails,
    the document is left unchanged.
    '''
    try:
        # find() returns a cursor/sequence; [0] raises IndexError when no
        # record matches the id.
        data = course_structure_collection.find({"_id" : _id})[0]
        if 'parent_data' in data:
            document['parent_data'] = data['parent_data']
        if 'metadata' in data:
            document['metadata'] = data['metadata']
    except Exception:
        # Deliberately swallow lookup failures (missing id, connection
        # errors) so one bad log entry cannot abort the whole run -- but not
        # with a bare "except:", which would also trap SystemExit and
        # KeyboardInterrupt.
        pass
def extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date):
    '''
    Copy every tracking log whose course_id is in *course_ids* and whose
    'time' field falls within [start_date, end_date] from the source
    collection to the destination collection, binding parent_data/metadata
    from the course structure onto each copied document first.
    '''
    documents = source_collection.find({'course_id' : { '$in' : course_ids }})
    for document in documents:
        # 'time' looks like an ISO timestamp; only the date part is compared.
        if start_date <= datetime.strptime(document['time'].split('T')[0], "%Y-%m-%d").date() <= end_date:
            # Bind parent_data and metadata from course_structure to tracking document.
            # 'bound' ensures structure data is attached at most once per document
            # (event id takes precedence over the page-derived id).
            bound = False
            if document['event']:
                if isinstance(document['event'], dict):
                    if 'id' in document['event']:
                        # Ids of the form a-b-c-d...: keep only the last segment.
                        splitted = document['event']['id'].split('-')
                        if len(splitted) > 3:
                            document['event']['id'] = splitted[-1]
                        if not bound:
                            append_course_structure_data(course_structure_collection, document['event']['id'], document)
                            bound = True
            if document['page']:
                # Page URLs: keep the second-to-last path segment as the id.
                splitted = document['page'].split('/')
                if len(splitted) > 2:
                    document['page'] = splitted[-2]
                if not bound:
                    append_course_structure_data(course_structure_collection, document['page'], document)
            # End of binding, now insert document into collection.
            destination_collection.insert(document)
def main():
    '''
    Command-line entry point: connect to the source and destination Mongo
    databases and copy the course-specific tracking logs across.

    Expects exactly three arguments: source_db destination_db config_file.
    '''
    if len(sys.argv) != 4:
        # argv[0] is the program name, so three user arguments give length 4.
        # (Fixed: the original required len(sys.argv) == 6 even though only
        # sys.argv[1:4] are ever used, contradicting its own usage text.)
        usage_message = """usage: %s source_db destination_db course_config_file
        Provide name of course database to insert tracking logs to and
        config file to load configurations\n
        """
        sys.stderr.write(usage_message % sys.argv[0])
        sys.exit(1)
    source_db = sys.argv[1]
    destination_db = sys.argv[2]
    source_collection = connect_to_db_collection(source_db, 'tracking')
    destination_collection = connect_to_db_collection(destination_db, 'tracking')
    course_structure_collection = connect_to_db_collection(destination_db, 'course_structure')
    course_ids, start_date, end_date = load_config(sys.argv[3])
    extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date)
# Run the extraction only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6586757 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the game_stats example."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import unittest
import apache_beam as beam
from apache_beam.examples.complete.game import game_stats
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class GameStatsTest(unittest.TestCase):
    """Unit tests for the game_stats example pipeline."""

    # Raw events: 'user,team,score,epoch_millis,readable_timestamp'.
    # The 9000-score 'robot' rows are the spam the pipeline must detect.
    SAMPLE_DATA = [
        'user1_team1,team1,18,1447686663000,2015-11-16 15:11:03.921',
        'user1_team1,team1,18,1447690263000,2015-11-16 16:11:03.921',
        'user2_team2,team2,2,1447690263000,2015-11-16 16:11:03.955',
        'user3_team3,team3,8,1447690263000,2015-11-16 16:11:03.955',
        'user4_team3,team3,5,1447690263000,2015-11-16 16:11:03.959',
        'user1_team1,team1,14,1447697463000,2015-11-16 18:11:03.955',
        'robot1_team1,team1,9000,1447697463000,2015-11-16 18:11:03.955',
        'robot2_team2,team2,1,1447697463000,2015-11-16 20:11:03.955',
        'robot2_team2,team2,9000,1447697463000,2015-11-16 21:11:03.955',
    ]

    def create_data(self, p):
        """Parse SAMPLE_DATA into timestamped event dicts on pipeline *p*."""
        return (p
                | beam.Create(GameStatsTest.SAMPLE_DATA)
                | beam.ParDo(game_stats.ParseGameEventFn())
                | beam.Map(lambda elem:\
                    beam.window.TimestampedValue(elem, elem['timestamp'])))

    def test_spammy_users(self):
        """The spam filter should surface only the outlier robot scores."""
        with TestPipeline() as p:
            result = (
                self.create_data(p)
                | beam.Map(lambda elem: (elem['user'], elem['score']))
                | game_stats.CalculateSpammyUsers())
            assert_that(
                result, equal_to([('robot1_team1', 9000), ('robot2_team2', 9001)]))

    def test_game_stats_sessions(self):
        """Mean session length over fixed windows should be 300s per window."""
        session_gap = 5 * 60  # 5-minute gap closes a session
        user_activity_window_duration = 30 * 60  # 30-minute aggregation window
        with TestPipeline() as p:
            result = (
                self.create_data(p)
                | beam.Map(lambda elem: (elem['user'], elem['score']))
                | 'WindowIntoSessions' >> beam.WindowInto(
                    beam.window.Sessions(session_gap),
                    timestamp_combiner=beam.window.TimestampCombiner.OUTPUT_AT_EOW)
                | beam.CombinePerKey(lambda _: None)
                | beam.ParDo(game_stats.UserSessionActivity())
                | 'WindowToExtractSessionMean' >> beam.WindowInto(
                    beam.window.FixedWindows(user_activity_window_duration))
                | beam.CombineGlobally(beam.combiners.MeanCombineFn())\
                    .without_defaults())
            assert_that(result, equal_to([300.0, 300.0, 300.0]))
# Allow running this test module directly with verbose logging.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
| StarcoderdataPython |
3571062 | <filename>text_recognizer/data/__init__.py<gh_stars>0
from .base_data_module import BaseDataModule
# Hide lines below until Lab 2
from .emnist import EMNIST
from .emnist_lines import EMNISTLines
from .mnist import MNIST
from .util import BaseDataset
# Hide lines above until Lab 2
| StarcoderdataPython |
3210684 | import logging
import time
import datetime
import random
import weaved
# BEGIN Configuration
# Weaved related configuration
PLUG_IP = '192.168.1.201' # Assumes the Smart Plug is configured for SSH and IR blaster
PLUG_USER = 'root' # Assumes password-less (key based) SSH authentication is set up
# IR codes for turning TV on/off; use the POWER code if there aren't separate codes for POWER ON and POWER OFF
TV_ON_CODE = '2203D6F71297971C8C47206E8C743267654B3708D374B492211147000111746D0100116D75110058770065476D006D654774657400000000000000000000000000000000000000000000000000000000'
TV_OFF_CODE = '2203D6F71197971B8C47206E8C743267654B3708D374B492211147000111746D0000116D76110058770065476D006D654774657400000000000000000000000000000000000000000000000000000000'
# NoBurglar configuration
START_TIME = '1930' # Daily start time in military time
END_TIME = '2300' # Daily end time
TV_ON_PERCENTAGE = 50.0 # Probably don't want the TV on the entire duration of the time window
# Quick way to enable/disable
# File in the local directory containing 0 or 1; 1 => enabled
# To enable - $ echo 1 > enabled
# To disable - $ echo 0 > enabled
ENABLED_FILENAME = 'enabled'
POLL_INTERVAL = 60 # seconds between checks of the enabled flag / trigger evaluation
# END Configuration
DEBUG = False  # When True, device commands are skipped and only state is updated.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - [%(levelname)s] %(message)s')
def enabled():
    '''Report whether the control file currently contains "1".'''
    with open(ENABLED_FILENAME) as control_file:
        # Any content other than a lone "1" (after stripping) means disabled.
        return control_file.read().strip() == "1"
# Global state
class State:
    """Mutable in-memory state for the light/TV presence simulation."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore every tracking field to its day-start default."""
        self.tv_done = False        # True once today's TV window has completed
        self.tv_total_time = 0      # seconds the TV has been on today
        self.tv_start_time = 0      # when the TV should be started today
        self.is_tv_on = False
        self.is_light_on = False
# Shared singletons: the simulation state and the Weaved smart-plug controller.
state = State()
plug = weaved.Plug(PLUG_IP, PLUG_USER)
def run_triggers():
    '''Run the triggers (TV/light) if applicable.

    Called once per poll: keeps the light on for the whole daily window and
    turns the TV on once per day at a random offset within the window, for
    TV_ON_PERCENTAGE of the window's duration. Mutates the module-level
    `state` singleton.
    '''
    logging.debug("Processing triggers")
    now = datetime.datetime.today()
    # Today's window boundaries built from the configured HHMM strings.
    t1 = datetime.datetime.combine(now.date(), datetime.datetime.strptime(START_TIME, '%H%M').time())
    t2 = datetime.datetime.combine(now.date(), datetime.datetime.strptime(END_TIME, '%H%M').time())
    in_range = t1 <= now <= t2
    # Check the light state.
    # NOTE(review): success is assumed when power_on/power_off/send_ir_code
    # returns a falsy value ("if DEBUG or not plug...") -- confirm against
    # the weaved.Plug API before relying on this convention.
    if not in_range:
        if state.is_light_on:
            logging.info('Turning light off')
            if DEBUG or not plug.power_off():
                state.is_light_on = False
    elif not state.is_light_on:
        logging.info('Turning light on')
        if DEBUG or not plug.power_on():
            state.is_light_on = True
    # Randomly start the TV based on the percentage and the start and end times.
    if in_range:
        if not state.tv_done:
            tv_target_duration = TV_ON_PERCENTAGE / 100 * (t2 - t1).total_seconds()
            if not state.tv_start_time:
                # Pick a random start so the TV run fits entirely inside the window.
                delay = random.random() * ((t2 - t1).total_seconds() - tv_target_duration)
                state.tv_start_time = t1 + datetime.timedelta(seconds = delay)
                logging.info('TV will turn on at around ' + str(state.tv_start_time.time()) +
                             ' for ' + str(tv_target_duration) + ' seconds')
            if now > state.tv_start_time:
                state.tv_total_time = (now - state.tv_start_time).total_seconds()
                if state.tv_total_time >= tv_target_duration:
                    # time to turn the TV off
                    logging.info('Turning TV off')
                    if DEBUG or not plug.send_ir_code(TV_OFF_CODE):
                        state.is_tv_on = False
                        state.tv_start_time = state.tv_total_time = None
                        state.tv_done = True
                elif not state.is_tv_on:
                    logging.info('Turning TV on')
                    if DEBUG or not plug.send_ir_code(TV_ON_CODE):
                        state.is_tv_on = True
    else:
        if state.is_tv_on:
            # Usually shouldn't happen unless the tv end time is close to the END_TIME
            # and the thread doesn't get woken up until it's past END_TIME
            logging.info('Turning TV off since time window has elapsed')
            if DEBUG or not plug.send_ir_code(TV_OFF_CODE):
                state.tv_start_time = state.tv_total_time = None
                state.is_tv_on = False
        # Outside the window: re-arm the once-per-day TV flag for tomorrow.
        state.tv_done = False
# Daemon-style entry point: poll the enable flag and evaluate triggers forever.
if __name__ == '__main__':
    # Check for action periodically
    while True:
        if enabled():
            run_triggers()
        else:
            # If this goes from enabled -> disabled in the middle of time window, leave the
            # physical state of the devices as it is; just reset the in-memory state
            state.reset()
        time.sleep(POLL_INTERVAL)
| StarcoderdataPython |
3326394 | import json
import boto3
from util import DecimalEncoder
# Module-level AWS resources so warm Lambda invocations reuse the connection.
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
structures_table = dynamodb.Table('corp-fittings-doctrines')
def handle(event, context):
    """AWS Lambda handler: return every doctrine record as a JSON payload."""
    # Full table scan -- acceptable because the doctrines table is small.
    items = structures_table.scan()['Items']
    return {
        "statusCode": 200,
        "body": json.dumps(items, cls=DecimalEncoder),
    }
| StarcoderdataPython |
9616090 | <filename>myProject/myApp/migrations/0001_initial.py<gh_stars>0
# Generated by Django 2.0.6 on 2019-02-21 17:38
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
# Auto-generated Django migration; normally left untouched once applied.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=500)),
                ('pageNumber', models.IntegerField()),
                ('gender', models.CharField(max_length=300)),
                # NOTE(review): this default is a *fixed* datetime captured when the
                # migration was generated (likely from default=datetime.now() in the
                # model instead of the callable timezone.now) -- confirm against the model.
                ('publishedDate', models.DateTimeField(default=datetime.datetime(2019, 2, 21, 17, 38, 59, 279723, tzinfo=utc))),
            ],
        ),
    ]
| StarcoderdataPython |
8131833 | <filename>ableton/v2/control_surface/components/scene.py
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/ableton/v2/control_surface/components/scene.py
from __future__ import absolute_import, print_function, unicode_literals
import Live
from builtins import zip
from ...base import listens, liveobj_valid, liveobj_changed
from ..component import Component
from ..control import ButtonControl
from .clip_slot import ClipSlotComponent, is_button_pressed, find_nearest_color
class SceneComponent(Component):
    u"""
    Class representing a scene in Live.

    Maps a hardware launch button plus a row of clip-slot controls onto one
    Live scene, tracking the session ring for which tracks are visible and
    reflecting scene state (triggered/color) back to the controller.
    """
    clip_slot_component_type = ClipSlotComponent
    launch_button = ButtonControl()

    def __init__(self, session_ring = None, *a, **k):
        assert session_ring is not None
        assert session_ring.num_tracks >= 0
        self._controlled_tracks = []
        super(SceneComponent, self).__init__(*a, **k)
        self._session_ring = session_ring
        self._scene = None
        self._clip_slots = []
        self._color_palette = None
        self._color_table = None
        # One clip-slot sub-component per track covered by the session ring.
        for _ in range(session_ring.num_tracks):
            new_slot = self._create_clip_slot()
            self._clip_slots.append(new_slot)
        # Skin color names used when no explicit scene color applies.
        self._triggered_color = u'Session.SceneTriggered'
        self._scene_color = u'Session.Scene'
        self._no_scene_color = u'Session.NoScene'
        self._track_offset = 0
        self._select_button = None
        self._delete_button = None
        self._duplicate_button = None
        self.__on_track_list_changed.subject = session_ring

    @listens(u'tracks')
    def __on_track_list_changed(self):
        self._update_controlled_tracks()

    def set_scene(self, scene):
        # Re-subscribe the listeners to the new scene and refresh the UI.
        if liveobj_changed(scene, self._scene):
            self._scene = scene
            self.__on_is_triggered_changed.subject = scene
            self.__on_scene_color_changed.subject = scene
            self.update()

    def set_launch_button(self, button):
        self.launch_button.set_control_element(button)
        self.update()

    def set_select_button(self, button):
        self._select_button = button

    def set_delete_button(self, button):
        self._delete_button = button

    def set_duplicate_button(self, button):
        self._duplicate_button = button

    def set_track_offset(self, offset):
        assert offset >= 0
        if offset != self._track_offset:
            self._track_offset = offset
            self._update_controlled_tracks()

    def set_color_palette(self, palette):
        self._color_palette = palette

    def set_color_table(self, table):
        self._color_table = table

    def clip_slot(self, index):
        return self._clip_slots[index]

    def update(self):
        super(SceneComponent, self).update()
        # Assign real clip slots only when enabled with a valid scene;
        # otherwise clear every slot component.
        if liveobj_valid(self._scene) and self.is_enabled():
            clip_slots_to_use = self.build_clip_slot_list()
            for slot_wrapper, clip_slot in zip(self._clip_slots, clip_slots_to_use):
                slot_wrapper.set_clip_slot(clip_slot)
        else:
            for slot in self._clip_slots:
                slot.set_clip_slot(None)
        self._update_launch_button()

    def _update_controlled_tracks(self):
        controlled_tracks = self._session_ring.controlled_tracks()
        if controlled_tracks != self._controlled_tracks:
            self.update()
            self._controlled_tracks = controlled_tracks

    def _determine_actual_track_offset(self, tracks):
        # Translate the visible-track offset into an absolute index into
        # *tracks*, skipping hidden tracks.
        actual_track_offset = self._track_offset
        if self._track_offset > 0:
            real_offset = 0
            visible_tracks = 0
            while visible_tracks < self._track_offset and len(tracks) > real_offset:
                if tracks[real_offset].is_visible:
                    visible_tracks += 1
                real_offset += 1
            actual_track_offset = real_offset
        return actual_track_offset

    def build_clip_slot_list(self):
        # Produce one scene clip slot (or None padding) per slot component,
        # skipping slots on invisible tracks.
        slots_to_use = []
        tracks = self.song.tracks
        track_offset = self._determine_actual_track_offset(tracks)
        clip_slots = self._scene.clip_slots
        for _ in self._clip_slots:
            while len(tracks) > track_offset and not tracks[track_offset].is_visible:
                track_offset += 1
            if len(clip_slots) > track_offset:
                slots_to_use.append(clip_slots[track_offset])
            else:
                slots_to_use.append(None)
            track_offset += 1
        return slots_to_use

    # The two methods below intentionally reuse the name 'launch_button':
    # they register pressed/released handlers on the ButtonControl above.
    @launch_button.pressed
    def launch_button(self, value):
        self._on_launch_button_pressed()

    def _on_launch_button_pressed(self):
        # Modifier buttons (select/duplicate/delete) change the launch
        # button's meaning while held.
        if is_button_pressed(self._select_button):
            self._do_select_scene(self._scene)
        elif liveobj_valid(self._scene):
            if is_button_pressed(self._duplicate_button):
                self._do_duplicate_scene(self._scene)
            elif is_button_pressed(self._delete_button):
                self._do_delete_scene(self._scene)
            else:
                self._do_launch_scene(True)

    @launch_button.released
    def launch_button(self, value):
        self._on_launch_button_released()

    def _on_launch_button_released(self):
        if not is_button_pressed(self._select_button) and liveobj_valid(self._scene) and not is_button_pressed(self._duplicate_button) and not is_button_pressed(self._delete_button):
            self._do_launch_scene(False)

    def _do_select_scene(self, scene_for_overrides):
        if liveobj_valid(self._scene):
            view = self.song.view
            if view.selected_scene != self._scene:
                view.selected_scene = self._scene
                self._on_scene_selected()

    def _on_scene_selected(self):
        # Hook for subclasses.
        pass

    def _do_delete_scene(self, scene_for_overrides):
        try:
            if liveobj_valid(self._scene):
                song = self.song
                song.delete_scene(list(song.scenes).index(self._scene))
                self._on_scene_deleted()
        except RuntimeError:
            pass

    def _on_scene_deleted(self):
        # Hook for subclasses.
        pass

    def _do_duplicate_scene(self, scene_for_overrides):
        try:
            song = self.song
            song.duplicate_scene(list(song.scenes).index(self._scene))
            self._on_scene_duplicated()
        except (Live.Base.LimitationError, IndexError, RuntimeError):
            pass

    def _on_scene_duplicated(self):
        # Hook for subclasses.
        pass

    def _do_launch_scene(self, value):
        launched = False
        if self.launch_button.is_momentary:
            # Momentary buttons drive the fire-button state directly.
            self._scene.set_fire_button_state(value != 0)
            launched = value != 0
        elif value != 0:
            self._scene.fire()
            launched = True
        if launched and self.song.select_on_launch:
            self.song.view.selected_scene = self._scene

    @listens(u'is_triggered')
    def __on_is_triggered_changed(self):
        assert liveobj_valid(self._scene)
        self._update_launch_button()

    @listens(u'color')
    def __on_scene_color_changed(self):
        assert liveobj_valid(self._scene)
        self._update_launch_button()

    def _color_value(self, color):
        # Exact palette match first, then nearest color from the table.
        value = None
        if self._color_palette:
            value = self._color_palette.get(color, None)
        if value is None and self._color_table:
            value = find_nearest_color(self._color_table, color)
        return value

    def _update_launch_button(self):
        if self.is_enabled():
            # Priority: no scene < scene default < scene color < triggered.
            value_to_send = self._no_scene_color
            if liveobj_valid(self._scene):
                value_to_send = self._scene_color
                if self._scene.is_triggered:
                    value_to_send = self._triggered_color
                else:
                    possible_color = self._color_value(self._scene.color)
                    if possible_color:
                        value_to_send = possible_color
            self.launch_button.color = value_to_send

    def _create_clip_slot(self):
        return self.clip_slot_component_type(parent=self)
| StarcoderdataPython |
247391 | """
This is the Contacts Walker Process
caveats:
* email address is assumed to be case-insensitive in SalesForce
* some email addresses are duplicates within a single CSV file.
"""
import pprint
import os, sys
import textwrap
import time
import datetime
import re
import logging
import cStringIO
from pyax.connection import Connection
from pyax.exceptions import ApiFault
from vyperlogix.misc import _utils
from vyperlogix.daemon.daemon import Log
from vyperlogix.logging import standardLogging
from vyperlogix.hash import lists
from vyperlogix.misc import ObjectTypeName
from vyperlogix.misc import Args
from vyperlogix.misc import PrettyPrint
from vyperlogix.aima import utils
from vyperlogix import oodb
from vyperlogix.misc import threadpool
from vyperlogix.parsers.CSV import CSV
from vyperlogix.parsers.CSV import asCSV
import traceback
import Queue
from stat import *
from sfConstant import BAD_INFO_LIST
from crypto import *
from runWithAnalysis import *
from sfUtil_win32 import *
from vyperlogix.sf import update
# Module-level configuration/state (Python 2 code: note dict.has_key usage).
csv_fname = ""
_isVerbose = False
_csvPath = ''
_logging = logging.WARNING
is_platform_not_windows = lambda _sys:(_sys.platform != 'win32')
bool_is_platform_not_windows = is_platform_not_windows(sys)
_isBeingDebugged = (os.environ.has_key('WINGDB_ACTIVE')) # When debugger is being used we do not use threads...
# Plain Queue while debugging, thread-pool queue otherwise.
_proc_queue = Queue.Queue(750) if (_isBeingDebugged) else threadpool.ThreadQueue(750)
# NOTE(review): duplicate assignment -- _isVerbose was already set above.
_isVerbose = False
def _sf_query(sfdc,soql):
    '''Run *soql* through the SalesForce connection, timing the call and
    counting it in the stats; return the raw result, or None on ApiFault.'''
    try:
        ioTimeAnalysis.ioBeginTime('SOQL')
        ret = sfdc.query(soql)
        ioTimeAnalysis.ioEndTime('SOQL')
        sf_stats.count_query()
        return ret
    except ApiFault:
        exc_info = sys.exc_info()
        info_string = '\n'.join(traceback.format_exception(*exc_info))
        logging.error('(%s) soql=%s, Reason: %s' % (_utils.funcName(),soql,info_string))
        return None
def exception_callback(sections):
    '''Fatal-exception hook: report the traceback sections to both streams
    and the log, close the redirected streams and terminate the process.'''
    _msg = 'EXCEPTION Causing Abend.\n%s' % '\n'.join(sections)
    print >>sys.stdout, _msg
    print >>sys.stderr, _msg
    logging.error('(%s) :: %s' % (_utils.funcName(),_msg))
    sys.stdout.close()
    sys.stderr.close()
    sys.exit(0)
# When True, return the raw SalesForce objects; when False, plain dict copies.
__getRealObjectsFromSOQL = True
def __getObjectsFromSOQL(sfdc,soql):
    '''Run *soql* and collect the result values into a list.

    Returns None implicitly when the query result is in BAD_INFO_LIST or an
    exception occurs.
    '''
    try:
        ret = _sf_query(sfdc,soql)
        logging.info('(%s) soql=%s' % (_utils.funcName(),soql))
        if ret in BAD_INFO_LIST:
            # Falls through and returns None.
            logging.warning("(%s) :: Could not find any Object(s) for SOQL of (%s)." % (_utils.funcName(),soql))
        else:
            logging.info("(%s) :: soql=%s." % (_utils.funcName(),soql))
            objects = []
            for k in ret.keys():
                v = ret[k]
                val = v if (__getRealObjectsFromSOQL) else lists.copyAsDict(v)
                objects.append(val if (__getRealObjectsFromSOQL) else val.asDict())
            return objects
    except:
        exc_info = sys.exc_info()
        info_string = '\n'.join(traceback.format_exception(*exc_info))
        logging.warning('(%s) :: %s' % (_utils.funcName(),info_string))
        return None
def _getCRLinkByCrId(args):
    '''Fetch CR_Link__c objects whose Parent_CR__c matches *id*.

    *args* is a (sfdc_connection, id) tuple. Returns a list, or None/empty
    per BAD_INFO_LIST on failure.
    '''
    try:
        sfdc, id = args
        soql = "Select c.Account__c, c.Description__c, c.Id, c.Name, c.Parent_CR__c from CR_Link__c c where c.Parent_CR__c = '%s'" % (id)
        objs = __getObjectsFromSOQL(sfdc,soql)
        if objs in BAD_INFO_LIST:
            logging.warning("(%s) :: Could not find any CR_Link__c Object(s) for id of %s." % (_utils.funcName(),id))
        return objs
    except:
        exc_info = sys.exc_info()
        info_string = '\n'.join(traceback.format_exception(*exc_info))
        logging.warning('(%s) :: %s' % (_utils.funcName(),info_string))
        return None
def getCRLinkByCrId(args):
    '''Public wrapper for _getCRLinkByCrId (kept for thread-pool dispatch).'''
    return _getCRLinkByCrId(args)
def _getCrByCaseNumber(args):
    '''Fetch Case objects matching *caseNumber*.

    *args* is a (sfdc_connection, caseNumber) tuple. Returns a list, or
    None/empty per BAD_INFO_LIST on failure.
    '''
    try:
        sfdc, caseNumber = args
        soql = "Select c.CaseNumber, c.CR_Number__c, c.Id from Case c where c.CaseNumber = '%s'" % (caseNumber)
        objs = __getObjectsFromSOQL(sfdc,soql)
        if objs in BAD_INFO_LIST:
            logging.warning("(%s) :: Could not find any Case Object(s) for caseNumber of %s." % (_utils.funcName(),caseNumber))
        return objs
    except:
        exc_info = sys.exc_info()
        info_string = '\n'.join(traceback.format_exception(*exc_info))
        logging.warning('(%s) :: %s' % (_utils.funcName(),info_string))
        return None
def getCrByCaseNumber(args):
    '''Public wrapper for _getCrByCaseNumber (kept for thread-pool dispatch).'''
    return _getCrByCaseNumber(args)
def main(args):
    '''Walk every row of the CSV, look up the Case and its CR links, and
    optionally rename a column value on each link (committing only when
    _isCommit is set). *args* is a (sfdc_connection, csv_parser) tuple.'''
    # NOTE(review): declared but unused here -- presumably a leftover from a
    # sibling contacts-walker script.
    email_from_row = lambda row:row[_analysisColNum].strip().lower()
    ioTimeAnalysis.ioBeginTime('%s' % (__name__))
    try:
        sfdc, csv = args
        files = []  # NOTE(review): unused.
        if (len(csv.rows) > 0):
            for row in csv.rows:
                caseNum = row[_analysisColNum]
                if (len(caseNum) > 0) and (caseNum != 'MISSING'):
                    crs = getCrByCaseNumber((sfdc,caseNum))
                    if (crs is not None):
                        for cr in crs:
                            id = cr['Id']
                            links = getCRLinkByCrId((sfdc,id))
                            if (links is not None):
                                for link in links:
                                    if (_isVerbose):
                                        lists.prettyPrint(link,'\t',title='CR_Link__c for %s via %s' % (id,caseNum),fOut=sys.stdout)
                                    # Rename mode: substitute _oldValue with _newValue in the named column.
                                    if (_col2rename is not None) and (len(_col2rename) > 0):
                                        if (link[_col2rename].find(_oldValue) > -1):
                                            if (_isCommit):
                                                print >>sys.stdout, 'Committed Change :: Rename CRLink with Id of "%s" from "%s" to "%s".' % (link['Id'],link[_col2rename],link[_col2rename].replace(_oldValue,_newValue))
                                                link[_col2rename] = link[_col2rename].replace(_oldValue,_newValue)
                                                update.updateSalesForceObject(link)
                                            else:
                                                # Dry run: report what would change.
                                                print >>sys.stdout, 'UnCommitted Change :: Rename CRLink with Id of "%s" from "%s" to "%s".' % (link['Id'],link[_col2rename],link[_col2rename].replace(_oldValue,_newValue))
                                                print >>sys.stdout, ''
                                                pass
                                        else:
                                            if (_isVerbose):
                                                print >>sys.stdout, 'No Change :: CRLink with Id of "%s" from "%s" to "%s".' % (link['Id'],link[_col2rename],link[_col2rename].replace(_oldValue,_newValue))
                                                print >>sys.stdout, ''
                                            pass
                            else:
                                print >>sys.stderr, 'WARNING: Missing data for Case Id of "%s".' % (id)
                    else:
                        print >>sys.stderr, 'WARNING: Missing data for caseNum of "%s".' % (caseNum)
                        pass
    except:
        exc_info = sys.exc_info()
        info_string = '\n'.join(traceback.format_exception(*exc_info))
        logging.error('(%s) :: %s' % (_utils.funcName(),info_string))
    ioTimeAnalysis.ioEndTime('%s' % (__name__))
    logging.info('(%s) :: %s' % (_utils.funcName(),ioTimeAnalysis.ioTimeAnalysisReport()))
# Script entry point (Python 2): parse command-line options, set up logging
# and stream redirection, authenticate to SalesForce and run main() under the
# profiler/analysis wrapper.
if __name__ == "__main__":
    def ppArgs():
        # Pretty-print the supported command-line options.
        pArgs = [(k,args[k]) for k in args.keys()]
        pPretty = PrettyPrint.PrettyPrint('',pArgs,True,' ... ')
        pPretty.pprint()
    args = {'--help':'displays this help text.',
        '--verbose':'output more stuff.',
        '--csv=?':'name the path to the csv file (must be a simple csv file).',
        '--xls=?':'name the path to the xls file.',
        '--headers':'default is False however if this option is used the CSV headers are placed at the top of all output files.',
        '--colname=?':'name the column in the source file to use, such as "email".',
        '--old=?':'specify the old value or the value to rename.',
        '--new=?':'specify the new value or the value the old value is to become.',
        '--col2rename=?':'name the column in the source file to use for the rename function, such as "Name".',
        '--commit':'use this option to commit changes to SalesForce.',
        '--folder=?':'names the folder in which the logs and data will reside.',
        '--logging=?':'[logging.INFO,logging.WARNING,logging.ERROR,logging.DEBUG]',
        }
    _argsObj = Args.Args(args)
    if (_isVerbose):
        print '_argsObj=(%s)' % str(_argsObj)
    if (len(sys.argv) == 1):
        # No arguments: show usage.
        ppArgs()
    else:
        _progName = _argsObj.programName
        _isVerbose = False
        try:
            if _argsObj.booleans.has_key('isVerbose'):
                _isVerbose = _argsObj.booleans['isVerbose']
        except:
            exc_info = sys.exc_info()
            info_string = '\n'.join(traceback.format_exception(*exc_info))
            print >>sys.stderr, '(%s) :: %s' % (_utils.funcName(),info_string)
            _isVerbose = False
        _isCommit = False
        try:
            if _argsObj.booleans.has_key('isCommit'):
                _isCommit = _argsObj.booleans['isCommit']
        except:
            exc_info = sys.exc_info()
            info_string = '\n'.join(traceback.format_exception(*exc_info))
            print >>sys.stderr, '(%s) :: %s' % (_utils.funcName(),info_string)
            _isCommit = False
        _isHeaders = False
        try:
            if _argsObj.booleans.has_key('isHeaders'):
                _isHeaders = _argsObj.booleans['isHeaders']
        except:
            exc_info = sys.exc_info()
            info_string = '\n'.join(traceback.format_exception(*exc_info))
            print >>sys.stderr, '(%s) :: %s' % (_utils.funcName(),info_string)
            _isHeaders = False
        # Working folder: --folder option, else the 'cwd' environment variable.
        __folder = os.path.dirname(sys.argv[0])
        try:
            __folder = _argsObj.arguments['folder'] if _argsObj.arguments.has_key('folder') else ''
            if (len(__folder) == 0) or (not os.path.exists(__folder)):
                if (os.environ.has_key('cwd')):
                    __folder = os.environ['cwd']
        except:
            pass
        _folderPath = __folder
        _colname = ''
        try:
            _colname = _argsObj.arguments['colname'] if _argsObj.arguments.has_key('colname') else ''
        except:
            pass
        _oldValue = ''
        try:
            _oldValue = _argsObj.arguments['old'] if _argsObj.arguments.has_key('old') else ''
        except:
            pass
        _newValue = ''
        try:
            _newValue = _argsObj.arguments['new'] if _argsObj.arguments.has_key('new') else ''
        except:
            pass
        _col2rename = ''
        try:
            _col2rename = _argsObj.arguments['col2rename'] if _argsObj.arguments.has_key('col2rename') else ''
        except:
            pass
        _csvPath = ''
        _analysisData = []
        _analysisColNum = -1
        # Locate the analysis column in the CSV whose header shares a token
        # with --colname.
        try:
            if _argsObj.arguments.has_key('csv'):
                f = _argsObj.arguments['csv']
                if (os.path.exists(f)):
                    if (os.path.isdir(f)):
                        _csvPath = f
                    else:
                        try:
                            csv = CSV(f)
                            csv_header_toks = [(t.split(),t) for t in csv.header]
                            _target_toks = _colname.split()
                            isFound = None
                            for tt in _target_toks:
                                for t in csv_header_toks:
                                    if (tt in t[0]):
                                        isFound = t[-1]
                                        break
                            if (isFound):
                                try:
                                    _analysisData = csv.column(isFound)
                                    _analysisColNum = csv.header.index(isFound)
                                    _csvPath = f
                                except ValueError:
                                    print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not appear to contain any data in the column(s) in which the tokens %s appears in row 1 although this file appears to be a valid Excel file type.' % (f,_target_toks)
                            else:
                                print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not appear to contain any columns in which the tokens %s appears in row 1 although this file appears to be a valid Excel file type.' % (f,_target_toks)
                        except ValueError:
                            print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not appear to be a valid Excel file type.' % (f)
                else:
                    print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not exist.' % (f)
        except:
            exc_info = sys.exc_info()
            info_string = '\n'.join(traceback.format_exception(*exc_info))
            print >>sys.stderr, '(%s) :: %s' % (_utils.funcName(),info_string)
        _isAnalysis = False
        # XLS handling appears unfinished: isFound is never set before being
        # tested, so the success branch is unreachable.
        try:
            if _argsObj.arguments.has_key('xls'):
                f = _argsObj.arguments['xls']
                if (os.path.exists(f)):
                    if (os.path.isdir(f)):
                        _csvPath = f
                    else:
                        try:
                            isFound = None
                            import xlrd
                            book = xlrd.open_workbook(f)
                            l = book.name_obj_list()
                            pass
                            if (isFound):
                                try:
                                    _analysisData = csv.column(isFound)
                                    _csvPath = f
                                except ValueError:
                                    print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not appear to contain any data in the column(s) in which the tokens %s appears in row 1 although this file appears to be a valid Excel file type.' % (f,_target_toks)
                            else:
                                print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not appear to contain any columns in which the tokens %s appears in row 1 although this file appears to be a valid Excel file type.' % (f,_target_toks)
                        except ValueError:
                            print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not appear to be a valid Excel file type.' % (f)
                else:
                    print >>sys.stderr, 'Cannot use the --analysis argument because "%s" does not exist.' % (f)
        except:
            exc_info = sys.exc_info()
            info_string = '\n'.join(traceback.format_exception(*exc_info))
            print >>sys.stderr, '(%s) :: %s' % (_utils.funcName(),info_string)
        _isAnalysis = False
        # NOTE(security): eval() of a command-line value -- acceptable only for
        # a trusted operator tool; never expose this to untrusted input.
        try:
            _logging = eval(_argsObj.arguments['logging']) if _argsObj.arguments.has_key('logging') else False
        except:
            _logging = logging.WARNING
        # NOTE(security): encrypted credentials embedded in source; rotate and
        # move to a secrets store if this code is still in use.
        d_passwords = lists.HashedLists2()
        s = ''.join([chr(ch) for ch in [126,254,192,145,170,209,4,52,159,254,122,198,76,251,246,151]])
        pp = ''.join([ch for ch in decryptData(s).strip() if ord(ch) > 0])
        d_passwords['<EMAIL>'] = [pp,'NHZ3awCqrcoLt1MVG4n3on9z']
        s = ''.join([chr(ch) for ch in [39,200,142,151,251,164,142,15,45,216,225,201,121,177,89,252]])
        pp = ''.join([ch for ch in decryptData(s).strip() if ord(ch) > 0])
        d_passwords['<EMAIL>'] = [pp]
        print 'sys.version=[%s]' % sys.version
        v = _utils.getFloatVersionNumber()
        if (v >= 2.51):
            #print 'sys.path=[%s]' % '\n'.join(sys.path)
            if (not _isBeingDebugged):
                from vyperlogix.handlers.ExceptionHandler import *
                excp = ExceptionHandler()
                excp.callback = exception_callback
            from vyperlogix.misc._psyco import *
            importPsycoIfPossible(func=main,isVerbose=True)
            _username = '<EMAIL>'
            #ep = encryptData('...')
            #print 'ep=[%s]' % asReadableData(ep)
            print 'sys.argv=%s' % sys.argv
            _cwd = _folderPath
            print '_cwd=%s' % _cwd
            if (len(_cwd) > 0) and (os.path.exists(_cwd)):
                name = _utils.getProgramName()
                if (os.path.exists(_folderPath)):
                    _log_path = _utils.safely_mkdir(fpath=_folderPath)
                else:
                    _log_path = _utils.safely_mkdir(fpath=_cwd)
                logFileName = os.sep.join([_log_path,'%s_%s.log' % (_utils.timeStamp().replace(':','-'),name)])
                standardLogging.standardLogging(logFileName,_level=_logging,isVerbose=_isVerbose)
                logging.info('Logging to "%s" using level of "%s:.' % (logFileName,standardLogging.explainLogging(_logging)))
                # Redirect stdout/stderr into per-run log files.
                _stdOut = open(os.sep.join([_log_path,'stdout.txt']),'w')
                _stdErr = open(os.sep.join([_log_path,'stderr.txt']),'w')
                sys.stdout = Log(_stdOut)
                sys.stderr = Log(_stdErr)
                logging.warning('stdout to "%s".' % (_stdOut.name))
                logging.warning('stderr to "%s".' % (_stdErr.name))
                print >>sys.stdout, 'Command Line Arguments=%s' % (_argsObj)
                if (d_passwords.has_key(_username)):
                    _password = d_passwords[_username]
                else:
                    _password = []
                if (len(_username) > 0) and (len(_password) > 0):
                    logging.info('username is "%s", password is known and valid.' % (_username))
                    try:
                        sfdc = Connection.connect(_username, _password[0])
                        logging.info('sfdc=%s' % str(sfdc))
                        logging.info('sfdc.endpoint=%s' % str(sfdc.endpoint))
                        ioTimeAnalysis.initIOTime('%s' % (__name__))
                        ioTimeAnalysis.initIOTime('SOQL')
                        # Profile the run unless a debugger is attached.
                        if (_isBeingDebugged):
                            runWithAnalysis(main,[sfdc,csv])
                        else:
                            import cProfile
                            cProfile.run('runWithAnalysis(main,[sfdc,csv])', os.sep.join([_log_path,'profiler.txt']))
                    except NameError:
                        exc_info = sys.exc_info()
                        info_string = '\n'.join(traceback.format_exception(*exc_info))
                        logging.error(info_string)
                    except AttributeError:
                        exc_info = sys.exc_info()
                        info_string = '\n'.join(traceback.format_exception(*exc_info))
                        logging.error(info_string)
                    except ApiFault:
                        exc_info = sys.exc_info()
                        info_string = '\n'.join(traceback.format_exception(*exc_info))
                        logging.error(info_string)
                else:
                    logging.error('Cannot figure-out what username (%s) and password (%s) to use so cannot continue. Sorry !' % (_username,_password))
            else:
                print >> sys.stderr, 'ERROR: Missing the cwd parm which is the first parm on the command line.'
        else:
            logging.error('You are using the wrong version of Python, you should be using 2.51 or later but you seem to be using "%s".' % sys.version)
    _msg = 'Done !'
    logging.warning(_msg)
    print >> sys.stdout, _msg
    sys.stdout.close()
    sys.stderr.close()
    sys.exit(0)
| StarcoderdataPython |
class Solution(object):
    """Add two integers using only bitwise operations (LeetCode 371)."""

    def getSum(self, a, b):
        """
        :type a: int
        :type b: int
        :rtype: int
        """
        INT_MAX = 0x7FFFFFFF   # largest signed 32-bit value
        LOW_32 = 0xFFFFFFFF    # mask keeping only the low 32 bits
        while b != 0:
            # XOR adds without carrying; AND + shift produces the carry.
            # Masking emulates 32-bit wrap-around, which Python's unbounded
            # integers would otherwise never do.
            partial_sum = (a ^ b) & LOW_32
            carry = ((a & b) << 1) & LOW_32
            a, b = partial_sum, carry
        # Results above INT_MAX encode negative 32-bit numbers; convert the
        # two's-complement pattern back to Python's native negative form.
        return a if a <= INT_MAX else ~(a ^ LOW_32)
# Python has more than 32 bits for integers. You can try to run "print 2
# ** 31"and Python would shows the exact number correctly, while other
# languages like Java would not. Java only recognizes - 2 ** 31 to 2 ** 31
# - 1.
# How does integers presented in Python differ from integers in 32 - bit e.g. Java?
# From what I heard, Python has 64 bits. (Please let me know if I am wrong.)
# So 1 in Python would look like 0x0000000000000001, but it looks like 0x00000001 in 32 - bit format.
# -1 in Python would look like 0xFFFFFFFFFFFFFFFF, but it looks like 0xFFFFFFFF in 32 - bit format.
# It seems that the input given by LC is in 32 - bit format. Since Python
# would treat it as positive with 1 on the 32 position, we have to use
# mask to treat it as negative.
| StarcoderdataPython |
37948 |
import matplotlib.pyplot as plt
from sdaudio.callables import Circular
from sdaudio.callables import Constant
from sdaudio import draw
from sdaudio import wavio
from sdaudio.wt_oscillators import Choruses
def main():
    """Generate sawtooth demo waveforms (with and without chorus) and plots.

    Writes four WAV files to the working directory and opens matplotlib
    windows; all audio work is delegated to the sdaudio package.
    """
    #-------------------------------------------------------------------------
    # sawtooth demo
    print("Generating 60 Hz sawtooth, no chorus")
    sr = 8000          # sample rate in Hz
    dur = 7.0          # duration in seconds
    freqs = draw.line(sr, dur, 60, 60)   # constant 60 Hz frequency track
    x = draw.sawtooth(sr, dur, Circular(freqs), n = 5)
    plt.figure()
    plt.plot(x)
    plt.xlim([0, 3000])
    plt.grid(True)
    plt.title('Sawtooth, n = 5, no chorus')
    fout = 'saw-no-chorus.wav'
    print("Writing: %s" % fout)
    # 0.666 scales the signal down to leave headroom in the WAV output.
    wavio.write(fout, 0.666 * x, sr)
    #-------------------------------------------------------------------------
    # sawtooth oscillator with chorus
    print("Generating 60 Hz sawtooth, with chorus")
    table = draw.sawtooth(sr, 1.0, Constant(1.0))
    # NOTE(review): only the last `chorus` assignment takes effect — the two
    # lines above it look like leftover experiments; confirm before removing.
    chorus = [0.99, 1.0, 1.01]
    chorus = [0.97, 1.0, 1.03]
    chorus = [0.991234134, 1.012983475290375]
    gen = Choruses(sr, table, chorus)
    x = gen.generate(dur, Circular(freqs))
    plt.figure()
    plt.plot(x)
    plt.xlim([0, 3000])
    plt.grid(True)
    plt.title('Sawtooth, n = 5, with chorus')
    fout = 'saw-with-chorus.wav'
    print("Writing: %s" % fout)
    wavio.write(fout, 0.666 * x, sr)
    #-------------------------------------------------------------------------
    # freq ramp
    print("Generating sawtooth ramp, no chorus")
    freqs = draw.line(sr, dur, 40, 200)   # sweep 40 Hz -> 200 Hz
    x = draw.sawtooth(sr, dur, Circular(freqs))
    fout = 'saw-ramp-no-chorus.wav'
    print("Writing: %s" % fout)
    wavio.write(fout, 0.666 * x, sr)
    print("Generating sawtooth ramp, with chorus")
    x = gen.generate(dur, Circular(freqs))
    fout = 'saw-ramp-with-chorus.wav'
    print("Writing: %s" % fout)
    wavio.write(fout, 0.666 * x, sr)
    plt.show()
if __name__ == "__main__":
main() | StarcoderdataPython |
6471014 | <filename>main/migrations/0021_auto_20161208_1214.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-08 11:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: replaces the TournamentProfile
    # through-model with a named TournamentTeam model and re-points
    # Tournament at it. Do not edit operations by hand unless squashing.

    dependencies = [
        ('main', '0020_tournament_profiles'),
    ]

    operations = [
        migrations.CreateModel(
            name='TournamentTeam',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('profiles', models.ManyToManyField(to='main.Profile', verbose_name='profiler')),
            ],
            options={
                'verbose_name': 'hold',
                'verbose_name_plural': 'hold',
            },
        ),
        migrations.RemoveField(
            model_name='tournamentprofile',
            name='profile',
        ),
        migrations.RemoveField(
            model_name='tournamentprofile',
            name='tournament',
        ),
        migrations.RemoveField(
            model_name='tournament',
            name='profiles',
        ),
        migrations.DeleteModel(
            name='TournamentProfile',
        ),
        migrations.AddField(
            model_name='tournament',
            name='teams',
            field=models.ManyToManyField(to='main.TournamentTeam'),
        ),
    ]
| StarcoderdataPython |
5135488 | <reponame>justinforbes/munin
#!/usr/bin/python
import json
import sys
import colorama
from colorama import Fore,Style
# Initialize colorama so ANSI color codes work on Windows consoles too
# (no-op on POSIX terminals). Without this, Fore.RED prints raw escapes.
colorama.init()

# Report script: expects a munin JSON result file as the first CLI argument.
with open(sys.argv[1], "r") as f:
    data = json.load(f)

print("----------------------------------------------------")
print("Total Hash Count : %s" % len(data))
print("Showing Suspicious and Malicious Entries Only")
print("----------------------------------------------------")

for x in data:
    # Skip entries rated 'unknown' or 'clean'; report everything else.
    if not any(s in x['rating'] for s in ('unknown', 'clean')):
        if 'malicious' in x['rating']:
            # Malicious entries are highlighted in red and include filenames.
            print(Fore.RED + "%s has been detected by %s AVs and rated as %s, Possible Filenames include %s." % (x['hash'], x['result'], x['rating'], x['filenames']))
            print(Style.RESET_ALL, end='')
        else:
            print("%s has been detected by %s AVs and rated as %s." % (x['hash'], x['result'], x['rating']))
| StarcoderdataPython |
3295558 | <reponame>JesusManuelPuentesG/ML_Python_Metricas
…
# Librería necesaria para utilizar la matriz de confusión
from sklearn.metrics import confusion_matrix
…
matrix = confusion_matrix(y_test, yhat_classes)
print()
print('Matriz de Confusión: ')
print()
print(matrix)
…
| StarcoderdataPython |
11205619 | #!/usr/bin/env python
import typing
# total=False: every key is optional — a value carries exactly one of the
# DynamoDB type tags (B, BOOL, L, M, N, NS, NULL, S, SS, ...).
class AttributeValue(typing.TypedDict, total=False):
    """
    AttributeValue https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_AttributeValue.html
    Attributes:
    ----------
    B: str
    BS: typing.List[str]
    BOOL: bool
    L: typing.List
    M: typing.Dict
    N: str
    NS: typing.List[str]
    NULL: bool
    S: str
    SS: typing.List[str]
    """

    B: str
    BS: typing.List[str]
    BOOL: bool
    L: typing.List
    M: typing.Dict
    N: str
    NS: typing.List[str]
    NULL: bool
    S: str
    SS: typing.List[str]
# total=False: which keys appear depends on the stream's StreamViewType.
class StreamRecord(typing.TypedDict, total=False):
    """
    StreamRecord https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_StreamRecord.html
    Attributes:
    ----------
    ApproximateCreationDateTime: typing.Optional[int]
    Keys: typing.Dict[str, :py:class:`AttributeValue`]
    NewImage: typing.Dict[str, :py:class:`AttributeValue`]
    OldImage: typing.Optional[typing.Dict[str, AttributeValue]]
    SequenceNumber: str
    SizeBytes: int
    StreamViewType: typing.Literal['KEYS_ONLY', 'NEW_IMAGE', 'OLD_IMAGE', 'NEW_AND_OLD_IMAGES']
    """

    ApproximateCreationDateTime: typing.Optional[int]
    Keys: typing.Dict[str, AttributeValue]
    NewImage: typing.Dict[str, AttributeValue]
    OldImage: typing.Optional[typing.Dict[str, AttributeValue]]
    SequenceNumber: str
    SizeBytes: int
    StreamViewType: typing.Literal['KEYS_ONLY', 'NEW_IMAGE', 'OLD_IMAGE', 'NEW_AND_OLD_IMAGES']
# One entry of a DynamoDB Streams event batch (total=False: keys optional).
class DynamodbRecord(typing.TypedDict, total=False):
    """
    DynamodbRecord https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_Record.html
    Attributes:
    ----------
    awsRegion: str
    dynamodb: :py:class:`StreamRecord`
    eventID: str
    eventName: typing.Literal['INSERT', 'MODIFY', 'REMOVE']
    eventSource: str
    eventSourceARN: str
    eventVersion: str
    userIdentity: typing.Optional[typing.Any]
    """

    awsRegion: str
    dynamodb: StreamRecord
    eventID: str
    eventName: typing.Literal['INSERT', 'MODIFY', 'REMOVE']
    eventSource: str
    eventSourceARN: str
    eventVersion: str
    userIdentity: typing.Optional[typing.Any]
# Top-level shape of the Lambda event payload for a DynamoDB stream trigger.
class DynamoDBStreamEvent(typing.TypedDict):
    """
    DynamoDBStreamEvent https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html
    Attributes:
    ----------
    Records: typing.List[:py:class:`DynamodbRecord`]
    """

    Records: typing.List[DynamodbRecord]
| StarcoderdataPython |
11288933 | from targets.pipes.base import (
InputPipeProcessWrapper,
IOPipeline,
OutputPipeProcessWrapper,
)
class Bzip2Pipeline(IOPipeline):
    """Byte pipeline that transparently (de)compresses via external bzip2."""

    # Both ends of the pipeline carry raw bytes.
    input = "bytes"
    output = "bytes"

    def pipe_reader(self, input_pipe):
        # Decompress on read by streaming the upstream pipe through `bzcat`.
        return InputPipeProcessWrapper(["bzcat"], input_pipe)

    def pipe_writer(self, output_pipe):
        # Compress on write by streaming through the `bzip2` subprocess.
        return OutputPipeProcessWrapper(["bzip2"], output_pipe)
| StarcoderdataPython |
358639 | <filename>leetcode/116_填充每个节点的下一个右侧节点指针.py
# -*- coding:utf-8 -*-
# author: hpf
# create time: 2020/12/15 11:07
# file: 116_填充每个节点的下一个右侧节点指针.py
# IDE: PyCharm
# 题目描述:
'''
给定一个 完美二叉树 ,其所有叶子节点都在同一层,每个父节点都有两个子节点。二叉树定义如下:
struct Node {
int val;
Node *left;
Node *right;
Node *next;
}
填充它的每个 next 指针,让这个指针指向其下一个右侧节点。如果找不到下一个右侧节点,则将 next 指针设置为 NULL。
初始状态下,所有 next 指针都被设置为 NULL。
进阶:
你只能使用常量级额外空间。
使用递归解题也符合要求,本题中递归程序占用的栈空间不算做额外的空间复杂度。
输入:root = [1,2,3,4,5,6,7]
输出:[1,#,2,3,#,4,5,6,7,#]
解释:给定二叉树如图 A 所示,你的函数应该填充它的每个 next 指针,以指向其下一个右侧节点,如图 B 所示。序列化的输出按层序遍历排列,同一层节点由 next 指针连接,'#' 标志着每一层的结束。
提示:
树中节点的数量少于 4096
-1000 <= node.val <= 1000
'''
# Definition for a Node.
class Node:
    """Binary-tree node carrying a level-order ``next`` pointer."""

    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
        # Store value, children, and the right-neighbour link in one shot.
        self.val, self.left, self.right, self.next = val, left, right, next
# 解法一: 递归
class Solution1:
    # Approach 1: recursion over sibling/cousin pairs.
    def connect(self, root: 'Node') -> 'Node':
        """Wire each node's ``next`` to its right neighbour in a perfect tree."""
        def link(lhs, rhs):
            if not lhs or not rhs:
                return
            # Pre-order: connect this pair first.
            lhs.next = rhs
            # Siblings that share a parent...
            link(lhs.left, lhs.right)
            link(rhs.left, rhs.right)
            # ...and the cousins across the gap between the two subtrees.
            link(lhs.right, rhs.left)
        if not root:
            return root
        link(root.left, root.right)
        return root
# 解法二: 递归
class Solution2:
    # Approach 2: recursion that zips the inner seam between the two subtrees.
    def connect(self, root: 'Node') -> 'Node':
        """Populate ``next`` pointers by walking each node's inner seam."""
        def wire(node):
            if not node:
                return node
            lhs, rhs = node.left, node.right
            # The right spine of the left child meets the left spine of the
            # right child, one level at a time (tree is perfect).
            while lhs:
                lhs.next = rhs
                lhs, rhs = lhs.right, rhs.left
            wire(node.left)
            wire(node.right)
        wire(root)
        return root
211659 | <reponame>mrslow/cbr-client
import pytest
from cbr_client import Task
from conftest import base_url, correct_headers, tasks_json
@pytest.mark.asyncio
async def test_tasks(httpx_mock, client):
    """get_tasks() parses the /tasks JSON payload into a list of Task objects."""
    # Serve the canned payload for the expected GET request; match_headers
    # also verifies the client sends the correct auth headers.
    httpx_mock.add_response(status_code=200,
                            json=tasks_json,
                            headers={'Content-Type': 'application/json'},
                            method='GET',
                            url=f'{base_url}/back/rapi2/tasks',
                            match_headers=correct_headers)
    tasks = await client.get_tasks()
    assert isinstance(tasks, list)
    assert isinstance(tasks[0], Task)
| StarcoderdataPython |
8051694 | <reponame>DominicOram/ophyd<filename>ophyd/log.py<gh_stars>10-100
# The LogFormatter is adapted light from tornado, which is licensed under
# Apache 2.0. See other_licenses/ in the repository directory.
import logging
import sys
try:
import colorama
colorama.init()
except ImportError:
colorama = None
try:
import curses
except ImportError:
curses = None
# Public API of ophyd.log.
__all__ = (
    "config_ophyd_logging",
    "get_handler",
    "logger",
    "control_layer_logger",
    "set_handler",
)
def _stderr_supports_color():
try:
if hasattr(sys.stderr, "isatty") and sys.stderr.isatty():
if curses:
curses.setupterm()
if curses.tigetnum("colors") > 0:
return True
elif colorama:
if sys.stderr is getattr(
colorama.initialise, "wrapped_stderr", object()
):
return True
except Exception:
# Very broad exception handling because it's always better to
# fall back to non-colored logs than to break at startup.
pass
return False
class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado, modified for Python3-only ophyd.
    Key features of this formatter are:
    * Color support when logging to a terminal that supports it.
    * Timestamps on every log line.
    * Robust against str/bytes encoding problems.
    This formatter is enabled automatically by
    `tornado.options.parse_command_line` or `tornado.options.parse_config_file`
    (unless ``--logging=none`` is used).
    Color support on Windows versions that do not support ANSI color codes is
    enabled by use of the colorama__ library. Applications that wish to use
    this must first initialize colorama with a call to ``colorama.init``.
    See the colorama documentation for details.
    __ https://pypi.python.org/pypi/colorama
    .. versionchanged:: 4.5
       Added support for ``colorama``. Changed the constructor
       signature to be compatible with `logging.config.dictConfig`.
    """

    DEFAULT_FORMAT = "%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
    DEFAULT_DATE_FORMAT = "%y%m%d %H:%M:%S"
    # Terminal color indices per logging level (fed to curses setaf/setf).
    DEFAULT_COLORS = {
        logging.DEBUG: 4,  # Blue
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
    }

    def __init__(
        self,
        fmt=DEFAULT_FORMAT,
        datefmt=DEFAULT_DATE_FORMAT,
        style="%",
        color=True,
        colors=DEFAULT_COLORS,
    ):
        r"""
        :arg bool color: Enables color support.
        :arg str fmt: Log message format.
            It will be applied to the attributes dict of log records. The
            text between ``%(color)s`` and ``%(end_color)s`` will be colored
            depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
            code
        :arg str datefmt: Datetime format.
            Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
        .. versionchanged:: 3.2
           Added ``fmt`` and ``datefmt`` arguments.
        """
        super().__init__(datefmt=datefmt)
        self._fmt = fmt
        # Map levelno -> ANSI escape sequence; empty when color is unusable.
        self._colors = {}
        if color and _stderr_supports_color():
            if curses is not None:
                # The curses module has some str/bytes confusion in
                # python3. Until version 3.2.3, most methods return
                # bytes, but only accept strings. In addition, we want to
                # output these strings with the logging module, which
                # works with unicode strings. The explicit calls to
                # unicode() below are harmless in python2 but will do the
                # right conversion in python 3.
                fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or ""
                for levelno, code in colors.items():
                    self._colors[levelno] = str(curses.tparm(fg_color, code), "ascii")
                self._normal = str(curses.tigetstr("sgr0"), "ascii")
            else:
                # If curses is not present (currently we'll only get here for
                # colorama on windows), assume hard-coded ANSI color codes.
                for levelno, code in colors.items():
                    self._colors[levelno] = "\033[2;3%dm" % code
                self._normal = "\033[0m"
        else:
            self._normal = ""

    def format(self, record):
        message = []
        # Prefix the message with contextual tags added by ophyd's
        # LoggerAdapters, when present on the record.
        if hasattr(record, "ophyd_object_name"):
            message.append(f"[{record.ophyd_object_name}]")
        elif hasattr(record, "status"):
            message.append(f"[{record.status}]")
        else:
            ...
        message.append(record.getMessage())
        record.message = " ".join(message)
        record.asctime = self.formatTime(record, self.datefmt)
        try:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        except KeyError:
            # Unmapped level: emit without color codes.
            record.color = ""
            record.end_color = ""
        formatted = self._fmt % record.__dict__
        if record.exc_info and not record.exc_text:
            record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            formatted = "{}\n{}".format(formatted.rstrip(), record.exc_text)
        # Indent continuation lines so multi-line records read as one entry.
        return formatted.replace("\n", "\n    ")
# Default log-line layouts: plain (no ANSI codes) and colored variants.
plain_log_format = (
    "[%(levelname)1.1s %(asctime)s.%(msecs)03d %(module)s:%(lineno)d] %(message)s"
)
color_log_format = (
    "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs)03d "
    "%(module)s:%(lineno)d]%(end_color)s %(message)s"
)
def validate_level(level) -> int:
    """
    Return an int for level comparison.

    Accepts either an integer level or a standard logging level name
    ('CRITICAL', 'FATAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG').

    Raises
    ------
    ValueError
        If *level* is not an int and not a recognized level name.
    """
    if isinstance(level, int):
        levelno = level
    elif isinstance(level, str):
        # getLevelName maps a known name to its int; an unknown name yields
        # the string "Level <name>", which fails the isinstance check below.
        levelno = logging.getLevelName(level)
    else:
        # Bug fix: previously a level that was neither int nor str fell
        # through with `levelno` unbound, raising UnboundLocalError instead
        # of the documented ValueError.
        levelno = None
    if isinstance(levelno, int):
        return levelno
    raise ValueError(
        "Your level is illegal, please use "
        "'CRITICAL', 'FATAL', 'ERROR', 'WARNING', 'INFO', or 'DEBUG'."
    )
# Package-level loggers; handlers are attached via config_ophyd_logging().
logger = logging.getLogger("ophyd")
control_layer_logger = logging.getLogger("ophyd.control_layer")
current_handler = None  # overwritten below
def config_ophyd_logging(
    file=sys.stdout, datefmt="%H:%M:%S", color=True, level="WARNING"
):
    """
    Set a new handler on the ``logging.getLogger('ophyd')`` logger.
    If this is called more than once, the handler from the previous invocation
    is removed (if still present) and replaced.
    Parameters
    ----------
    file : object with ``write`` method or filename string
        Default is ``sys.stdout``.
    datefmt : string
        Date format. Default is ``'%H:%M:%S'``.
    color : boolean
        Use ANSI color codes. True by default.
    level : str or int
        Python logging level, given as string or corresponding integer.
        Default is 'WARNING'.
    Returns
    -------
    handler : logging.Handler
        The handler, which has already been added to the 'ophyd' logger.
    Examples
    --------
    Log to a file.
    >>> config_ophyd_logging(file='/tmp/what_is_happening.txt')
    Include the date along with the time. (The log messages will always include
    microseconds, which are configured separately, not as part of 'datefmt'.)
    >>> config_ophyd_logging(datefmt="%Y-%m-%d %H:%M:%S")
    Turn off ANSI color codes.
    >>> config_ophyd_logging(color=False)
    Increase verbosity: show level DEBUG or higher.
    >>> config_ophyd_logging(level='DEBUG')
    """
    global current_handler
    # A string `file` means "log to this path"; anything else is a stream.
    if isinstance(file, str):
        handler = logging.FileHandler(file)
    else:
        handler = logging.StreamHandler(file)
    levelno = validate_level(level)
    handler.setLevel(levelno)
    if color:
        log_format = color_log_format
    else:
        log_format = plain_log_format
    handler.setFormatter(LogFormatter(log_format, datefmt=datefmt))
    # Replace the handler installed by a previous call, if any.
    if current_handler in logger.handlers:
        logger.removeHandler(current_handler)
    logger.addHandler(handler)
    current_handler = handler
    # Lower the logger threshold if it would otherwise filter out records
    # before they reach the new handler.
    if logger.getEffectiveLevel() > levelno:
        logger.setLevel(levelno)
    return handler

set_handler = config_ophyd_logging  # for back-compat
def get_handler():
    """
    Return the handler configured by the most recent call to :func:`config_ophyd_logging`.
    If :func:`config_ophyd_logging` has not yet been called, this returns ``None``.
    """
    # Module-level state maintained by config_ophyd_logging().
    return current_handler
| StarcoderdataPython |
8001622 | <reponame>LeonidPavlov/home_budget
from genericpath import isdir
import os
from src.storage.storage import Storage
# Module-level fixture: all tests below share this Storage instance, and the
# tests are order-dependent (absent -> exists).
storage = Storage('test_db_dir', 'test_db.db')

def test_create_directory_when_directory_absent() -> None:
    # First call creates the directory and reports success.
    assert(storage.create_directory() == True)

def test_create_directory_when_directory_exist() -> None:
    # Second call finds the directory already present.
    assert(storage.create_directory() == False)

def test_create_db_file_when_it_not_exist() -> None:
    assert(storage.create_database_file() == True)

def test_create_db_file_when_it_exist() -> None:
    # NOTE(review): asserts True even when the file already exists — confirm
    # this matches Storage.create_database_file's intended contract.
    assert(storage.create_database_file() == True)

# Clean up at import time so repeated runs start from a missing directory.
os.remove('test_db_dir/test_db.db')
os.rmdir('test_db_dir')
| StarcoderdataPython |
5099169 | # Stand-alone example test file:
# - define equivalent old- and new-style flows
# - parameterize a test to run each flow and verify its data artifacts
from metaflow import FlowSpec, Parameter
import metaflow.api as ma
from metaflow.api import foreach, join, step
from metaflow.tests.utils import parametrize, run
class OldSumSquares(FlowSpec):
    """Classic-style Metaflow flow: sum of squares of 1..num via foreach."""

    num = Parameter("num", required=True, type=int, default=4)

    @step
    def start(self):
        self.nums = list(range(1, self.num + 1))
        # Fan out: one `square` task per element of self.nums.
        self.next(self.square, foreach='nums')

    @step
    def square(self):
        # `self.input` is this branch's element from the foreach split.
        self.num2 = self.input**2
        self.next(self.sum)

    @step
    def sum(self, inputs):
        # Join step: aggregate num2 across all foreach branches.
        self.sum2 = sum(input.num2 for input in inputs)
        self.next(self.end)

    @step
    def end(self):
        print("Sum of squares up to %d: %d" % (int(self.num), int(self.sum2)))
class NewSumSquares(ma.FlowSpec):
    """New-style equivalent of OldSumSquares using decorator-driven
    foreach/join transitions instead of explicit self.next() calls."""

    num = Parameter("num", required=True, type=int, default=4)

    @step
    def start(self):
        self.nums = list(range(1, self.num + 1))

    # @foreach fans out over self.nums; each branch receives its element.
    @foreach('nums')
    def square(self, num):
        self.num2 = num**2

    # @join collects all foreach branches back together.
    @join
    def sum(self, inputs):
        self.sum2 = sum(input.num2 for input in inputs)

    @step
    def end(self):
        print("Sum of squares up to %d: %d" % (int(self.num), int(self.sum2)))
@parametrize('flow', [ OldSumSquares, NewSumSquares, ])
def test_simple_foreach(flow):
    """Both flow styles must produce identical artifacts for default num=4."""
    data = run(flow)
    # 1 + 4 + 9 + 16 == 30
    assert data == { 'num': 4, 'sum2': 30, }
| StarcoderdataPython |
1613994 | import time
import json
import os
import datetime
import dateutil.parser
from dateutil.tz import tzutc
import pytest
from waiting import wait
from replicate.heartbeat import Heartbeat
def test_heartbeat_running(tmpdir):
    """Heartbeat lifecycle: start/kill/ensure_running toggle is_alive()."""
    tmpdir = str(tmpdir)
    path = "foo/heartbeat.json"
    heartbeat = Heartbeat(
        "experiment-id-foo",
        "file://" + tmpdir,
        path,
        refresh_interval=datetime.timedelta(seconds=1),
    )
    # Not started yet.
    assert not heartbeat.is_alive()
    heartbeat.start()
    assert heartbeat.is_alive()
    heartbeat.kill()
    # Give the background thread a moment to actually stop.
    time.sleep(0.1)
    assert not heartbeat.is_alive()
    # ensure_running() must restart a killed heartbeat.
    heartbeat.ensure_running()
    assert heartbeat.is_alive()
    heartbeat.kill()
@pytest.mark.skip(reason="fix blocked on #436")
def test_heartbeat_write(tmpdir):
    """The heartbeat file's last_heartbeat timestamp advances on refresh."""
    tmpdir = str(tmpdir)
    t1 = datetime.datetime.utcnow().replace(tzinfo=tzutc())
    path = "foo/heartbeat.json"
    heartbeat = Heartbeat(
        "experiment-id-foo",
        "file://" + tmpdir,
        path,
        refresh_interval=datetime.timedelta(seconds=0.1),
    )
    heartbeat.start()
    heartbeat_path = os.path.join(tmpdir, "foo", "heartbeat.json")
    wait(lambda: os.path.exists(heartbeat_path), timeout_seconds=1, sleep_seconds=0.01)
    # sleep a little extra in case the file is created but not yet written
    time.sleep(0.01)
    with open(heartbeat_path) as f:
        obj = json.loads(f.read())
    last_heartbeat = dateutil.parser.parse(obj["last_heartbeat"])
    t2 = datetime.datetime.utcnow().replace(tzinfo=tzutc())
    # First write lands between the two wall-clock samples.
    assert t1 < last_heartbeat < t2
    # Wait past the refresh interval: the timestamp must have advanced.
    time.sleep(0.2)
    with open(heartbeat_path) as f:
        obj = json.loads(f.read())
    new_last_heartbeat = dateutil.parser.parse(obj["last_heartbeat"])
    assert t1 < last_heartbeat < t2 < new_last_heartbeat
    heartbeat.kill()
| StarcoderdataPython |
11231755 | <filename>tests/test_opttools.py
import io
import os
import sys
import math
import unittest
import unittest.mock
from libcli import default, command, error, run
import libcli.opttools as opttools
class TestException(Exception):
    # Registered with no errno, so opttools maps it to its default exit code.
    pass

class TestException32(Exception):
    # Registered with errno=32; run() must exit with that code.
    pass

class TestExceptionUnexpected(Exception):
    # Never registered: must propagate out of OptionHandler.run unchanged.
    pass
class TestCommandHandler(unittest.TestCase):
    """Exercises opttools.CommandHandler: option hints, type guessing/parsing,
    positional-argument handling, and the structure/option error paths.
    NOTE: inner-function docstrings and hint strings are parsed by opttools
    at runtime and must not be altered."""

    def setUp(self):
        self.mock = unittest.mock.MagicMock()

    def test_commandhandler_construct(self):
        def func(*args, aflag=None, bflag=None, cflag=None):
            self.mock(*args, aflag=aflag, bflag=bflag, cflag=cflag)
        opttools.CommandHandler(func)(['test', '--aflag', '--bflag', '--cflag'])
        self.mock.assert_called_once_with(aflag='None', bflag='None', cflag='None')

    def test_commandhandler_construct_hint(self):
        def func(*args, aflag=None, bflag=None, cflag=None):
            self.mock(*args, aflag=aflag, bflag=bflag, cflag=cflag)
        opttools.CommandHandler(func, aflag='_a', bflag='_b', cflag='_c::=-')\
                (['test', '-a', '-b', '-c'])
        self.mock.assert_called_once_with(aflag='', bflag='', cflag='-')

    def test_commandhandler_construct_guess(self):
        def func(*, i=0, s='', b=False, l=[]):
            self.mock(i=i, s=s, b=b, l=l)
        opttools.CommandHandler(func)(['test'])
        self.mock.assert_called_once_with(i=0, s='', b=False, l=[])

    def test_commandhandler_construct_guess_parse(self):
        def func(*, i=0, s='', b=False, l=[]):
            self.mock(i=i, s=s, b=b, l=l)
        opttools.CommandHandler(func)\
                (['test', '--i', '16', '--s', 's', '--b', 'N', '--l', 'x,y,z'])
        self.mock.assert_called_once_with(i=16, s='s', b=False, l=['x', 'y', 'z'])

    def test_commandhandler_construct_guess_parse_dict(self):
        def func(*, di={}):
            self.mock(di)
        opttools.CommandHandler(func)(['test', '--d', 'a=1,b=2,c'])
        self.mock.assert_called_once_with([('a', '1'), ('b', '2'), ('c', None)])

    def test_commandhandler_construct_mono_positional_args(self):
        def func(input):
            self.mock(input)
        ch = opttools.CommandHandler(func, input='i::str=-')
        ch(['test', '--input=foobar'])
        self.mock.assert_called_once_with('foobar')
        with self.assertRaises(opttools.OptionError):
            ch(['test'])

    # DEPRECATED since 0.3
    #def test_commandhandler_construct_many_positional_args(self):
        #with self.assertRaises(opttools.StructureError):
            #def func(a, b):
                #pass # pragma no cover
            #opttools.CommandHandler(func)(['test', 'a', 'b'])

    def test_commandhandler_construct_mix_positional_and_variable_args(self):
        with self.assertRaises(opttools.StructureError):
            def func(a, *b):
                pass # pragma no cover
            opttools.CommandHandler(func)(['test'])

    def test_commandhandler_construct_required_option(self):
        def func(a, *, b):
            self.mock(a, b)
        opttools.CommandHandler(func, a='_a:', b='_b:str')\
                (['test', '-a', 'vala', '-bvalb'])

    def test_commandhandler_construct_required_option_with_default(self):
        with self.assertRaises(opttools.StructureError):
            def func(a, *, b):
                pass # pragma no cover
            opttools.CommandHandler(func, a='_a:=a', b='_b:str')\
                    (['test', '-a', 'vala', '-bvalb'])

    def test_commandhandler_construct_hint_duplicate(self):
        with self.assertRaises(opttools.StructureError):
            def func(*, a, b):
                pass # pragma no cover
            opttools.CommandHandler(func, a='d', b='d')(['test', 'a', 'b'])

    def test_commandhandler_construct_hint_optional_without_default(self):
        with self.assertRaises(opttools.StructureError):
            def func(*, a, b):
                pass # pragma no cover
            opttools.CommandHandler(func, a='::', b='::=')(['test', 'a', 'b'])

    def test_commandhandler_parse_integers(self):
        def func(*, h, d, o, b):
            self.mock(h=h, d=d, o=o, b=b)
        opttools.CommandHandler(func, h='h:hex', d='d:dec', o='o:oct', b='b:bin')\
                (['test', '-h', '10', '-d', '10', '-o', '10', '-b', '10'])
        self.mock.assert_called_once_with(h=16, d=10, o=8, b=2)

    def test_commandhandler_parse_integers_smart(self):
        def func(*, h, d, o, b):
            self.mock(h=h, d=d, o=o, b=b)
        opttools.CommandHandler(func, h='h:int', d='d:int', o='o:int', b='b:int')\
                (['test', '-h', '0X10', '-d', '012', '-o', '0O10', '-b', '0B10'])
        self.mock.assert_called_once_with(h=16, d=10, o=8, b=2)

    def test_commandhandler_parse_invalid_option(self):
        with self.assertRaises(opttools.OptionError):
            def func(*, s):
                pass # pragma no cover
            opttools.CommandHandler(func, s=':str')(['test', '-t'])

    def test_commandhandler_parse_missing_option(self):
        with self.assertRaises(opttools.OptionError):
            def func(*, s):
                pass # pragma no cover
            opttools.CommandHandler(func, s=':str')(['test'])

    # DEPRECATED since 0.3
    #def test_commandhandler_parse_duplicated_option(self):
        #with self.assertRaises(opttools.OptionError):
            #def func(s, *args):
                #pass # pragma no cover
            #opttools.CommandHandler(func, s='s:str')\
                    #(['test', '-s', 'once', 'twice'])
class TestCommandHandlerDebug(TestCommandHandler):
    """Re-runs every TestCommandHandler case with opttools.DEBUG enabled,
    capturing stderr so debug output does not pollute the test run."""

    def setUp(self):
        super().setUp()
        opttools.DEBUG = True
        self.stderr = io.StringIO()
        self._stderr = sys.stderr
        sys.stderr = self.stderr

    def tearDown(self):
        super().tearDown()
        opttools.DEBUG = False
        sys.stderr = self._stderr
class TestOptionHandler(unittest.TestCase):
def setUp(self):
self.opthdr = opttools.OptionHandler()
self.opthdr.error(TestException)
self.opthdr.error(TestException32, errno=32)
self.mock = unittest.mock.MagicMock()
def test_optionhandler_run(self):
@self.opthdr.command
@self.opthdr.default
def func(*args):
self.mock(*args)
self.opthdr.run(['test', 'arg0', 'arg1', 'arg2'])
self.mock.assert_called_once_with('arg0', 'arg1', 'arg2')
def test_optionhandler_run_sys_argv(self):
@self.opthdr.command
@self.opthdr.default
def func(*args):
self.mock(*args)
with unittest.mock.patch('sys.argv', ['test', 'arg0', 'arg1', 'arg2']):
self.opthdr.run()
self.mock.assert_called_once_with('arg0', 'arg1', 'arg2')
def test_optionhandler_run_withoud_stack_frame(self):
with unittest.mock.patch('inspect.currentframe', lambda: None):
@self.opthdr.command
@self.opthdr.default
def func(*args):
self.mock(*args)
self.opthdr.run(['test', 'arg0', 'arg1', 'arg2'])
self.mock.assert_called_once_with('arg0', 'arg1', 'arg2')
def test_optionhandler_default_duplicated(self):
with self.assertRaises(opttools.StructureError):
@self.opthdr.default
@self.opthdr.default
def func(*args):
pass # pragma no cover
def test_optionhandler_command_duplicated(self):
with self.assertRaises(opttools.StructureError):
@self.opthdr.command
@self.opthdr.command
def func(*args):
pass # pragma no cover
def test_optionhandler_default_duplicated_withoud_stack_frame(self):
with unittest.mock.patch('inspect.currentframe', lambda: None):
with self.assertRaises(opttools.StructureError):
@self.opthdr.default
@self.opthdr.default
def func(*args):
pass # pragma no cover
def test_optionhandler_command_duplicated_withoud_stack_frame(self):
with unittest.mock.patch('inspect.currentframe', lambda: None):
with self.assertRaises(opttools.StructureError):
@self.opthdr.command
@self.opthdr.command
def func(*args):
pass # pragma no cover
def test_optionhandler_with_invalid_defaut(self):
with self.assertRaises(opttools.StructureError):
self.opthdr.default(object())
def test_optionhandler_with_invalid_command(self):
with self.assertRaises(opttools.StructureError):
self.opthdr.command(object())
def test_optionhandler_except_default(self):
with self.assertRaises(SystemExit) as cm:
@self.opthdr.default
def func(*args):
raise TestException
self.opthdr.run(['test'])
self.assertEqual(cm.exception.code, 127) # opttools default
def test_optionhandler_except_custom(self):
with self.assertRaises(SystemExit) as cm:
@self.opthdr.default
def func(*args):
raise TestException32
self.opthdr.run(['test'])
self.assertEqual(cm.exception.code, 32)
def test_optionhandler_except_unexpected(self):
with self.assertRaises(TestExceptionUnexpected):
@self.opthdr.default
def func(*args):
raise TestExceptionUnexpected
self.opthdr.run(['test'])
def test_optionhandler_unexpected_option(self):
with unittest.mock.patch('sys.stderr', new=io.StringIO()) as stderr:
with self.assertRaises(SystemExit) as cm:
@self.opthdr.default
def func():
pass
self.opthdr.run(['test', 'unexpected'])
self.assertEqual(cm.exception.code, 127) # opttools default
self.assertTrue(stderr.tell() > 0)
def test_optionhandler_class_chain(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
@self.opthdr.default(n='_n:int')
class Counter():
def __init__(self, *, n=0):
self._value = 0
@self.opthdr.command(n='_n:int')
def add(self, *, n=1):
self._value += n
return self
@self.opthdr.command
def value(self):
print(self._value)
return self
self.opthdr.run(['test', 'add', '-n20', 'add', 'value'])
self.assertEqual(stdout.getvalue(), '21\n')
def test_optionhandler_without_default(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
@self.opthdr.command
def func():
print('Hello world!')
self.opthdr.run(['test', 'func'])
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
def test_optionhandler_with_invalid_exception(self):
with self.assertRaises(opttools.StructureError):
@self.opthdr.error()
class NotAnException(BaseException):
pass
def test_optionhandler_docstring(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
@self.opthdr.command
def sin(deg=0):
"""
:param float deg: Print sine of deg degrees.
"""
print('{:0.3f}'.format(math.sin(math.radians(deg))))
self.opthdr.run(['test', 'sin', '90'])
self.assertEqual(stdout.getvalue(), '1.000\n')
def test_optionhandler_docstring_alt(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
@self.opthdr.command
def sin(deg=0):
"""
:param deg: Print sine of deg degrees.
:type deg: int or float.
"""
print('{:0.3f}'.format(math.sin(math.radians(deg))))
self.opthdr.run(['test', 'sin', '90'])
self.assertEqual(stdout.getvalue(), '1.000\n')
def test_optionhandler_docstring_with_hint(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
@self.opthdr.command(deg='_:int,float')
def sin(deg=0):
"""
:param float deg: Print sine of deg degrees.
"""
print('{:0.3f}'.format(math.sin(math.radians(deg))))
self.opthdr.run(['test', 'sin', '90'])
self.assertEqual(stdout.getvalue(), '1.000\n')
def test_optionhandler_docstring_duplicated(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
with self.assertRaises(opttools.StructureError):
@self.opthdr.command
def sin(deg=0):
"""
:param float deg: Print sine of deg degrees.
:param float deg: Print sine of deg degrees.
"""
pass # pragma no cover
self.opthdr.run(['test', 'sin', '90'])
def test_optionhandler_docstring_alt_duplicated(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
with self.assertRaises(opttools.StructureError):
@self.opthdr.command
def sin(deg=0):
"""
:param float deg: Print sine of deg degrees.
:param deg: Print sine of deg degrees.
"""
pass # pragma no cover
self.opthdr.run(['test', 'sin', '90'])
def test_optionhandler_docstring_alt_type_duplicated(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
with self.assertRaises(opttools.StructureError):
@self.opthdr.command
def sin(deg=0):
"""
:param float deg: Print sine of deg degrees.
:type deg: int or float.
"""
pass # pragma no cover
self.opthdr.run(['test', 'sin', '90'])
def test_optionhandler_docstring_with_hint_duplicated(self):
with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
with self.assertRaises(opttools.StructureError):
@self.opthdr.command(deg='_:int,float')
def sin(deg=0):
"""
:param float deg: Print sine of deg degrees.
:param deg: Print sine of deg degrees.
"""
pass # pragma no cover
self.opthdr.run(['test', 'sin', '90'])
def test_optionhandler_docstring_with_unknown_type(self):
    # ':type c:' names 'foobar', which is not a recognized option type;
    # running with '--c=foobar' must raise OptionError.
    # NOTE(review): indentation reconstructed; run() is kept inside
    # assertRaises because OptionError plausibly surfaces at run time.
    with unittest.mock.patch('sys.stdout', new=io.StringIO()) as stdout:
        with self.assertRaises(opttools.OptionError):
            @self.opthdr.default
            def func(*, a=None, b=None, c=None):
                """
                :type a: flag.
                :type b: none.
                :type c: flag or none or foobar.
                """
                pass  # pragma no cover
            self.opthdr.run(['test', '--a', '--b', '--c=foobar'], debug=True)
class TestOptionHandlerDebug(TestOptionHandler):
    """Re-run the whole TestOptionHandler suite with opttools.DEBUG on.

    Inherits every test; only the fixtures differ: DEBUG is enabled and
    sys.stderr is captured so debug output does not pollute the test run.
    """

    def setUp(self):
        # Parent setUp first, then flip the module-level debug switch
        # and swap sys.stderr for an in-memory buffer.
        super().setUp()
        opttools.DEBUG = True
        self.stderr = io.StringIO()
        self._stderr = sys.stderr  # keep the real stderr for restoration
        sys.stderr = self.stderr

    def tearDown(self):
        # Mirror setUp in reverse: parent teardown, disable DEBUG,
        # restore the original sys.stderr.
        super().tearDown()
        opttools.DEBUG = False
        sys.stderr = self._stderr
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| StarcoderdataPython |
291607 | '''
四数之和
给定一个包含 n 个整数的数组 nums 和一个目标值 target,判断 nums 中是否存在四个元素 a,b,c 和 d ,使得 a + b + c + d 的值与 target 相等?找出所有满足条件且不重复的四元组。
注意:
答案中不可以包含重复的四元组。
'''
from typing import List
'''
解题思路:双指针法
设置4个指针,逐步向内搜索可能的解
时间复杂度:O(n^3)
空间复杂度:O(1)
'''
class Solution:
    """Two-pointer search for all unique quadruplets summing to target."""

    def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
        """Return every unique quadruplet of ``nums`` that sums to ``target``.

        Sorts ``nums`` in place, fixes the outermost pair ``(i, l)`` and
        sweeps the interior with two converging pointers ``(j, k)``.
        Duplicates are skipped at every level so no quadruplet repeats.
        Time O(n^3), extra space O(1) beyond the output.
        """
        quads = []
        n = len(nums)
        nums.sort()  # sorted order enables the sweep and the dedup checks
        for i in range(n - 3):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # same smallest element as before: skip
            for l in range(n - 1, i + 2, -1):
                if l < n - 1 and nums[l] == nums[l + 1]:
                    continue  # same largest element as before: skip
                rem = target - (nums[i] + nums[l])
                k = l - 1
                for j in range(i + 1, k):
                    if j > i + 1 and nums[j] == nums[j - 1]:
                        continue  # same second element as before: skip
                    # shrink k while the inner pair overshoots
                    while j < k and nums[j] + nums[k] > rem:
                        k -= 1
                    if j == k:
                        break  # pointers met: no more pairs for this (i, l)
                    if nums[j] + nums[k] == rem:
                        quads.append([nums[i], nums[j], nums[k], nums[l]])
        return quads
# Quick manual check: expected output is [] for the first call and the
# three unique quadruplets summing to 0 for the second.
s = Solution()
print(s.fourSum([0, 0, 0, 0], 1))
print(s.fourSum([1, 0, -1, 0, -2, 2], 0))
| StarcoderdataPython |
3540998 | <filename>scripts/create_c_api_projections.py
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: PROJ
# Purpose: Parse XML output of Doxygen on coordinateoperation.hpp to create
# C API for projections.
# Author: <NAME> <even.rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2018, <NAME> <even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from lxml import etree
import os
# --- Script setup: run doxygen, parse its XML, open the output files. ---
script_dir_name = os.path.dirname(os.path.realpath(__file__))
# Make sure to run doxygen (set SKIP_DOXYGEN to reuse existing XML output)
if 'SKIP_DOXYGEN' not in os.environ:  # idiomatic 'not in' (was 'not x in')
    os.system("bash " + os.path.join(script_dir_name, "doxygen.sh"))
xmlfilename = os.path.join(
    os.path.dirname(script_dir_name),
    'docs/build/xml/classosgeo_1_1proj_1_1operation_1_1Conversion.xml')
# Pass the filename directly: lxml opens and closes the file itself,
# instead of leaking the handle from etree.parse(open(...)).
tree = etree.parse(xmlfilename)
root = tree.getroot()
# Doxygen XML: the class description lives under <compounddef>.
compounddef = root.find('compounddef')
# Output files; they stay open for the whole script and are flushed/closed
# on interpreter exit.
header = open('projections.h', 'wt')
cppfile = open('projections.cpp', 'wt')
test_cppfile = open('test_projections.cpp', 'wt')
header.write("/* BEGIN: Generated by scripts/create_c_api_projections.py*/\n")
cppfile.write("/* BEGIN: Generated by scripts/create_c_api_projections.py*/\n")
cppfile.write("\n")
test_cppfile.write("/* BEGIN: Generated by scripts/create_c_api_projections.py*/\n")
def snake_casify(s):
    """Convert a CamelCase identifier to snake_case.

    An underscore is inserted only before an uppercase letter that
    immediately follows a lowercase letter, then everything is
    lowercased ('TwoPointEquidistant' -> 'two_point_equidistant').
    Digits and existing underscores are passed through unchanged.
    """
    pieces = []
    prev_was_lower_alpha = False
    for ch in s:
        if ch.isupper():
            if prev_was_lower_alpha:
                pieces.append('_')  # word boundary: lower -> upper
            pieces.append(ch.lower())
            prev_was_lower_alpha = False
        else:
            pieces.append(ch)
            prev_was_lower_alpha = ch.isalpha()
    return ''.join(pieces)
# --- Walk every public static create*() factory and emit the C API. ---
for sectiondef in compounddef.iter('sectiondef'):
    if sectiondef.attrib['kind'] == 'public-static-func':
        for func in sectiondef.iter('memberdef'):
            name = func.find('name').text
            assert name.startswith('create')
            # Skip factories that are not map projections.
            if name in ('create', 'createChangeVerticalUnit',
                        'createAxisOrderReversal', 'createGeographicGeocentric'):
                continue
            # Collect (C++ type, snake_case name) for each parameter and
            # remember whether any angular/linear unit argument is needed.
            params = []
            has_angle = False
            has_linear = False
            for param in func.iter('param'):
                # Renamed from 'type': do not shadow the builtin.
                param_type = param.find('type').xpath("normalize-space()")
                if param_type.find('Angle') >= 0:
                    has_angle = True
                if param_type.find('Length') >= 0:
                    has_linear = True
                paramname = param.find('declname').text
                if paramname == 'properties':
                    continue
                params.append((param_type, snake_casify(paramname)))
            shortName = name[len('create'):]
            c_shortName = snake_casify(shortName)
            # Build the C declaration for the wrapper function.
            decl = "proj_create_conversion_"
            decl += c_shortName
            decl += "(\n"
            decl += " PJ_CONTEXT *ctx,\n"
            has_output_params = False
            for param in params:
                if has_output_params:
                    decl += ",\n"
                if param[0] in ('int', 'bool'):
                    decl += " int " + param[1]
                else:
                    decl += " double " + param[1]
                has_output_params = True
            if has_angle:
                if has_output_params:
                    decl += ",\n"
                decl += " const char* ang_unit_name, double ang_unit_conv_factor"
                has_output_params = True
            if has_linear:
                if has_output_params:
                    decl += ",\n"
                decl += " const char* linear_unit_name, double linear_unit_conv_factor"
            decl += ")"
            header.write("PJ PROJ_DLL *" + decl + ";\n\n")
            # Doxygen brief description, adapted for the C API docs.
            briefdescription = func.find('briefdescription/para').xpath("normalize-space()")
            briefdescription = briefdescription.replace("Instanciate ", "Instanciate a ProjectedCRS with ")
            cppfile.write("// ---------------------------------------------------------------------------\n\n")
            cppfile.write("/** \\brief " + briefdescription + "\n")
            cppfile.write(" *\n")
            cppfile.write(" * See osgeo::proj::operation::Conversion::create" + shortName + "().\n")
            cppfile.write(" *\n")
            cppfile.write(" * Linear parameters are expressed in (linear_unit_name, linear_unit_conv_factor).\n")
            if has_angle:
                cppfile.write(" * Angular parameters are expressed in (ang_unit_name, ang_unit_conv_factor).\n")
            cppfile.write(" */\n")
            # Emit the wrapper body (stray C-style ';' statement
            # terminators removed from the Python source).
            cppfile.write("PJ* " + decl + "{\n")
            cppfile.write(" SANITIZE_CTX(ctx);\n")
            cppfile.write(" try {\n")
            if has_linear:
                cppfile.write(" UnitOfMeasure linearUnit(createLinearUnit(linear_unit_name, linear_unit_conv_factor));\n")
            if has_angle:
                cppfile.write(" UnitOfMeasure angUnit(createAngularUnit(ang_unit_name, ang_unit_conv_factor));\n")
            cppfile.write(" auto conv = Conversion::create" + shortName + "(PropertyMap()")
            for param in params:
                # BUGFIX: was `param[0] in 'int'` / `in 'bool'` — substring
                # membership, which also matches types like 'i', 't' or 'n'.
                # Equality is what was meant.
                if param[0] == 'int':
                    cppfile.write(", " + param[1])
                elif param[0] == 'bool':
                    cppfile.write(", " + param[1] + " != 0")
                elif param[0].find('Angle') >= 0:
                    cppfile.write(", Angle(" + param[1] + ", angUnit)")
                elif param[0].find('Length') >= 0:
                    cppfile.write(", Length(" + param[1] + ", linearUnit)")
                elif param[0].find('Scale') >= 0:
                    cppfile.write(", Scale(" + param[1] + ")")
            cppfile.write(");\n")
            cppfile.write(" return proj_create_conversion(ctx, conv);\n")
            cppfile.write(" } catch (const std::exception &e) {\n")
            cppfile.write(" proj_log_error(ctx, __FUNCTION__, e.what());\n")
            cppfile.write(" }\n")
            cppfile.write(" return nullptr;\n")
            cppfile.write("}\n")
            # Emit a smoke test that calls the wrapper with zeros.
            test_cppfile.write("{\n")
            test_cppfile.write(" auto projCRS = proj_create_conversion_" + c_shortName + "(\n")
            test_cppfile.write(" m_ctxt")
            for param in params:
                test_cppfile.write(", 0")
            if has_angle:
                test_cppfile.write(", \"Degree\", 0.0174532925199433")
            if has_linear:
                test_cppfile.write(", \"Metre\", 1.0")
            test_cppfile.write(");\n")
            test_cppfile.write(" ObjectKeeper keeper_projCRS(projCRS);\n")
            test_cppfile.write(" ASSERT_NE(projCRS, nullptr);\n")
            test_cppfile.write("}\n")
header.write("/* END: Generated by scripts/create_c_api_projections.py*/\n")
cppfile.write("/* END: Generated by scripts/create_c_api_projections.py*/\n")
test_cppfile.write("/* END: Generated by scripts/create_c_api_projections.py*/\n")
print('projections.h and .cpp, and test_projections.cpp have been generated. Manually merge them now')
5030912 | <filename>src/lava/lib/dl/slayer/io.py
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Spike/event Input Output and visualization module."""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
class Event():
    """This class provides a way to store, read, write and visualize spike
    event.

    Members:
        * x (numpy int array): x index of spike event.
        * y (numpy int array): y index of spike event
          (not used if the spatial dimension is 1).
        * c (numpy int array): channel index of spike event.
        * t (numpy double array): timestamp of spike event.
          Time is assumed to be in ms.
        * p (numpy int or double array): payload of spike event.
          None for binary spike.
        * graded (bool): flag to indicate graded or binary spike.

    Parameters
    ----------
    x_event : int array
        x location of event
    y_event : int array or None
        y location of event
    c_event : int array
        c location of event
    t_event : int array or float array
        time of event
    payload : int array or float array or None
        payload of event. None for binary event. Defaults to None.

    Examples
    --------
    >>> td_event = Event(x_event, y_event, c_event, t_event)
    """

    def __init__(self, x_event, y_event, c_event, t_event, payload=None):
        # A missing y coordinate means the events are 1-D in space.
        if y_event is None:
            self.dim = 1
        else:
            self.dim = 2
        # Graded events carry a per-spike payload; binary events do not.
        self.graded = payload is not None
        if type(x_event) is np.ndarray:  # x spatial dimension
            self.x = x_event
        else:
            self.x = np.asarray(x_event)
        if type(y_event) is np.ndarray:  # y spatial dimension
            self.y = y_event
        else:
            self.y = np.asarray(y_event)
        if type(c_event) is np.ndarray:  # channel dimension
            self.c = c_event
        else:
            self.c = np.asarray(c_event)
        if type(t_event) is np.ndarray:  # time stamp in ms
            self.t = t_event
        else:
            self.t = np.asarray(t_event)
        if self.graded:
            if type(payload) is np.ndarray:
                self.p = payload
            else:
                self.p = np.asarray(payload)
        else:
            self.p = None
        # Force integer dtypes on the index arrays (x, c, and y for 2-D).
        if not issubclass(self.x.dtype.type, np.integer):
            self.x = self.x.astype('int')
        if not issubclass(self.c.dtype.type, np.integer):
            self.c = self.c.astype('int')
        if self.dim == 2:
            if not issubclass(self.y.dtype.type, np.integer):
                self.y = self.y.astype('int')
        # Shift channel indices so that the smallest channel becomes 0.
        self.c -= self.c.min()

    def to_tensor(self, sampling_time=1, dim=None):  # Sampling time in ms
        """Returns a numpy tensor that contains the spike events sampled in
        bins of ``sampling_time``. The array is of dimension
        (channels, height, time) or ``CHT`` for 1D data. The array is of
        dimension (channels, height, width, time) or ``CHWT`` for 2D data.

        Parameters
        ----------
        sampling_time : int
            event data sampling time. Defaults to 1.
        dim : int or None
            desired dimension. It is inferred if None. Defaults to None.

        Returns
        -------
        np array
            spike tensor.

        Examples
        --------
        >>> spike = td_event.to_tensor()
        """
        if self.dim == 1:
            if dim is None:
                # Infer tensor size from the largest indices/timestamp.
                dim = (
                    np.round(max(self.c) + 1).astype(int),
                    np.round(max(self.x) + 1).astype(int),
                    np.round(max(self.t) / sampling_time + 1).astype(int),
                )
            # Internal layout is always 4-D (C, H, W, T); H is 1 for 1-D.
            frame = np.zeros((dim[0], 1, dim[1], dim[2]))
        elif self.dim == 2:
            if dim is None:
                dim = (
                    np.round(max(self.c) + 1).astype(int),
                    np.round(max(self.y) + 1).astype(int),
                    np.round(max(self.x) + 1).astype(int),
                    np.round(max(self.t) / sampling_time + 1).astype(int),
                )
            frame = np.zeros((dim[0], dim[1], dim[2], dim[3]))
        return self.fill_tensor(frame, sampling_time).reshape(dim)

    def fill_tensor(
        self, empty_tensor,
        sampling_time=1, random_shift=False, binning_mode='OR'
    ):  # Sampling time in ms
        """Returns a numpy tensor that contains the spike events sampled in
        bins of ``sampling_time``. The tensor is of dimension
        (channels, height, width, time) or ``CHWT``.

        Parameters
        ----------
        empty_tensor : numpy or torch tensor
            an empty tensor to hold spike data.
        sampling_time : float
            the width of time bin. Defaults to 1.
        random_shift : bool
            flag to randomly shift the sample in time. Defaults to False.
        binning_mode : str
            the way spikes are binned. Options are 'OR'|'SUM'. If the event is
            graded binning mode is overwritten to 'SUM'. Defaults to 'OR'.

        Returns
        -------
        numpy or torch tensor
            spike tensor.

        Examples
        --------
        >>> spike = td_event.fill_tensor( torch.zeros((2, 240, 180, 5000)) )
        """
        if random_shift is True:
            # Random start bin, bounded so that some events stay in range.
            t_start = np.random.randint(
                max(
                    int(self.t.min() / sampling_time),
                    int(self.t.max() / sampling_time) - empty_tensor.shape[3],
                    empty_tensor.shape[3] - int(self.t.max() / sampling_time),
                    1,
                )
            )
        else:
            t_start = 0
        x_event = np.round(self.x).astype(int)
        c_event = np.round(self.c).astype(int)
        t_event = np.round(self.t / sampling_time).astype(int) - t_start
        if self.graded:
            payload = self.p
            binning_mode = 'SUM'  # graded events must accumulate
        # print('shifted sequence by', t_start)
        if self.dim == 1:
            # Keep only events that fall inside the tensor bounds.
            valid_ind = np.argwhere(
                (x_event < empty_tensor.shape[2])
                & (c_event < empty_tensor.shape[0])
                & (t_event < empty_tensor.shape[3])
                & (x_event >= 0)
                & (c_event >= 0)
                & (t_event >= 0)
            )
            # NOTE(review): in the graded branches below, `payload` is NOT
            # indexed by valid_ind — if any event is out of bounds the
            # assignment shapes no longer match; verify against upstream.
            if binning_mode.upper() == 'OR':
                empty_tensor[
                    c_event[valid_ind],
                    0,
                    x_event[valid_ind],
                    t_event[valid_ind]
                ] = payload if self.graded is True else 1 / sampling_time
            elif binning_mode.upper() == 'SUM':
                empty_tensor[
                    c_event[valid_ind],
                    0,
                    x_event[valid_ind],
                    t_event[valid_ind]
                ] += payload if self.graded is True else 1 / sampling_time
            else:
                raise Exception(
                    f'Unsupported binning_mode. It was {binning_mode}'
                )
        elif self.dim == 2:
            y_event = np.round(self.y).astype(int)
            valid_ind = np.argwhere(
                (x_event < empty_tensor.shape[2])
                & (y_event < empty_tensor.shape[1])
                & (c_event < empty_tensor.shape[0])
                & (t_event < empty_tensor.shape[3])
                & (x_event >= 0)
                & (y_event >= 0)
                & (c_event >= 0)
                & (t_event >= 0)
            )
            if binning_mode.upper() == 'OR':
                empty_tensor[
                    c_event[valid_ind],
                    y_event[valid_ind],
                    x_event[valid_ind],
                    t_event[valid_ind]
                ] = payload if self.graded is True else 1 / sampling_time
            elif binning_mode.upper() == 'SUM':
                empty_tensor[
                    c_event[valid_ind],
                    y_event[valid_ind],
                    x_event[valid_ind],
                    t_event[valid_ind]
                ] += payload if self.graded is True else 1 / sampling_time
            else:
                # NOTE(review): missing f-prefix — this message prints the
                # literal text '{binning_mode}' instead of the value.
                raise Exception(
                    'Unsupported binning_mode. It was {binning_mode}'
                )
        return empty_tensor

    def _show_event_1d(
        self, fig=None, frame_rate=24,
        pre_compute_frames=True, repeat=False, plot=True
    ):
        """Animate 1-D binary events as a growing raster plot."""
        _ = pre_compute_frames  # just for consistency
        if self.dim != 1:
            raise Exception(
                'Expected self dimension to be 1. It was: {}'.format(self.dim)
            )
        if fig is None:
            fig = plt.figure()
        interval = 1e3 / frame_rate  # in ms
        x_dim = self.x.max() + 1
        t_max = self.t.max()
        t_min = self.t.min()
        # c_max = self.c.max() + 1
        min_frame = int(np.floor(t_min / interval))
        max_frame = int(np.ceil(t_max / interval)) + 1
        # ignore pre_compute_frames
        raster, = plt.plot([], [], '.')
        scan_line, = plt.plot([], [])
        plt.axis((t_min - 0.1 * t_max, 1.1 * t_max, -0.1 * x_dim, 1.1 * x_dim))

        def animate(i):
            """Extend the raster up to frame i and move the scan line."""
            t_end = (i + min_frame + 1) * interval
            ind = (self.t < t_end)
            # update raster
            raster.set_data(self.t[ind], self.x[ind])
            # update raster scan line
            scan_line.set_data(
                [t_end + interval, t_end + interval], [0, x_dim]
            )

        anim = animation.FuncAnimation(
            fig, animate, frames=max_frame,
            interval=interval, repeat=repeat
        )
        if plot is True:
            plt.show()
        return anim

    def _show_event_1d_graded(
        self, fig=None, frame_rate=24,
        pre_compute_frames=True, repeat=False, plot=True
    ):
        """Animate 1-D graded events; marker size/alpha encode the payload."""
        _ = pre_compute_frames  # just for consistency
        if self.dim != 1:
            raise Exception(
                'Expected self dimension to be 1. It was: {}'.format(self.dim)
            )
        if self.graded is not True:
            raise Exception(
                'Expected graded events. It was: {}'.format(self.graded)
            )
        if fig is None:
            fig = plt.figure()
        interval = 1e3 / frame_rate  # in ms
        x_dim = self.x.max() + 1
        t_max = self.t.max()
        t_min = self.t.min()
        # c_max = self.c.max() + 1
        p_min = self.p.min()
        p_max = self.p.max()
        min_frame = int(np.floor(t_min / interval))
        max_frame = int(np.ceil(t_max / interval)) + 1
        # ignore pre_compute_frames
        cmap = plt.get_cmap("tab10")
        scatter = plt.scatter([], [], [])
        scan_line, = plt.plot([], [], color=cmap(1))
        plt.axis((t_min - 0.1 * t_max, 1.1 * t_max, -0.1 * x_dim, 1.1 * x_dim))

        def animate(i):
            """Extend the scatter up to frame i; alpha follows the payload."""
            t_end = (i + min_frame + 1) * interval
            ind = (self.t < t_end)
            # update raster; payload normalized to [0, 1]
            alpha = (self.p[ind] - p_min) / (p_max - p_min)
            # scatter.set_data(self.t[ind], self.x[ind], alpha*50, alpha=alpha)
            scatter.set_offsets(np.vstack([self.t[ind], self.x[ind]]).T)
            scatter.set_sizes(alpha * 50)
            scatter.set_alpha(alpha)
            # update raster scan line
            scan_line.set_data(
                [t_end + interval, t_end + interval], [0, x_dim]
            )

        anim = animation.FuncAnimation(
            fig, animate, frames=max_frame,
            interval=interval, repeat=repeat
        )
        if plot is True:
            plt.show()
        return anim

    def _show_event_2d(
        self, fig=None, frame_rate=24,
        pre_compute_frames=True, repeat=False, plot=True
    ):
        """Animate 2-D events as RGB frames (channels 1/2/0 -> R/G/B)."""
        if self.dim != 2:
            raise Exception(
                'Expected self dimension to be 2. It was: {}'.format(self.dim)
            )
        if fig is None:
            fig = plt.figure()
        interval = 1e3 / frame_rate  # in ms
        x_dim = self.x.max() + 1
        y_dim = self.y.max() + 1
        if pre_compute_frames is True:
            min_frame = int(np.floor(self.t.min() / interval))
            max_frame = int(np.ceil(self.t.max() / interval))
            image = plt.imshow(np.zeros((y_dim, x_dim, 3)))
            frames = np.zeros((max_frame - min_frame, y_dim, x_dim, 3))
            # precompute frames
            for i in range(len(frames)):
                t_start = (i + min_frame) * interval
                t_end = (i + min_frame + 1) * interval
                time_mask = (self.t >= t_start) & (self.t < t_end)
                r_ind = (time_mask & (self.c == 1))
                g_ind = (time_mask & (self.c == 2))
                b_ind = (time_mask & (self.c == 0))
                frames[i, self.y[r_ind], self.x[r_ind], 0] = 1
                frames[i, self.y[g_ind], self.x[g_ind], 1] = 1
                frames[i, self.y[b_ind], self.x[b_ind], 2] = 1

            def animate(frame):
                """Display one precomputed RGB frame."""
                image.set_data(frame)
                return image

            anim = animation.FuncAnimation(
                fig, animate,
                frames=frames, interval=interval, repeat=repeat
            )
        else:
            min_frame = int(np.floor(self.t.min() / interval))
            max_frame = int(np.ceil(self.t.max() / interval))
            image = plt.imshow(np.zeros((y_dim, x_dim, 3)))

            def animate(i):
                """Build and display the RGB frame for interval i on the fly."""
                t_start = (i + min_frame) * interval
                t_end = (i + min_frame + 1) * interval
                frame = np.zeros((y_dim, x_dim, 3))
                time_mask = (self.t >= t_start) & (self.t < t_end)
                r_ind = (time_mask & (self.c == 1))
                g_ind = (time_mask & (self.c == 2))
                b_ind = (time_mask & (self.c == 0))
                frame[self.y[r_ind], self.x[r_ind], 0] = 1
                frame[self.y[g_ind], self.x[g_ind], 1] = 1
                frame[self.y[b_ind], self.x[b_ind], 2] = 1
                image.set_data(frame)
                return image

            anim = animation.FuncAnimation(
                fig, animate,
                frames=max_frame - min_frame,
                interval=interval, repeat=repeat
            )
        # save the animation as an mp4. This requires ffmpeg or mencoder to be
        # installed. The extra_args ensure that the x264 codec is used, so that
        # the video can be embedded in html5. You may need to adjust this for
        # your system: for more information, see
        # http://matplotlib.sourceforge.net/api/animation_api.html
        # if saveAnimation: anim.save('show_self_animation.mp4', fps=30)
        if plot is True:
            plt.show()
        return anim

    def show(
        self, fig=None, frame_rate=24,
        pre_compute_frames=True, repeat=False
    ):
        """Visualizes spike event.

        Parameters
        ----------
        fig : int
            plot figure ID. Defaults to None.
        frame_rate : int
            frame rate of visualization. Defaults to 24.
        pre_compute_frames : bool
            flag to enable precomputation of frames for faster visualization.
            Defaults to True.
        repeat : bool
            flag to enable repeat of animation. Defaults to False.

        Examples
        --------
        >>> self.show()
        """
        if fig is None:
            fig = plt.figure()
        # Dispatch on dimensionality and graded/binary payload.
        if self.dim == 1:
            if self.graded is True:
                self._show_event_1d_graded(
                    fig, frame_rate=frame_rate,
                    pre_compute_frames=pre_compute_frames,
                    repeat=repeat
                )
            else:
                self._show_event_1d(
                    fig, frame_rate=frame_rate,
                    pre_compute_frames=pre_compute_frames,
                    repeat=repeat
                )
        else:
            self._show_event_2d(
                fig, frame_rate=frame_rate,
                pre_compute_frames=pre_compute_frames,
                repeat=repeat
            )

    def anim(
        self, fig=None, frame_rate=24,
        pre_compute_frames=True, repeat=True
    ):
        """Get animation object for spike event.

        Parameters
        ----------
        fig : int
            plot figure ID. Defaults to None.
        frame_rate : int
            frame rate of visualization. Defaults to 24.
        pre_compute_frames : bool
            flag to enable precomputation of frames for faster visualization.
            Defaults to True.
        repeat : bool
            flag to enable repeat of animation. Defaults to False.

        Returns
        -------
        anim
            matplotlib anim object.

        Examples
        --------
        >>> anim = self.anim()
        """
        if fig is None:
            fig = plt.figure()
        if self.dim == 1:
            if self.graded is True:
                anim = self._show_event_1d_graded(
                    fig,
                    frame_rate=frame_rate,
                    pre_compute_frames=pre_compute_frames,
                    repeat=repeat,
                    plot=False
                )
            else:
                anim = self._show_event_1d(
                    fig,
                    frame_rate=frame_rate,
                    pre_compute_frames=pre_compute_frames,
                    repeat=repeat,
                    plot=False
                )
        else:
            anim = self._show_event_2d(
                fig,
                frame_rate=frame_rate,
                pre_compute_frames=pre_compute_frames,
                repeat=repeat,
                plot=False
            )
        # NOTE(review): relies on the private Animation._fig attribute to
        # close the figure without destroying the animation object.
        plt.close(anim._fig)
        return anim
def tensor_to_event(spike_tensor, sampling_time=1):
    """Returns td_event event from a numpy array (of dimension 3 or 4).

    The numpy array must be of dimension (channels, height, time) or ``CHT``
    for 1D data, and (channels, height, width, time) or ``CHWT`` for 2D data.

    Parameters
    ----------
    spike_tensor : numpy or torch tensor
        spike tensor.
    sampling_time : float
        the width of time bin. Defaults to 1.

    Returns
    -------
    Event
        spike event

    Examples
    --------
    >>> td_event = tensor_to_event(spike)
    """
    # Nonzero entries are events; argwhere yields one index row per event.
    if spike_tensor.ndim == 3:
        event_idx = np.argwhere(spike_tensor != 0)
        x_event = event_idx[:, 1]
        y_event = None
        c_event = event_idx[:, 0]
        t_event = event_idx[:, 2]
    elif spike_tensor.ndim == 4:
        event_idx = np.argwhere(spike_tensor != 0)
        x_event = event_idx[:, 2]
        y_event = event_idx[:, 1]
        c_event = event_idx[:, 0]
        t_event = event_idx[:, 3]
    else:
        raise Exception(
            f'Expected numpy array of 3 or 4 dimension. '
            f'It was {spike_tensor.ndim}'
        )
    # Advanced indexing with the transposed index rows gathers the value at
    # every event location in one shot.
    payload = spike_tensor[tuple(event_idx.T)] * sampling_time
    if np.abs(payload - np.ones_like(payload)).sum() < 1e-6:  # binary spikes
        return Event(x_event, y_event, c_event, t_event * sampling_time)
    return Event(
        x_event, y_event, c_event, t_event * sampling_time, payload
    )
def read_1d_spikes(filename):
    """Reads one dimensional binary spike file and returns a td_event event.

    The binary file is encoded as follows:
        * Each spike event is represented by a 40 bit number.
        * First 16 bits (bits 39-24) represent the neuronID.
        * Bit 23 represents the sign of spike event: 0=>OFF event, 1=>ON event.
        * The last 23 bits (bits 22-0) represent the spike event timestamp in
          microseconds.

    Parameters
    ----------
    filename : str
        name of spike file.

    Returns
    -------
    Event
        spike event.

    Examples
    --------
    >>> td_event = read_1d_spikes(file_path)
    """
    with open(filename, 'rb') as input_file:
        # View the raw bytes as integers wide enough for the shifts below.
        raw = np.frombuffer(input_file.read(), dtype=np.uint8).astype(int)
    # Each event occupies 5 consecutive bytes.
    x_event = (raw[0::5] << 8) | raw[1::5]
    c_event = raw[2::5] >> 7  # polarity bit
    t_event = (
        (raw[2::5] << 16)
        | (raw[3::5] << 8)
        | (raw[4::5])
    ) & 0x7FFFFF
    # convert spike times to ms
    return Event(x_event, None, c_event, t_event / 1000)
def encode_1d_spikes(filename, td_event):
    """Writes one dimensional binary spike file from a td_event event.

    The binary file is encoded as follows:
        * Each spike event is represented by a 40 bit number.
        * First 16 bits (bits 39-24) represent the neuronID.
        * Bit 23 represents the sign of spike event: 0=>OFF event, 1=>ON event.
        * The last 23 bits (bits 22-0) represent the spike event timestamp in
          microseconds.

    Parameters
    ----------
    filename : str
        name of spike file.
    td_event : event
        spike event object

    Examples
    --------
    >>> encode_1d_spikes(file_path, td_event)
    """
    if td_event.dim != 1:
        raise Exception(
            'Expected td_event dimension to be 1. '
            'It was: {}'.format(td_event.dim)
        )
    x_event = np.round(td_event.x).astype(int)
    c_event = np.round(td_event.c).astype(int)
    # encode spike time in us
    t_event = np.round(td_event.t * 1000).astype(int)
    output_byte_array = bytearray(len(t_event) * 5)
    # BUGFIX: the high byte of the 16-bit neuron id was masked with 0xFF00
    # AFTER the >> 8 shift, which zeroed it for every x >= 256 and broke the
    # round trip with read_1d_spikes (which decodes x as (b0 << 8) | b1).
    # The correct mask after the shift is 0xFF.
    output_byte_array[0::5] = np.uint8((x_event >> 8) & 0xFF).tobytes()
    output_byte_array[1::5] = np.uint8((x_event & 0xFF)).tobytes()
    # Byte 2: polarity in bit 7, top 7 bits of the 23-bit timestamp below.
    output_byte_array[2::5] = np.uint8(
        ((t_event >> 16) & 0x7F)
        | (c_event.astype(int) << 7)
    ).tobytes()
    output_byte_array[3::5] = np.uint8((t_event >> 8) & 0xFF).tobytes()
    output_byte_array[4::5] = np.uint8(t_event & 0xFF).tobytes()
    with open(filename, 'wb') as output_file:
        output_file.write(output_byte_array)
def read_2d_spikes(filename):
    """Reads two dimensional binary spike file and returns a td_event event.

    It is the same format used in neuromorphic datasets NMNIST & NCALTECH101.
    The binary file is encoded as follows:
        * Each spike event is represented by a 40 bit number.
        * First 8 bits (bits 39-32) represent the xID of the neuron.
        * Next 8 bits (bits 31-24) represent the yID of the neuron.
        * Bit 23 represents the sign of spike event: 0=>OFF event, 1=>ON event.
        * The last 23 bits (bits 22-0) represent the spike event timestamp in
          microseconds.

    Parameters
    ----------
    filename : str
        name of spike file.

    Returns
    -------
    Event
        spike event.

    Examples
    --------
    >>> td_event = read_2d_spikes(file_path)
    """
    with open(filename, 'rb') as input_file:
        # View the raw bytes as integers wide enough for the shifts below.
        raw = np.frombuffer(input_file.read(), dtype=np.uint8).astype(int)
    # Each event occupies 5 consecutive bytes.
    x_event = raw[0::5]
    y_event = raw[1::5]
    c_event = raw[2::5] >> 7  # polarity bit
    t_event = (
        (raw[2::5] << 16)
        | (raw[3::5] << 8)
        | (raw[4::5])
    ) & 0x7FFFFF
    # convert spike times to ms
    return Event(x_event, y_event, c_event, t_event / 1000)
def encode_2d_spikes(filename, td_event):
    """Writes two dimensional binary spike file from a td_event event.

    It is the same format used in neuromorphic datasets NMNIST & NCALTECH101.
    The binary file is encoded as follows:
        * Each spike event is represented by a 40 bit number.
        * First 8 bits (bits 39-32) represent the xID of the neuron.
        * Next 8 bits (bits 31-24) represent the yID of the neuron.
        * Bit 23 represents the sign of spike event: 0=>OFF event, 1=>ON event.
        * The last 23 bits (bits 22-0) represent the spike event timestamp in
          microseconds.

    Parameters
    ----------
    filename : str
        name of spike file.
    td_event : event
        spike event object

    Examples
    --------
    >>> encode_2d_spikes(file_path, td_event)
    """
    if td_event.dim != 2:
        raise Exception(
            'Expected td_event dimension to be 2. '
            'It was: {}'.format(td_event.dim)
        )
    xs = np.round(td_event.x).astype(int)
    ys = np.round(td_event.y).astype(int)
    cs = np.round(td_event.c).astype(int)
    ts = np.round(td_event.t * 1000).astype(int)  # spike times in us
    # Pack 5 bytes per event via strided slice assignment.
    packed = bytearray(len(ts) * 5)
    packed[0::5] = np.uint8(xs).tobytes()
    packed[1::5] = np.uint8(ys).tobytes()
    # Byte 2: polarity in bit 7, top 7 bits of the 23-bit timestamp below.
    packed[2::5] = np.uint8(
        ((ts >> 16) & 0x7F)
        | (cs.astype(int) << 7)
    ).tobytes()
    packed[3::5] = np.uint8((ts >> 8) & 0xFF).tobytes()
    packed[4::5] = np.uint8(ts & 0xFF).tobytes()
    with open(filename, 'wb') as output_file:
        output_file.write(packed)
def read_np_spikes(filename, fmt='xypt', time_unit=1e-3):
    """Reads numpy spike event and returns a td_event event.

    The numpy array is assumed to be of nEvent x event dimension.

    Parameters
    ----------
    filename : str
        name of spike file.
    fmt : str
        format of numpy event ordering. Options are 'xypt'. Defaults to 'xypt'.
    time_unit : float
        scale factor that converts the data to seconds. Defaults to 1e-3.

    Returns
    -------
    Event
        spike object.

    Examples
    --------
    >>> td_event = read_np_spikes(file_path)
    >>> td_event = read_np_spikes(file_path, fmt='xypt')
    >>> td_event = read_np_spikes(file_path, time_unit=1e-6)
    """
    np_event = np.load(filename)
    if fmt != 'xypt':
        raise Exception(f"{fmt=} not implemented.")
    n_cols = np_event.shape[1]
    # Event timestamps are kept in ms: seconds-scale * 1e3.
    if n_cols == 3:  # 1-D events: (x, c, t)
        return Event(
            np_event[:, 0].astype('int'),
            None,
            np_event[:, 1],
            np_event[:, 2] * time_unit * 1e3
        )
    if n_cols == 4:  # 2-D events: (x, y, c, t)
        return Event(
            np_event[:, 0],
            np_event[:, 1],
            np_event[:, 2],
            np_event[:, 3] * time_unit * 1e3
        )
    raise Exception(
        'Numpy array format did not match. '
        'Expected it to be nEvents x eventd_eventim.'
    )
# TODO: modify for graded spikes
def encode_np_spikes(filename, td_event, fmt='xypt', time_unit=1e-3):
    """Writes td_event event into numpy file.

    Parameters
    ----------
    filename : str
        name of spike file.
    td_event : event
        spike event.
    fmt : str
        format of numpy event ordering. Options are 'xypt'. Defaults to 'xypt'.
    time_unit : float
        scale factor that converts the data to seconds. Defaults to 1e-3.
        NOTE(review): this parameter is accepted but currently unused by the
        encoder; timestamps are written exactly as stored — verify intent.

    Examples
    --------
    >>> encode_np_spikes(file_path, td_event)
    >>> encode_np_spikes(file_path, td_event, fmt='xypt')
    """
    if fmt != 'xypt':
        raise Exception(f"{fmt=} not implemented.")
    # Column layout: (x, c, t) for 1-D events, (x, y, c, t) for 2-D.
    if td_event.dim == 1:
        columns = (td_event.x, td_event.c, td_event.t)
    elif td_event.dim == 2:
        columns = (td_event.x, td_event.y, td_event.c, td_event.t)
    else:
        raise Exception(
            'Numpy array format did not match. '
            'Expected it to be nEvents x eventd_eventim.'
        )
    np_event = np.zeros((len(td_event.x), len(columns)))
    for col, values in enumerate(columns):
        np_event[:, col] = values
    np.save(filename, np_event)
# TODO: modify for graded spikes
| StarcoderdataPython |
3430179 | <filename>evilmc/__init__.py
from .core import *
from .example import * | StarcoderdataPython |
6673060 | import udees.datasets.mitdb as mitdb
from udees.datasets.mitdb import Record
from udees.datasets.mitdb import interleave_record
import unittest
from unittest.mock import Mock
from unittest.mock import patch
# Class-level patches: each test method receives the mocks innermost-first,
# i.e. (self, wfdb, Path, pd, np). The whole class is currently skipped.
@patch("udees.datasets.mitdb.np")
@patch("udees.datasets.mitdb.pd")
@patch("udees.datasets.mitdb.Path")
@patch("udees.datasets.mitdb.wfdb")
@unittest.skip("redesigned records, tests need to be fixed")
class MITDB_Test(unittest.TestCase):
    """Tests for the MIT-BIH arrhythmia dataset wrapper (mitdb module)."""

    # NOTE(review): missing `self` / @staticmethod, and this helper does not
    # appear to be used by any test below — confirm before relying on it.
    # It also sets `.return_value` on Mock attribute *calls* rather than the
    # attributes themselves, which looks unintended.
    def rdann_mock(symbols, samples):
        annotations = Mock()
        annotations.symbols.return_value = symbols
        annotations.samples.return_value = samples
        return annotations

    def test_download_arrhythmia_calls_wfdb(self, wfdb, *args):
        # download() must delegate to wfdb.dl_database with the fixed dir.
        mitdb.Arrhythmia.download()
        wfdb.dl_database.assert_called_once_with('mitdb', dl_dir='arrhythmia')

    def test_get_record_list_called_with_correct_parameter(self, wfdb, *args):
        mitdb.Arrhythmia.get_record_data()
        wfdb.get_record_list.assert_called_once_with(db_dir='mitdb')

    def test_get_empty_record_list(self, wfdb, *args):
        # An empty record list must yield an empty data sequence.
        wfdb.get_record_list.return_value = []
        wfdb.rdsamp.return_value = (None, None)
        wfdb.rdann.return_value = None
        data = mitdb.Arrhythmia.get_record_data()
        self.assertSequenceEqual([], data, msg="data={}".format(data))

    def test_get_one_record(self, wfdb, Path, pd, np):
        wfdb.get_record_list.return_value = ['record_file_path']
        wfdb.rdsamp.return_value = ("some data", 'field descriptors')
        data = mitdb.Arrhythmia.get_record_data()[0]
        expected = mitdb.Record(
            data='some data',
            file_name='record_file_path',
            labels=np.stack()
        )
        self.assertEqual(expected, data,
                         msg="expected: {}, actual: {}"
                         .format(expected, data))

    def test_raise_assert_exception(self, wfdb, Path, pd, np):
        # If the dataset directory does not exist, get_record_data must
        # fail fast with a descriptive AssertionError.
        path = unittest.mock.Mock()
        path.exists.return_value = False
        Path.return_value = path
        with self.assertRaises(AssertionError) as error:
            mitdb.Arrhythmia.get_record_data()
        self.assertEqual(
            "you have to download the dataset arrhythmia first",
            str(error.exception)
        )

    def test_rdann_called_correctly_once(self, wfdb, Path, pd, np):
        wfdb.get_record_list.return_value = ['file_name']
        wfdb.rdsamp.return_value = ('', '')
        mitdb.Arrhythmia.get_record_data()
        wfdb.rdann.assert_called_once_with(
            record_name='{}/{}'.format('arrhythmia', 'file_name'),
            extension='atr'
        )

    def test_rdann_called_correctly_twice(self, wfdb, Path, pd, np):
        wfdb.get_record_list.return_value = ['first', 'second']
        wfdb.rdsamp.return_value = ('', '')
        mitdb.Arrhythmia.get_record_data()
        # One rdann call per record, each with its own file name.
        for name in ['first', 'second']:
            wfdb.rdann.assert_any_call(record_name='{}/{}'
                                       .format('arrhythmia', name),
                                       extension='atr')

    def test_np_stack_called_correctly(self, wfdb, Path, pd, np):
        wfdb.get_record_list.return_value = ['name']
        wfdb.rdsamp.return_value = ('', '')
        mitdb.Arrhythmia.get_record_data()
        annotations = wfdb.rdann()
        np.stack.assert_called_once_with([annotations.symbol,
                                          annotations.sample])

    def test_labels_read_correctly(self, wfdb, Path, pd, np):
        wfdb.get_record_list.return_value = ['first']
        wfdb.rdsamp.return_value = ('', '')

        # Stand-in for np.stack that pairs symbols with sample indices.
        def np_stack(items):
            return list(zip(items[0], items[1]))

        np.stack.side_effect = np_stack
        for label in ['label1', 'secondlabel']:
            with self.subTest():
                annotations = Mock()
                annotations.symbol = [label]
                annotations.sample = [5]
                wfdb.rdann.return_value = annotations
                expected = np_stack([annotations.symbol, annotations.sample])
                data = mitdb.Arrhythmia.get_record_data()[0]
                self.assertSequenceEqual(expected, data.labels)

    def test_read_samples(self, wfdb, Path, pd, np):
        wfdb.get_record_list.return_value = ['record_file_path']
        wfdb.rdsamp.return_value = ('some data', 'field descriptors')
        data = mitdb.Arrhythmia.get_record_data()[0]
        expected = Record(
            data='some data',
            file_name='record_file_path',
            labels=np.stack()
        )
        self.assertEqual(expected,
                         data,
                         msg="expected: {}, actual: {}"
                         .format(expected, data))
@unittest.skip("redesigned records, tests need to be fixed")
class Record_Test(unittest.TestCase):
    """Equality semantics of the Record value object (suite currently skipped)."""

    def test_inequality_for_different_data(self):
        empty_record = Record()
        record_with_data = Record(data=1)
        self.assertFalse(empty_record == record_with_data)

    def test_equality_of_non_empty_records(self):
        first = Record(data=1, file_name="file", labels=["label"])
        second = Record(data=1, file_name="file", labels=["label"])
        self.assertEqual(first, second)

    def test_equality_of_empty_records(self):
        self.assertEqual(Record(), Record())

    def test_inequality_for_different_file_name(self):
        first = Record(data=1, file_name="file", labels=["label"])
        second = Record(data=1, file_name="other", labels=["label"])
        self.assertFalse(first == second)

    def test_inequality_for_different_labels(self):
        first = Record(data=1, file_name="file", labels=["labels"])
        second = Record(data=1, file_name="file", labels=["labels", "more labels"])
        self.assertFalse(first == second)
class SimplifyNumpyExamplesFromMITDB(unittest.TestCase):
    """interleave_record flattens [x, y] pairs into an alternating flat list."""

    def create_dummy_sample(self, length):
        # Pair every index with its mirror image: [[0, n-1], [1, n-2], ...].
        return [[index, length - 1 - index] for index in range(length)]

    def test_content_one_element(self):
        self.assertSequenceEqual(
            [0, 0],
            interleave_record(self.create_dummy_sample(1))
        )

    def test_content_two_elements(self):
        self.assertSequenceEqual(
            [0, 1, 1, 0],
            interleave_record(self.create_dummy_sample(2))
        )

    def test_content_three_elements(self):
        self.assertSequenceEqual(
            [0, 2, 1, 1, 2, 0],
            interleave_record(self.create_dummy_sample(3))
        )
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1978729 | <gh_stars>0
import paho.mqtt.client as mqtt
import time
from gpiozero import CamJamKitRobot
MQTT_SERVER = "localhost"
MQTT_PATH = "CommandChannel"
robot = CamJamKitRobot()
# The test callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """Callback fired when the broker acknowledges the connection (CONNACK)."""
    print("Connected with result code " + str(rc))
    # Subscribing here (rather than once at startup) means a dropped and
    # re-established connection renews the subscription automatically.
    client.subscribe(MQTT_PATH)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Callback fired for every PUBLISH received on a subscribed topic."""
    payload_text = str(msg.payload)
    print(msg.topic + " " + payload_text)
    # Hand the raw payload string to the command interpreter.
    do_commands(payload_text)
# more callbacks, etc
# This routine decodes and runs the robot
def do_commands(payload):
    """Decode a payload of single-character commands and drive the robot.

    Supported commands:
      ``F`` -> forward 1 s,  ``f`` -> forward 2 s,  ``B`` -> backward 1 s,
      ``L`` -> left 0.3 s,   ``R`` -> right 0.3 s,  ``X`` -> stop processing.
    Any other character is ignored.

    :param payload: string of command characters (the stringified MQTT payload)
    """
    # command character -> (motion callable, duration in seconds)
    moves = {
        "F": (robot.forward, 1),
        "f": (robot.forward, 2),
        "B": (robot.backward, 1),
        "L": (robot.left, 0.3),
        "R": (robot.right, 0.3),
    }
    print("in do commands")
    for command in payload:
        if command == "F":
            # Debug trace kept from the original implementation so the
            # observable output for "F" is unchanged.
            print("command = " + command)
        if command == "X":
            # BUG(original): "X" only assigned a local ``process = False``
            # flag that was never read, so it had no effect.  Interpret it
            # as intended: abort the remaining commands.
            break
        move = moves.get(command)
        if move is not None:
            action, duration = move
            action()
            time.sleep(duration)
            robot.stop()
# Wire up the callbacks and connect to the local broker
# (default MQTT port 1883, 60 s keepalive).
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_SERVER, 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
| StarcoderdataPython |
6651471 | <reponame>Gawesomer/specs2016
# Some inputs that we will use multiple times

# Sample `ls -l` listing: permissions, link count, owner, group, size,
# modification date and file name per line.
ls_out = \
"""
-rw-r--r-- 1 synp staff 1462 Mar 9 10:45 Makefile
-rw-r--r-- 1 synp staff 4530 Mar 4 19:36 Makefile.cached_depends
-rw-r--r-- 1 synp staff 4576 Mar 4 19:36 Makefile.cached_depends_vs
-rw-r--r-- 1 synp staff 4997 Feb 27 21:12 clang_on_centos.txt
-rw-r--r-- 1 synp staff 11931 Feb 27 21:14 clang_on_mac.txt
-rw-r--r-- 1 synp staff 4997 Feb 27 21:25 clc.txt
drwxr-xr-x 7 synp staff 224 Mar 9 10:45 cli
-rw-r--r-- 1 synp staff 172076 Feb 27 22:18 clm.txt
-rwxr-xr-x 1 synp staff 170 Mar 4 19:36 mkcache.sh
-rw-r--r-- 1 synp staff 771 Mar 9 10:49 patch.txt
drwxr-xr-x 20 synp staff 640 Mar 9 10:54 processing
-rw-r--r-- 1 synp staff 7341 Mar 4 19:36 setup.py
drwxr-xr-x 10 synp staff 320 Mar 9 10:45 specitems
drwxr-xr-x 16 synp staff 512 Mar 9 10:45 test
drwxr-xr-x 19 synp staff 608 Mar 9 10:45 utils
-rw-r--r-- 1 synp staff 11931 Feb 27 21:39 xxx
"""
# Sample `ls -i` listing: inode number followed by file name, one entry per
# file from the listing above.
ls_out_inodes = \
"""
8630081633 Makefile
8630080989 Makefile.cached_depends
8630080990 Makefile.cached_depends_vs
8631159018 clang_on_centos.txt
8631159021 clang_on_mac.txt
8630953247 clc.txt
8618332863 cli
8630994304 clm.txt
8630080994 mkcache.sh
8630080991 patch.txt
8618569561 processing
8630081005 setup.py
8618553163 specitems
8630997293 test
8619309532 utils
8630997296 xxx
"""
# Same inode listing but with two entries (clang_on_centos.txt and
# clang_on_mac.txt) missing, so it no longer lines up with ls_out.
ls_out_inodes_mismatched = \
"""
8630081633 Makefile
8630080989 Makefile.cached_depends
8630080990 Makefile.cached_depends_vs
8630953247 clc.txt
8618332863 cli
8630994304 clm.txt
8630080994 mkcache.sh
8630080991 patch.txt
8618569561 processing
8630081005 setup.py
8618553163 specitems
8630997293 test
8619309532 utils
8630997296 xxx
"""
# Free-form multi-line prose input (Lewis Carroll's "Jabberwocky").
Jabberwocky = \
"""
'Twas brillig, and the slithy toves
Did gyre and gimble in the wabe:
All mimsy were the borogoves,
And the mome raths outgrabe.
'Beware the Jabberwock, my son!
The jaws that bite, the claws that catch!
Beware the Jubjub bird, and shun
The frumious Bandersnatch!'
He took his vorpal sword in hand;
Long time the manxome foe he sought--
So rested he by the Tumtum tree
And stood awhile in thought.
And, as in uffish thought he stood,
The Jabberwock, with eyes of flame,
Came whiffling through the tulgey wood,
And burbled as it came!
One, two! One, two! And through and through
The vorpal blade went snicker-snack!
He left it dead, and with its head
He went galumphing back.
'And hast thou slain the Jabberwock?
Come to my arms, my beamish boy!
O frabjous day! Callooh! Callay!'
He chortled in his joy.
'Twas brillig, and the slithy toves
Did gyre and gimble in the wabe:
All mimsy were the borogoves,
And the mome raths outgrabe.
"""
# CSV input: department,first name,last name.
employees = \
'''
Payroll,Eddy,Eck
Payroll,Janel,Polen
Finance,Leonard,Cockett
Finance,Dorie,Lugo
Finance,Wiley,Cheung
Finance,Carmelo,Reitz
Finance,Donetta,Rybak
R&D,Jamaal,Mcgillis
R&D,Jonna,Scheffer
R&D,Shawnna,Driskell
R&D,Maybell,Ditmore
R&D,Ami,Fentress
R&D,Randee,Tarkington
R&D,Jerica,Jimenez
Sales,Kristopher,Lind
Sales,Margret,Picone
Sales,Damien,Daniel
Support,Deann,Rushton
Support,Spencer,Marse
Support,Devora,Fortier
'''
# Sample `git log` output: multi-record input where each record spans
# several lines (commit hash, author, date, message).
gitlog = \
'''
commit df3438ed9e95c2aa37a429ab07f0956164ec4229
Author: synp71 <<EMAIL>>
Date: Sun Jan 20 21:40:41 2019 +0200
Add NEWS section to Readme.md
commit e6d7f9ac591379d653a5685f9d75deccc1792545
Author: synp71 <<EMAIL>>
Date: Sun Jan 20 21:09:47 2019 +0200
Issue #33: Some more docs improvement
Also fixed the stats to conform to current timestamp format.
commit 241002cf5a66737bbfd29888244a0a463cd9bcae
Author: synp71 <<EMAIL>>
Date: Thu Jan 17 23:45:21 2019 +0200
Issue #33: fix formatting
commit 9efb13277c561a3a28195d469420031add60946e
Author: synp71 <<EMAIL>>
Date: Thu Jan 17 23:38:01 2019 +0200
Issue #33 basic specification and CLI switches
'''
# HTTP access-log sample (NASA-KSC style), prefixed with a test tag per line.
httplog = \
'''
test8:mmind.wariat.org - - [04/Jul/1995:08:12:26 -0400] "GET /shuttle/countdown/video/livevideo.gif HTTP/1.0" 304 0
test8:bruosh01.brussels.hp.com - - [04/Jul/1995:08:12:26 -0400] "GET /shuttle/missions/sts-71/mission-sts-71.html HTTP/1.0" 200 12418
test8:beastie-ppp1.knoware.nl - - [04/Jul/1995:08:12:26 -0400] "GET /shuttle/missions/sts-71/images/KSC-95EC-0423.txt HTTP/1.0" 200 1224
test8:piweba3y.prodigy.com - - [04/Jul/1995:08:12:28 -0400] "GET /shuttle/countdown/liftoff.html HTTP/1.0" 200 4535
test8:sullivan.connix.com - - [04/Jul/1995:08:12:28 -0400] "GET /shuttle/missions/sts-71/images/index71.gif HTTP/1.0" 200 57344
test8:bruosh01.brussels.hp.com - - [04/Jul/1995:08:12:33 -0400] "GET /shuttle/missions/sts-71/sts-71-patch-small.gif HTTP/1.0" 200 12054
test9:mmind.wariat.org - - [04/Jul/1995:08:12:33 -0400] "GET /shuttle/countdown/liftoff.html HTTP/1.0" 304 0
test9:www-d4.proxy.aol.com - - [04/Jul/1995:08:12:34 -0400] "GET /shuttle/missions/sts-71/sts-71-day-01-highlights.html HTTP/1.0" 200 2722
test9:mmind.wariat.org - - [04/Jul/1995:08:12:35 -0400] "GET /shuttle/countdown/video/livevideo.gif HTTP/1.0" 304 0
test9:eepc50.ee.surrey.ac.uk - - [04/Jul/1995:08:12:35 -0400] "GET /shuttle/countdown/video/livevideo.jpeg HTTP/1.0" 200 50437
test10:piweba3y.prodigy.com - - [04/Jul/1995:08:12:37 -0400] "GET /shuttle/countdown/video/livevideo.gif HTTP/1.0" 200 61490
test10:crocus-fddi.csv.warwick.ac.uk - - [04/Jul/1995:08:12:39 -0400] "GET /shuttle/missions/sts-71/mission-sts-71.html HTTP/1.0" 200 12418
test10:crocus-fddi.csv.warwick.ac.uk - - [04/Jul/1995:08:12:41 -0400] "GET /shuttle/missions/sts-71/sts-71-patch-small.gif HTTP/1.0" 200 12054
'''
| StarcoderdataPython |
6435173 | <reponame>denizcangi/stereoscope
#!/usr/bin/env python3
import sys
from os import mkdir, getcwd
import os.path as osp
import argparse as arp
import torch as t
from torch.cuda import is_available
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
import stsc.fit as fit
import stsc.datasets as D
import stsc.models as M
import stsc.utils as utils
import stsc.parser as parser
def run(prs : arp.ArgumentParser,
        args : arp.Namespace,
        )-> None:
    """Run the stereoscope analysis.

    Depending on the specified arguments, performs single-cell parameter
    estimation, ST-data proportion estimation, or both.  All output files
    are tagged with a unique timestamp identifier.

    Parameters
    ----------
    prs : argparse.ArgumentParser
        Parser used only to print help when too few arguments are given.
    args : argparse.Namespace
        Parsed command line arguments.
    """
    # generate unique identifier for analysis; used in every output file name
    timestamp = utils.generate_identifier()
    # ensure arguments are provided
    if len(sys.argv[1::]) < 2:
        prs.print_help()
        sys.exit(-1)
    # set output directory to cwd if none specified
    if args.out_dir is None:
        args.out_dir = getcwd()
    # create output directory if non-existent
    elif not osp.exists(args.out_dir):
        mkdir(args.out_dir)
    # instantiate logger writing to <out_dir>/stsc.<timestamp>.log
    log = utils.Logger(osp.join(args.out_dir,
                                '.'.join(['stsc',
                                          timestamp,
                                          'log'])
                                )
                       )
    # normalise st_cnt to a list so one or many sections are handled uniformly
    args.st_cnt = (args.st_cnt if \
                   isinstance(args.st_cnt,list) else \
                   [args.st_cnt])
    # set device; silently fall back to CPU when CUDA is unavailable
    if args.gpu:
        device = t.device('cuda')
    else:
        device = t.device('cpu')
    device = (device if is_available() else t.device('cpu'))
    log.info("Using device {}".format(str(device)))
    # If parameters should be fitted from sc data
    # (args.sc_fit holds pre-fitted parameter paths; all falsy -> fit now)
    if not all(args.sc_fit):
        log.info(' | '.join(["fitting sc data",
                             "count file : {}".format(args.sc_cnt),
                             "labels file : {}".format(args.sc_labels),
                             ])
                 )
        # control that paths to sc data exist
        if not all([osp.exists(args.sc_cnt)]):
            log.error(' '.join(["One or more of the specified paths to",
                                "the sc data does not exist"]))
            sys.exit(-1)
        # load pre-fitted model if provided
        if args.sc_model is not None:
            log.info("loading state from provided sc_model")
        # Create data set for single cell data
        sc_data = D.make_sc_dataset(args.sc_cnt,
                                    args.sc_labels,
                                    topn_genes = args.topn_genes,
                                    gene_list_pth = args.gene_list,
                                    lbl_colname = args.label_colname,
                                    filter_genes = args.filter_genes,
                                    min_counts = args.min_sc_counts,
                                    min_cells = args.min_cells,
                                    transpose = args.sc_transpose,
                                    lower_bound = args.sc_lower_bound,
                                    upper_bound = args.sc_upper_bound,
                                    )
        # G = genes, M = cells, Z = cell types
        log.info(' '.join(["SC data GENES : {} ".format(sc_data.G),
                           "SC data CELLS : {} ".format(sc_data.M),
                           "SC data TYPES : {} ".format(sc_data.Z),
                           ])
                 )
        # generate LossTracker object logging progress every 100 steps
        oname_loss_track = osp.join(args.out_dir,
                                    '.'.join(["sc_loss",timestamp,"txt"])
                                    )
        sc_loss_tracker = utils.LossTracker(oname_loss_track,
                                            interval = 100,
                                            )
        # estimate parameters from single cell data
        sc_res = fit.fit_sc_data(sc_data,
                                 loss_tracker = sc_loss_tracker,
                                 sc_epochs = args.sc_epochs,
                                 sc_batch_size = args.sc_batch_size,
                                 learning_rate = args.learning_rate,
                                 sc_from_model = args.sc_model,
                                 device = device,
                                 )
        R,logits,sc_model = sc_res['rates'],sc_res['logits'],sc_res['model']
        # save sc model state
        oname_sc_model = osp.join(args.out_dir,
                                  '.'.join(['sc_model',timestamp,'pt']))
        t.save(sc_model.state_dict(),oname_sc_model)
        # save estimated parameters (rates R and logits) as TSV
        oname_R = osp.join(args.out_dir,
                           '.'.join(['R',timestamp,'tsv']))
        oname_logits = osp.join(args.out_dir,
                                '.'.join(['logits',timestamp,'tsv']))
        utils.write_file(R,oname_R)
        utils.write_file(logits,oname_logits)
    # Load already estimated single cell parameters
    elif args.st_cnt is not None:
        log.info(' | '.join(["load sc parameter",
                             "rates (R) : {}".format(args.sc_fit[0]),
                             "logodds (logits) : {}".format(args.sc_fit[1]),
                             ])
                 )
        R = utils.read_file(args.sc_fit[0])
        logits = utils.read_file(args.sc_fit[1])
    # If ST data is provided, estimate proportions
    if args.st_cnt[0] is not None:
        # generate identifying tag for each section from the file basename
        sectiontag = list(map(lambda x: '.'.join(osp.basename(x).split('.')[0:-1]),args.st_cnt))
        log.info("fit st data section(s) : {}".format(args.st_cnt))
        # check that provided files exist
        if not all([osp.exists(x) for x in args.st_cnt]):
            log.error("Some of the provided ST-data paths does not exist")
            sys.exit(-1)
        if args.st_model is not None:
            log.info("loading state from provided st_model")
        # create joint data set for all st sections
        st_data = D.make_st_dataset(args.st_cnt,
                                    topn_genes = args.topn_genes,
                                    min_counts = args.min_st_counts,
                                    min_spots = args.min_spots,
                                    filter_genes = args.filter_genes,
                                    transpose = args.st_transpose,
                                    )
        log.info(' '.join(["ST data GENES : {} ".format(st_data.G),
                           "ST data SPOTS : {} ".format(st_data.M),
                           ])
                 )
        # generate LossTracker object logging progress every 100 steps
        oname_loss_track = osp.join(args.out_dir,
                                    '.'.join(["st_loss",timestamp,"txt"])
                                    )
        st_loss_tracker = utils.LossTracker(oname_loss_track,
                                            interval = 100,
                                            )
        # estimate proportions of cell types within st data using the
        # single-cell parameters (R, logits) obtained above
        st_res = fit.fit_st_data(st_data,
                                 R = R,
                                 logits = logits,
                                 loss_tracker = st_loss_tracker,
                                 st_epochs = args.st_epochs,
                                 st_batch_size = args.st_batch_size,
                                 learning_rate = args.learning_rate,
                                 silent_mode = args.silent_mode,
                                 st_from_model = args.st_model,
                                 device = device,
                                 keep_noise = args.keep_noise,
                                 freeze_beta = args.freeze_beta,
                                 )
        W,st_model = st_res['proportions'],st_res['model']
        # split joint proportion matrix into one matrix per section
        wlist = utils.split_joint_matrix(W)
        # save st model state
        oname_st_model = osp.join(args.out_dir,
                                  '.'.join(['st_model',timestamp,'pt']))
        t.save(st_model.state_dict(),oname_st_model)
        # save st data proportion estimates, one subdirectory per section
        for s in range(len(wlist)):
            section_dir = osp.join(args.out_dir,sectiontag[s])
            if not osp.exists(section_dir):
                mkdir(section_dir)
            oname_W = osp.join(section_dir,'.'.join(['W',timestamp,'tsv']))
            log.info("saving proportions for section {} to {}".format(sectiontag[s],
                                                                      oname_W))
            utils.write_file(wlist[s],oname_W)
| StarcoderdataPython |
5108699 | import torch
from .training import get_predictions, get_num_correct_predictions
def get_score_fusion_accuracy(data_loaders, models, device):
    """
    Receives two lists of data loaders and models (synchronized), gets predictions
    and fuses the data of all the models based on a max, product and sum rule. Returns the accuracy for
    all three rules.
    :param data_loaders: List of data loaders
    :param models: List of models
    :param device: Device to be used
    :return: max rule accuracy, product rule accuracy, sum rule accuracy
    """
    # Collect per-model score/label tensors and infer the dataset dimensions
    # from the data itself instead of the previously hard-coded 416 x 27.
    all_scores = []
    all_labels = []
    for data_loader, model in zip(data_loaders, models):
        predictions, labels = get_predictions(data_loader, model, device)
        all_scores.append(predictions)
        all_labels.append(labels)
    c_scores = t.stack(all_scores)  # (num_models, num_samples, num_classes)
    c_labels = t.stack(all_labels)
    num_samples = c_scores.shape[1]
    # Perform all three available rules of fusion
    (fused_scores_max, _) = c_scores.max(dim=0)
    fused_scores_prod = c_scores.prod(dim=0)
    fused_scores_sum = c_scores.sum(dim=0)
    # All loaders iterate the same data, so any model's labels serve as reference.
    correct_pred_max = get_num_correct_predictions(fused_scores_max, c_labels[0])
    correct_pred_prod = get_num_correct_predictions(fused_scores_prod, c_labels[0])
    correct_pred_sum = get_num_correct_predictions(fused_scores_sum, c_labels[0])
    # Return a tuple of max, product and sum rules accuracy
    return (correct_pred_max / num_samples,
            correct_pred_prod / num_samples,
            correct_pred_sum / num_samples)
| StarcoderdataPython |
8188028 | import pandas as pd
import csv
from collections import defaultdict
def analysis():
    """Clean the scraped disease/symptom dump and build Gephi-style tables.

    Reads ``Scraped-Data/dataset_uncleaned.csv`` (disease, weight, symptoms),
    writes an edge list to ``Scraped-Data/dataset_clean.csv`` and a node
    table to ``Scraped-Data/nodetable.csv``, and returns the cleaned edge
    list as a DataFrame with columns Source/Target/Weight.
    """
    disease_list = []

    # Names are encoded like "prefix_name^prefix_name..."; every second
    # '_'-separated token is an actual disease/symptom name.
    def return_list(disease):
        disease_list = []
        match = disease.replace('^','_').split('_')
        ctr = 1
        for group in match:
            if ctr%2==0:
                disease_list.append(group)
            ctr = ctr + 1
        return disease_list

    with open("Scraped-Data/dataset_uncleaned.csv") as csvfile:
        reader = csv.reader(csvfile)
        disease=""
        weight = 0
        disease_list = []
        dict_wt = {}
        dict_=defaultdict(list)
        for row in reader:
            # "\xc2\xa0" is a UTF-8 non-breaking space left over from
            # scraping; treat it like an empty cell.
            if row[0]!="\xc2\xa0" and row[0]!="":
                # New disease row: remember its name list and weight; the
                # following symptom-only rows belong to this disease.
                disease = row[0]
                disease_list = return_list(disease)
                weight = row[1]
            if row[2]!="\xc2\xa0" and row[2]!="":
                symptom_list = return_list(row[2])
                for d in disease_list:
                    for s in symptom_list:
                        dict_[d].append(s)
                        dict_wt[d] = weight
    ##print (dict_)
    with open("Scraped-Data/dataset_clean.csv","w") as csvfile:
        writer = csv.writer(csvfile)
        for key,values in dict_.items():
            for v in values:
                #key = str.encode(key)
                # NOTE(review): encode/decode round-trip is effectively a
                # no-op on Python 3 str values -- presumably a Python 2
                # leftover; confirm before removing.
                key = str.encode(key).decode('utf-8')
                #.strip()
                #v = v.encode('utf-8').strip()
                #v = str.encode(v)
                writer.writerow([key,v,dict_wt[key]])
    # Re-read the edge list to prepend a header row, then write it back.
    columns = ['Source','Target','Weight']
    data = pd.read_csv("Scraped-Data/dataset_clean.csv",names=columns, encoding ="ISO-8859-1")
    data.head()
    data.to_csv("Scraped-Data/dataset_clean.csv",index=False)
    # Build the node table: every symptom and disease exactly once,
    # tagged with its node attribute.
    slist = []
    dlist = []
    with open("Scraped-Data/nodetable.csv","w") as csvfile:
        writer = csv.writer(csvfile)
        for key,values in dict_.items():
            for v in values:
                if v not in slist:
                    writer.writerow([v,v,"symptom"])
                    slist.append(v)
                if key not in dlist:
                    writer.writerow([key,key,"disease"])
                    dlist.append(key)
    nt_columns = ['Id','Label','Attribute']
    nt_data = pd.read_csv("Scraped-Data/nodetable.csv",names=nt_columns, encoding ="ISO-8859-1",)
    nt_data.to_csv("Scraped-Data/nodetable.csv",index=False)
    data = pd.read_csv("Scraped-Data/dataset_clean.csv", encoding ="ISO-8859-1")
    # NOTE(review): these two counts are computed and discarded -- probably
    # leftovers from an interactive notebook session.
    len(data['Source'].unique())
    len(data['Target'].unique())
    return data
def analysis2(data):
    """One-hot encode symptoms per disease.

    :param data: DataFrame (or DataFrame-convertible) with at least the
        columns ``Source`` (disease) and ``Target`` (symptom), one edge
        per row.
    :return: DataFrame with one row per disease and a 0/1 indicator column
        per symptom; duplicate disease/symptom edges are counted once.
    """
    df = pd.DataFrame(data)
    symptom_dummies = pd.get_dummies(df.Target)
    df_pivoted = pd.concat([df['Source'], symptom_dummies], axis=1)
    # Duplicate (disease, symptom-vector) rows would inflate the sums below.
    df_pivoted.drop_duplicates(keep='first', inplace=True)
    df_pivoted = df_pivoted.groupby('Source').sum()
    return df_pivoted.reset_index()
| StarcoderdataPython |
4970760 | #!/usr/bin/env python
# rebatch.py
#
# Copyright (C) 2011 <NAME>, <NAME>
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# Replacement for CCP4 program rebatch, using cctbx Python.
#
from __future__ import absolute_import, division
import sys
from cctbx.array_family import flex
from iotbx import mtz
def compact_batches(batches):
    '''Pack a sorted list of batch numbers into runs of consecutive numbers.

    E.g. [1, 2, 3, 7, 8] -> [[1, 2, 3], [7, 8]].

    :param batches: iterable of ints, ascending
    :return: list of lists, one per consecutive run
    '''
    from itertools import groupby
    # Consecutive values share a constant (index - value), which serves as
    # the groupby key.  Each group is materialised as a plain list so the
    # result is identical under Python 2 and 3 (the original relied on
    # Python 2's list-returning map()).
    return [[value for _, value in group]
            for _, group in groupby(enumerate(batches),
                                    lambda pair: pair[0] - pair[1])]
def rebatch(hklin, hklout, first_batch=None, add_batch=None,
            include_range=None, exclude_range=None, exclude_batches=None,
            pname=None, xname=None, dname=None):
    '''Filter or renumber batches of an MTZ file.

    Exactly one family of operations is applied: exclude batch ranges,
    include only batch ranges, or renumber batches (start at first_batch
    OR shift by add_batch).  Optionally relabels project/crystal/dataset
    names.  Returns the (min, max) batch numbers after any offset.
    '''
    if include_range is None:
        include_range = []
    if exclude_range is None:
        exclude_range = []
    if first_batch is not None and add_batch is not None:
        raise RuntimeError('both first and add specified')
    # The filtering and renumbering modes are mutually exclusive.
    assert not (len(include_range) and len(exclude_range))
    assert not (len(exclude_range) and len(exclude_batches))
    assert not (len(include_range) and first_batch)
    assert not (len(exclude_range) and first_batch)
    if exclude_batches:
        # Collapse the explicit batch list into (start, end) ranges.
        exclude_range = [(b[0], b[-1]) for b in compact_batches(exclude_batches)]
    mtz_obj = mtz.object(file_name=hklin)
    # Locate the BATCH column (last match wins if several exist).
    batch_column = None
    batch_dataset = None
    for crystal in mtz_obj.crystals():
        for dataset in crystal.datasets():
            for column in dataset.columns():
                if column.label() == 'BATCH':
                    batch_column = column
                    batch_dataset = dataset
    if not batch_column:
        raise RuntimeError('no BATCH column found in %s' % hklin)
    batches = [b.num() for b in mtz_obj.batches()]
    batch_column_values = batch_column.extract_values(
        not_a_number_substitute = -1)
    # NOTE(review): an empty flex.bool() selection is passed as
    # selection_valid below -- presumably cctbx treats this as
    # "all valid"; confirm against the iotbx.mtz API.
    valid = flex.bool()
    offset = 0
    if exclude_range:
        # Mark reflections inside any excluded range and delete them.
        exclude_sel = flex.bool(batch_column_values.size(), False)
        for (start, end) in exclude_range:
            exclude_sel.set_selected(
                (batch_column_values >= start) & (batch_column_values <= end), True)
        mtz_obj.delete_reflections(exclude_sel.iselection())
    elif include_range:
        # Inverse of the above: keep only reflections inside some range.
        exclude_sel = flex.bool(batch_column_values.size(), True)
        for (start, end) in include_range:
            exclude_sel.set_selected(
                (batch_column_values >= start) & (batch_column_values <= end), False)
        mtz_obj.delete_reflections(exclude_sel.iselection())
    # modify batch columns, and also the batch headers
    elif first_batch is not None or add_batch is not None:
        if first_batch is not None:
            offset = first_batch - min(batches)
        else:
            offset = add_batch
        batch_column_values = batch_column_values + offset
        for batch in mtz_obj.batches():
            batch.set_num(int(batch.num() + offset))
    # done modifying
    batch_column.set_values(values=batch_column_values, selection_valid=valid)
    if pname and xname and dname:
        # Relabel dataset on every crystal; project/crystal names only on
        # real crystals (HKL_base is the placeholder base crystal).
        for c in mtz_obj.crystals():
            for d in c.datasets():
                d.set_name(dname)
            if c.name() == 'HKL_base':
                continue
            c.set_project_name(pname)
            c.set_name(xname)
    # and write this lot out as hklout
    mtz_obj.write(file_name=hklout)
    new_batches = (min(batches) + offset, max(batches) + offset)
    return new_batches
def copy_r_file(hklin, hklout):
    '''Copy an MTZ file keeping only even-numbered batches.

    Even-numbered batch headers and their reflections are copied to a new
    MTZ object mirroring the crystal/dataset/column layout of the input.

    NOTE(review): this function uses a Python 2 print statement below --
    this module is Python 2 only.
    '''
    mtz_obj = mtz.object(file_name = hklin)
    mtz_out = mtz.object()
    mtz_out.set_space_group(mtz_obj.space_group())
    # Copy headers of even-numbered batches only.
    for batch in mtz_obj.batches():
        if batch.num() % 2 == 0:
            batch_out = mtz_out.add_batch()
            batch_out.set_num(batch.num())
            batch_out.set_title(batch.title())
            batch_out.set_gonlab(batch.gonlab())
            batch_out.set_ndet(batch.ndet())
            batch_out.set_phixyz(batch.phixyz())
            batch_out.set_detlm(batch.detlm())
    # Mirror the crystal/dataset/column structure and find the BATCH column.
    batch_column = None
    for crystal in mtz_obj.crystals():
        crystal_out = mtz_out.add_crystal(
            crystal.name(), crystal.project_name(), crystal.unit_cell())
        for dataset in crystal.datasets():
            dataset_out = crystal_out.add_dataset(dataset.name(),
                                                  dataset.wavelength())
            for column in dataset.columns():
                dataset_out.add_column(column.label(), column.type())
                if column.label() == 'BATCH':
                    batch_column = column
    if not batch_column:
        raise RuntimeError('no BATCH column found in %s' % hklin)
    batch_column_values = batch_column.extract_values(
        not_a_number_substitute = -1)
    # NOTE(review): empty selection -- presumably "all valid"; see rebatch().
    valid = flex.bool()
    # Indices of reflections in odd-numbered batches; deleted back-to-front
    # so earlier indices stay valid during deletion.
    remove = []
    for j, b in enumerate(batch_column_values):
        if b % 2 != 0:
            remove.append(j)
    remove.reverse()
    for crystal in mtz_obj.crystals():
        for dataset in crystal.datasets():
            for column in dataset.columns():
                print column.label()
                values = column.extract_values(
                    not_a_number_substitute = -999999)
                for r in remove:
                    del(values[r])
                mtz_out.get_column(column.label()).set_values(
                    values = values, selection_valid = valid)
    mtz_out.write(file_name = hklout)
    return
if __name__ == '__main__':
    # CLI entry point: rebatch HKLIN HKLOUT, renumbering batches to start
    # at 42 and relabelling project/crystal/dataset names.
    import sys
    hklin = sys.argv[1]
    hklout = sys.argv[2]
    # should be a no-op essentially...
    rebatch(hklin, hklout, first_batch=42, pname="pname", xname="xname", dname="dname")
| StarcoderdataPython |
9736523 | from typing import List
import sys
import gym
import numpy as np
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from prettytable import PrettyTable
import neat.hyperneat as hn
from neat.phenotypes import Phenotype, FeedforwardCUDA
from neat.mapElites import MapElitesConfiguration, MapElitesUpdate
import time
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
env_name = "BipedalWalker-v2"
nproc = 4
envs_size = 50
pop_size = 200
max_stagnation = 25
encoding_dim = 8
behavior_dimensions = 7
behavior_steps = 60
behavior_matrix_size = behavior_dimensions * behavior_steps
# _ALE_LOCK = threading.Lock()
def take_step(env_action):
    """Advance a single environment by one action.

    *env_action* is an ``(env, action)`` pair.  Returns a length-3 numpy
    array ``[state, reward, done]``; the ``info`` dict is discarded.
    """
    environment, action = env_action
    observation, reward, done, _info = environment.step(action)
    return np.array([observation, reward, done])
def make_env(env_id, seed):
    """Return a zero-argument factory that builds and seeds a gym env.

    SubprocVecEnv expects such thunks so that every worker process
    constructs its own environment instance.
    """
    def _thunk():
        environment = gym.make(env_id)
        environment.seed(seed)
        return environment
    return _thunk
def test_organism(phenotypes, envs, render=False):
    """Evaluate a batch of phenotypes in the vectorised environments.

    Runs all phenotypes until every env is done (or stagnates), accumulating
    rewards and sampling a 7-feature behaviour snapshot every 50 steps.

    :param phenotypes: list of Phenotype objects (at most len(envs.remotes))
    :param envs: SubprocVecEnv with one worker per phenotype slot
    :param render: kept for API compatibility; rendering code is disabled
    :return: (fitnesses, flattened_states) -- per-env reward totals and the
        behaviour matrix padded to ``behavior_matrix_size`` columns
    """
    feedforward = FeedforwardCUDA(phenotypes)
    observations = envs.reset()
    obs_32 = np.float32(observations)
    actions = feedforward.update(obs_32)
    fitnesses = np.zeros(len(envs.remotes), dtype=np.float64)
    done = False
    done_tracker = np.array([False for _ in range(len(envs.remotes))])
    # Fewer phenotypes than envs: mark the surplus env slots as done so
    # they never contribute to fitness.
    diff = len(phenotypes) - len(envs.remotes)
    if diff < 0:
        done_tracker[diff:] = True
    distances = np.zeros(len(envs.remotes))
    last_distances = np.zeros(len(envs.remotes))
    stagnations = np.zeros(len(envs.remotes))
    all_states = []
    max_steps = 50
    steps = max_steps
    while not done:
        # Pad actions so the action vector always matches the env count.
        actions = np.pad(actions, (0, abs(diff)), 'constant')
        states, rewards, dones, info = envs.step(actions)
        actions = feedforward.update(states)
        # Only still-running envs accumulate reward.
        fitnesses[done_tracker == False] += np.around(rewards[done_tracker == False], decimals=4)
        envs_done = dones == True
        done_tracker[envs_done] = dones[envs_done]
        envs_running = len([d for d in done_tracker if d == False])
        done = envs_running == 0
        # Kill envs whose horizontal velocity integral stops increasing for
        # 100 consecutive steps.
        distances += np.around(states.T[2], decimals=2)
        stagnations += distances == last_distances
        done_tracker[stagnations >= 100] = True
        # BUG FIX: the original did ``last_distances = distances``, aliasing
        # the array that is then mutated in-place with ``+=`` above -- the
        # equality check was all-True from the second iteration onward, so
        # every env was killed after exactly 100 steps.  Snapshot a copy.
        last_distances = distances.copy()
        # Sample a behaviour snapshot every ``max_steps`` environment steps.
        if steps == max_steps:
            steps = 0
            all_states.append(states[:, [0, 4, 6, 8, 9, 11, 13]])
        steps += 1
    all_states = np.array(all_states)
    # Re-arrange from (snapshots, envs, features) to one flat row per env.
    flattened_states = []
    for row_i in range(all_states.shape[1]):
        flattened_states.append(all_states[:, row_i].flatten())
    flattened_states = pad_matrix(np.array(flattened_states), behavior_matrix_size)
    return (fitnesses, flattened_states)
def chunk_testing(phenotypes):
    """Evaluate phenotypes in groups of ``envs_size`` and gather the results.

    Returns ``(states, fitnesses)`` as numpy arrays covering all phenotypes.
    """
    collected_states = []
    collected_fitnesses = []
    for batch in chunks(phenotypes, envs_size):
        batch_fitness, batch_states = test_organism(batch, envs)
        collected_states.extend(batch_states)
        collected_fitnesses.extend(batch_fitness)
    return (np.array(collected_states), np.array(collected_fitnesses))
def run_env_once(phenotype):
    """Render a single phenotype in the global env until done or stagnant.

    First evaluates the phenotype in a one-worker SubprocVecEnv, then runs
    it step by step in the global ``env`` with rendering enabled.  Stops
    when the env reports done or distance stops improving for 100 steps.
    """
    single_envs = SubprocVecEnv([make_env(env_name, envs_size)])
    test_organism([phenotype], single_envs, render=True)
    # Visualize().update(phenotype)
    feedforward_highest = FeedforwardCUDA([phenotype])
    states = env.reset()
    done = False
    distance = 0.0
    last_distance = 0.0
    distance_stagnation = 0
    while not done:
        actions = feedforward_highest.update(np.array([states]))
        states, reward, done, info = env.step(actions[0])
        # state index 2 is accumulated as a distance proxy
        distance += np.around(states[2], decimals=2)
        # Abort after 100 consecutive steps without forward progress.
        if distance <= last_distance:
            distance_stagnation += 1
        else:
            distance_stagnation = 0
        if distance_stagnation >= 100:
            done = True
        last_distance = distance
        env.render()
    env.close()
    # Visualize().close()
def refine_ae(autoencoder, phenotypes):
    """Retrain the behaviour autoencoder on fresh rollouts of *phenotypes*.

    Resets the weights to the saved initial state ('basic.h5'), then keeps
    fitting 50-epoch rounds on newly collected behaviour matrices until the
    validation loss stops improving between rounds.
    """
    autoencoder.load_weights('basic.h5')
    last_loss = 1000.0
    loss = 1000.0
    while loss <= last_loss:
        last_loss = loss
        # Fresh rollout each round; last 10% held out for validation.
        states, _ = chunk_testing(phenotypes)
        ten_percent = max(1, int(states.shape[0] * 0.1))
        train = states[:-ten_percent]
        test = states[-ten_percent:]
        hist = autoencoder.fit(train, train,
                               epochs=50,
                               batch_size=256,
                               shuffle=True,
                               validation_data=(test, test),
                               verbose=0)
        loss = abs(hist.history['val_loss'][-1])
        print("Training autoencoder. Loss: {}".format(loss))
def pad_matrix(all_states, matrix_width):
    """Zero-pad every row of *all_states* on the right to *matrix_width*.

    NOTE(review): mirroring the original ``abs()``, a row *longer* than
    ``matrix_width`` is padded by the difference as well (never truncated)
    -- callers are expected to pass rows of at most ``matrix_width``
    entries; confirm.

    :param all_states: iterable of 1-D sequences/arrays
    :param matrix_width: target number of columns
    :return: 2-D ``np.ndarray``, one padded row per input row
    """
    padded = [np.pad(row, (0, abs(matrix_width - len(row))), 'constant')
              for row in all_states]
    return np.array(padded)
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
if __name__ == '__main__':
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.backend.tensorflow_backend import set_session
    import tensorflow as tf
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    config.log_device_placement = True  # to log device placement (on which device the operation ran)
    sess = tf.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras
    sys.setrecursionlimit(10000)
    env = gym.make(env_name)
    inputs = env.observation_space.shape[0]
    outputs = env.action_space.shape[0]
    print("Inputs: {} | Outputs: {}".format(inputs, outputs))
    ############################################## Auto encoder ##############################################
    # The autoencoder compresses a behaviour matrix row
    # (behavior_matrix_size wide) into encoding_dim features for MAP-Elites.
    # this is our input placeholder
    input_img = Input(shape=(behavior_matrix_size,))
    # "encoded" is the encoded representation of the input
    encoded = Dense(encoding_dim, activation='sigmoid')(input_img)
    # "decoded" is the lossy reconstruction of the input
    decoded = Dense(behavior_matrix_size, activation='relu')(encoded)
    # this model maps an input to its reconstruction
    autoencoder = Model(input_img, decoded)
    # this model maps an input to its encoded representation
    encoder = Model(input_img, encoded)
    # create a placeholder for an encoded (32-dimensional) input
    encoded_input = Input(shape=(encoding_dim,))
    # retrieve the last layer of the autoencoder model
    decoder_layer = autoencoder.layers[-1]
    # create the decoder model
    decoder = Model(encoded_input, decoder_layer(encoded_input))
    autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
    # Save the untrained weights so refine_ae() can always restart from scratch.
    autoencoder.save_weights('basic.h5')
    ############################################################################################################
    print("Creating hyperneat object")
    # pop_config = SpeciesConfiguration(pop_size, inputs, outputs)
    pop_config = MapElitesConfiguration(4, pop_size, encoding_dim, inputs, outputs)
    # hyperneat = hn.HyperNEAT(pop_config)
    hyperneat = hn.NEAT(pop_config)
    # Bootstrap the first generation with zero fitness/features.
    start_fitness = np.zeros(pop_size)
    start_features = np.zeros((pop_size, encoding_dim))
    phenotypes: List[Phenotype] = hyperneat.epoch(MapElitesUpdate(start_fitness, start_features))
    # phenotypes: List[Phenotype] = hyperneat.epoch(SpeciesUpdate(start_fitness))
    highest_fitness: float = -1000.0
    highestDistance: float = 0.0
    envs = [make_env(env_name, seed) for seed in range(envs_size)]
    print("Creating envs...")
    envs = SubprocVecEnv(envs)
    print("Done.")
    loss = 1000.0
    progress_stagnation = 0
    train_ae = progress_stagnation >= max_stagnation
    refine_ae(autoencoder, phenotypes)
    epoch_num = 0
    # Main training loop: evaluate -> (optionally retrain AE) -> evolve.
    while True:
        epoch_num += 1
        progress_stagnation += 1
        train_ae = progress_stagnation == max_stagnation
        if train_ae:
            # Stagnated for max_stagnation epochs: retrain the autoencoder
            # on the current elites and rebuild the MAP-Elites archive.
            ae_phenotypes = []
            sorted_genomes_and_fitness = []
            if len(hyperneat.population) > 0:
                sorted_genomes_and_fitness = sorted(hyperneat.population.population_and_fitnesses(), key=lambda a: a['fitness'])
                ae_phenotypes = [g['genome'].createPhenotype() for g in sorted_genomes_and_fitness]
            else:
                sorted_genomes_and_fitness = hyperneat.population.population_and_fitnesses()
                ae_phenotypes = phenotypes
            refine_ae(autoencoder, ae_phenotypes)
            top_percent = max(1, int(len(ae_phenotypes) * 0.25) + 1)
            ae_phenotypes = ae_phenotypes[:top_percent]
            # Re-evaluate all the genomes in the archives
            ae_states, ae_fitnesses = chunk_testing(ae_phenotypes)
            ae_pred = encoder.predict(ae_states)
            ae_fitnesses = ae_fitnesses[:top_percent]
            sorted_genomes = [g['genome'] for g in sorted_genomes_and_fitness]
            hyperneat.population.archive = {}
            hyperneat.population.archivedGenomes = []
            hyperneat.population.genomes = sorted_genomes[:top_percent]
            phenotypes = hyperneat.epoch(MapElitesUpdate(ae_fitnesses, ae_pred))
            progress_stagnation = 0
        print("########## Epoch {} ##########".format(epoch_num))
        # Test the phenotypes in the envs
        start = time.time()
        all_states, fitnesses = chunk_testing(phenotypes)
        end = time.time()
        print("Time:", end - start)
        # Encode the behaviour matrices into MAP-Elites feature coordinates.
        pred = encoder.predict(all_states)
        print("Highest fitness this epoch:", max(fitnesses))
        max_fitness = max(zip(fitnesses, phenotypes), key=lambda e: e[0])
        # mpc = mp.get_context('spawn')
        # p = mpc.Process(target=run_env_once, args=(max_fitness[1],))
        # p.start()
        if max_fitness[0] > highest_fitness:
            # New all-time best: remember it and render one rollout.
            highest_fitness = max_fitness[0] if max_fitness[0] > highest_fitness else highest_fitness
            best_phenotype = max_fitness[1]
            # Visualize().update(best_phenotype)
            run_env_once(best_phenotype)
            # Visualize().close()
            # progress_stagnation = 0
        # else:
        #     progress_stagnation += 1
        phenotypes = hyperneat.epoch(MapElitesUpdate(fitnesses, pred))
        print("Highest fitness all-time: {}".format(highest_fitness))
        print("Progress stagnation: {}".format(progress_stagnation))
        # Fraction of the MAP-Elites grid that currently holds a genome.
        total = pow(hyperneat.population.configuration.mapResolution,
                    hyperneat.population.configuration.features)
        archiveFilled = len(hyperneat.population.archivedGenomes) / total
        print("Genomes in archive: {}".format(len(hyperneat.population.archive)))
        archive_size = max(1, len(hyperneat.population.archive))
        population = hyperneat.population.population()
        avg_neurons = sum([len(g.neurons) for g in population]) / archive_size - (
            inputs + outputs)
        avg_links = sum([len(g.links) for g in population]) / archive_size
        avg_fitness = sum([g['fitness'] for g in hyperneat.population.population_and_fitnesses()]) / max(1, len(
            hyperneat.population.archive))
        table = PrettyTable(["Epoch", "fitness", "max fitness", "neurons", "links", "avg. fitness", "archive"])
        table.add_row([
            epoch_num,
            "{:1.4f}".format(max_fitness[0]),
            "{:1.4f}".format(highest_fitness),
            "{:1.4f}".format(avg_neurons),
            "{:1.4f}".format(avg_links),
            "{:1.4f}".format(avg_fitness),
            "{:1.8f}".format(archiveFilled)])
        print(table)
    # NOTE(review): unreachable -- the loop above never breaks.
    env.close()
| StarcoderdataPython |
1658572 | # Standard imports
import datetime
import io
import json
import os
# Django imports
from django import http
from rest_framework import viewsets
# Third-party imports
import piexif
from PIL import Image
# Local imports
from . import filters, models, serializers, utils
from .membership import permissions
# Provide API access to python log files (used by log_view)
def log_api(request, *args, **kwargs):
    """JSON endpoint returning python log lines written after ``start_time``.

    Superuser-only. Responds with ``{"end_time": ..., "logs": ...}`` so the
    caller can pass ``end_time`` back as the next ``start_time``.
    """
    # Superusers only
    if not request.user.is_superuser:
        return http.HttpResponseForbidden()
    # Optional ?start_time=YYYY-MM-DDTHH:MM:SS query parameter
    raw_start = request.GET.get("start_time")
    start_time = None
    if raw_start is not None:
        start_time = datetime.datetime.strptime(raw_start, "%Y-%m-%dT%H:%M:%S")
    logs, end_time = utils.read_logs(start_time)
    return http.JsonResponse({"end_time": end_time, "logs": logs})
# Provide UI access to python log files (for admin page)
def log_view(request, *args, **kwargs):
    """Serve a self-refreshing HTML page that tails the python log files.

    Superuser-only. The page polls the sibling ``log_api`` endpoint every
    5 seconds, appends any new log text, and auto-scrolls only when the
    viewer is already at the bottom of the page.
    """
    # Ensure request is authorised
    if not request.user.is_superuser:
        return http.HttpResponseForbidden()
    # Inline page: a console-styled <div> plus the polling script.
    # `nextstart` carries the server-reported end_time forward so each poll
    # only fetches log lines written since the previous one.
    html = """
        <div id="log"></div>
        <style>
            body {
                margin: 0;
            }
            #log {
                background-color: black;
                color: white;
                font-family: ubuntu;
                padding: 10px;
            }
        </style>
        <script>
            var logdiv = document.getElementById("log");
            var nextstart;
            function load_logs (start_time) {
                var xhr = new XMLHttpRequest();
                xhr.responseType = "json";
                xhr.addEventListener("load", function () {
                    var data = this.response;
                    var atBottom = window.scrollY + window.innerHeight == document.body.scrollHeight;
                    logdiv.innerText += data.logs;
                    if (atBottom) window.scrollTo(0, document.body.scrollHeight);
                    if (data.logs.length > 0) nextstart = data.end_time;
                });
                xhr.open("GET", "log_api" + (start_time ? ("?start_time=" + start_time) : ""));
                xhr.send();
            }
            load_logs();
            window.setInterval(function () { load_logs(nextstart); }, 5000);
        </script>
    """
    return http.HttpResponse(html)
# Provide an image from File or Scan model ID, with width/height/quality options
def image_view(request, *args, **kwargs):
    """Serve an image File (or Scan) as a JPEG HTTP response.

    URL kwargs:
        file_id: primary key of the File/Scan ("scans" in the path selects Scan).
        width/height (optional): bounding box for a scaled-down copy.
        quality (optional): JPEG quality for the scaled copy (default 75).

    Returns 403 when unauthorised, 404 when the DB row or the file on disk
    is missing, 400 when the File is not an image.
    """
    # EXIF orientation tag -> counter-clockwise rotation in degrees
    rotations = {1: 0, 3: 180, 6: 270, 8: 90}
    # Ensure request is authorised
    if not permissions.FileserverPermission().has_permission(request):
        return http.HttpResponseForbidden()
    is_scan = "scans" in request.path
    # Get file, ensure it exists and is an image
    file_qs = (models.Scan if is_scan else models.File).objects.filter(id=kwargs["file_id"])
    if not file_qs.exists():
        return http.HttpResponseNotFound()
    file = file_qs.first()
    if not os.path.isfile(file.get_real_path()):
        return http.HttpResponseNotFound()
    if not (is_scan or file.type == "image"):
        return http.HttpResponseBadRequest()
    # Scale image if appropriate
    if "width" in kwargs and "height" in kwargs:
        # Determine the desired quality
        quality = kwargs.get("quality", 75)  # TODO user config?
        # Load image
        image = Image.open(file.get_real_path())
        # Swap the bounding box for sideways orientations so the final
        # (rotated) image fits the requested width/height
        if file.orientation in [6, 8]:
            image.thumbnail((kwargs["height"], kwargs["width"]))
        else:
            image.thumbnail((kwargs["width"], kwargs["height"]))
        # Rotate if needed
        if file.orientation in rotations and file.orientation != 1:
            image = image.rotate(rotations[file.orientation], expand=True)
        # Create response from image
        response = http.HttpResponse(content_type="image/jpeg")
        image.save(response, "JPEG", quality=quality)
    else:
        # Orientation recorded in the file's EXIF metadata (Scans have none)
        exif_orientation = (utils.get_if_exist(json.loads(file.metadata), ["exif", "Image", "Orientation"]) or 1) if not is_scan else 1
        if exif_orientation == file.orientation or exif_orientation not in rotations or file.orientation not in rotations:
            # Create response from unaltered image data.
            # Use a context manager so the file handle is closed (the
            # previous version leaked it).
            with open(file.get_real_path(), "rb") as source:
                data = source.read()
            response = http.HttpResponse(data, content_type="image/jpeg")
        else:
            # Stored EXIF orientation disagrees with the DB value:
            # rotate by the difference before serving
            image = Image.open(file.get_real_path())
            image = image.rotate(rotations[file.orientation] - rotations[exif_orientation], expand=True)
            response = http.HttpResponse(content_type="image/jpeg")
            image.save(response, "JPEG", quality=95)
    response["Content-Disposition"] = "filename=\"%s.%s\"" % (file.name, file.format)
    return response
# Provide EXIF thumbnail of image File or Scan if available
def image_thumb_view(request, *args, **kwargs):
    """Serve the EXIF-embedded thumbnail of an image File or Scan as JPEG.

    Returns 403 when unauthorised, 404 when the row/file/thumbnail is
    missing, 400 when the File is not an image.
    """
    # EXIF orientation tag -> counter-clockwise rotation in degrees
    rotations = {3: 180, 6: 270, 8: 90}
    # Ensure request is authorised
    if not permissions.FileserverPermission().has_permission(request):
        return http.HttpResponseForbidden()
    is_scan = "scans" in request.path
    # Get file, ensure it exists and is an image
    model = models.Scan if is_scan else models.File
    file_qs = model.objects.filter(id=kwargs["file_id"])
    if not file_qs.exists():
        return http.HttpResponseNotFound()
    file = file_qs.first()
    if not os.path.isfile(file.get_real_path()):
        return http.HttpResponseNotFound()
    if not is_scan and file.type != "image":
        return http.HttpResponseBadRequest()
    # Pull the thumbnail straight out of the file's EXIF data
    exif = piexif.load(file.get_real_path())
    data = exif["thumbnail"]
    # Reject if no thumbnail in EXIF data
    if data is None:
        return http.HttpResponseNotFound()
    # Correct the orientation before serving, when required
    if file.orientation in rotations:
        image = Image.open(io.BytesIO(data))
        image = image.rotate(rotations[file.orientation], expand=True)
        buffer = io.BytesIO()
        image.save(buffer, "JPEG")
        data = buffer.getvalue()
    # Return the thumbnail response
    response = http.HttpResponse(data, content_type="image/jpeg")
    response["Content-Disposition"] = "filename=\"%s.%s\"" % (file.name, file.format)
    return response
# Provide saved thumbnail image for face
def face_view(request, *args, **kwargs):
    """Serve the stored thumbnail for a Face record as a JPEG response."""
    # Ensure request is authorised
    if not permissions.FileserverPermission().has_permission(request):
        return http.HttpResponseForbidden()
    # Look the face up; 404 when the id is unknown
    matches = models.Face.objects.filter(id=kwargs["face_id"])
    if not matches.exists():
        return http.HttpResponseNotFound()
    face = matches.first()
    # Generate and persist the thumbnail on first access
    if face.thumbnail is None:
        face.save_thumbnail()
    # The stored value may be raw bytes or a buffer-like object
    thumb = face.thumbnail if isinstance(face.thumbnail, bytes) else face.thumbnail.tobytes()
    return http.HttpResponse(thumb, content_type="image/jpeg")
# File API, with filtering by folder/album, searching and pagination
class FileViewSet(viewsets.ModelViewSet):
    """Paginated, searchable File API, filterable by folder/album; no create/delete."""

    serializer_class = serializers.FileSerializer
    # Disallow the mutating verbs PUT/POST/DELETE
    http_method_names = [n for n in viewsets.ModelViewSet.http_method_names if n not in ("put", "post", "delete")]
    queryset = models.File.objects.all().order_by("folder", "name")
    filter_class = filters.FileFilter
    filter_backends = (filters.BACKEND, filters.CustomSearchFilter, filters.PermissionFilter)
    pagination_class = filters.CustomPagination
# Folder API, with filtering by parent and searching
class FolderViewSet(viewsets.ModelViewSet):
    """Searchable Folder API, filterable by parent; no create/delete."""

    serializer_class = serializers.FolderSerializer
    # Disallow the mutating verbs PUT/POST/DELETE
    http_method_names = [n for n in viewsets.ModelViewSet.http_method_names if n not in ("put", "post", "delete")]
    queryset = models.Folder.objects.all().order_by("parent", "name")
    filter_class = filters.FolderFilter
    filter_backends = (filters.BACKEND, filters.CustomSearchFilter, filters.PermissionFilter)
# Album API
class AlbumViewSet(viewsets.ModelViewSet):
    """Full CRUD API for Album records."""

    queryset = models.Album.objects.all()
    serializer_class = serializers.AlbumSerializer
# Album-File API (for adding/removing files from albums)
class AlbumFileViewSet(viewsets.ModelViewSet):
    """API over the Album-File association (adding/removing files from albums)."""

    serializer_class = serializers.AlbumFileSerializer
    queryset = models.AlbumFile.objects.all()
    filter_class = filters.AlbumFileFilter
# Person API
class PersonViewSet(viewsets.ModelViewSet):
    """Full CRUD API for Person records."""

    queryset = models.Person.objects.all()
    serializer_class = serializers.PersonSerializer
# Face API, with filtering by person and pagination
class FaceViewSet(viewsets.ModelViewSet):
    """Paginated Face API (GET/PATCH only), filterable by person."""

    # Only reading and partial updates are allowed
    http_method_names = ["get", "patch", "head", "options"]
    queryset = models.Face.objects.all().order_by("-status", "uncertainty", "id")
    serializer_class = serializers.FaceSerializer
    filter_class = filters.FaceFilter
    pagination_class = filters.CustomPagination
# PersonGroup API
class PersonGroupViewSet(viewsets.ModelViewSet):
    """Full CRUD API for PersonGroup records."""

    queryset = models.PersonGroup.objects.all()
    serializer_class = serializers.PersonGroupSerializer
# GeoTagArea API
class GeoTagAreaViewSet(viewsets.ModelViewSet):
    """Full CRUD API for GeoTagArea records."""

    queryset = models.GeoTagArea.objects.all()
    serializer_class = serializers.GeoTagAreaSerializer
# ScanFolder API, with filtering by parent
class ScanFolderViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only ScanFolder API, filterable by parent."""

    queryset = models.ScanFolder.objects.all()
    serializer_class = serializers.ScanFolderSerializer
    filter_class = filters.ScanFolderFilter
# Scan API, with filtering by parent and pagination
class ScanViewSet(viewsets.ModelViewSet):
    """Paginated Scan API, filterable by parent; no create/delete."""

    # Disallow the mutating verbs PUT/POST/DELETE
    http_method_names = [n for n in viewsets.ModelViewSet.http_method_names if n not in ("put", "post", "delete")]
    queryset = models.Scan.objects.all()
    serializer_class = serializers.ScanSerializer
    filter_class = filters.ScanFilter
    pagination_class = filters.CustomPagination
| StarcoderdataPython |
216248 | """
Download and render afferent mesoscale projection data using the AllenBrainAtlas (ABA) and Scene classes
"""
import brainrender
from brainrender import Scene, Animation
from vedo import settings as vsettings
from brainrender.video import VideoMaker
# // DEFAULT SETTINGS //
# You can see all the default settings here: https://github.com/brainglobe/brainrender/blob/19c63b97a34336898871d66fb24484e8a55d4fa7/brainrender/settings.py
# --------------------------- brainrender settings --------------------------- #
# Change some of the default settings
brainrender.settings.BACKGROUND_COLOR = "white" # color of the background window (defaults to "white", try "blackboard")
brainrender.settings.DEFAULT_ATLAS = "allen_mouse_25um" # default atlas
brainrender.settings.DEFAULT_CAMERA = "three_quarters" # Default camera settings (orientation etc. see brainrender.camera.py)
brainrender.settings.INTERACTIVE = False # rendering interactive ?
brainrender.settings.LW = 2 # e.g. for silhouettes
brainrender.settings.ROOT_COLOR = [0.4, 0.4, 0.4] # color of the overall brain model's actor (defaults to [0.8, 0.8, 0.8])
brainrender.settings.ROOT_ALPHA = 0.2 # transparency of the overall brain model's actor (defaults to 0.2)
brainrender.settings.SCREENSHOT_SCALE = 1 # values >1 yield higher resolution screenshots
brainrender.settings.SHADER_STYLE = "cartoon" # affects the look of rendered brain regions, values can be: ["metallic", "plastic", "shiny", "glossy", "cartoon"] and can be changed in interactive mode
brainrender.settings.SHOW_AXES = False
brainrender.settings.WHOLE_SCREEN = True # If true render window is full screen
brainrender.settings.OFFSCREEN = False
# ------------------------------- vedo settings ------------------------------ #
# For transparent background with screenshots
vsettings.screenshotTransparentBackground = True # vedo for transparent bg
vsettings.useFXAA = False # This needs to be false for transparent bg
# // SET PARAMETERS //
# Save folder
save_folder = r"D:\Dropbox (UCL)\Project_transcriptomics\analysis\PAG_scRNAseq_brainrender\output"
# // CREATE SCENE //
# Create a scene with no title. You can also use scene.add_text to add other text elsewhere in the scene
scene = Scene(root = True, atlas_name = 'allen_mouse_10um', inset = False, title = 'PAG_areas_overview', screenshots_folder = save_folder, plotter = None)
# // GET CENTER OF MASS AND PROJECTIONS TO IT //
# Get the center of mass of the region of interest
p0 = scene.atlas.get_region_CenterOfMass("PAG")
# Get projections to that point
analyzer = ABA()
tract = analyzer.get_projection_tracts_to_target(p0 = p0)
# // ADD BRAIN REGIONS //
pag = scene.add_brain_regions(["PAG"],
alpha = 0.4, color = "darkgoldenrod", add_labels = False, use_original_color = False, wireframe = False)
superior_colliculus = scene.add_brain_regions(["SCdg", "SCdw", "SCig", "SCiw", "SCm", "SCop", "SCs", "SCsg", "SCzo"],
alpha = 0.1, color = "olivedrab", add_labels = False, use_original_color = False, wireframe = True)
# hypothalamus = scene.add_brain_regions(["HY"],
# alpha = .2, color = "lightsalmon", add_labels = False, use_original_color = False, wireframe = True)
# Add the projections to the chosen brain region
scene.add_tractography(tract,
color = None, #"darkseagreen" # color of rendered tractography data
color_by = "target_region", #"manual", "region", or "target_region"
VIP_regions = ["SCdg", "SCdw", "SCig", "SCiw", "SCm", "SCop", "SCs", "SCsg", "SCzo"], # list of brain regions with VIP treatement (Default value = [])
VIP_color = "darkseagreen", # color to use for VIP data (Default value = None)
others_color = "ivory", # color for not VIP data (Default value = "white")
others_alpha = 0.1, # Default is 1
verbose = True, # Prints all areas projecting to target
include_all_inj_regions = False,
display_injection_volume = True) # add a spehere to display the injection coordinates and volume
# This renders tractography data and adds it to the scene. A subset of tractography data can receive special treatment using the with VIP regions argument: if the injection site for the tractography data is in a VIP region, this is colored differently.
# Try "SC", "VMH", "SCm", "LHA":
##["SCdg", "SCdw", "SCig", "SCiw", "SCm", "SCop", "SCs", "SCsg", "SCzo"] -- Superior Colliculus
##(LHA) -- Lateral hypothalamic area
##(VMH) -- Ventromedial hypothalamic nucleus
##(ZI) -- Zona incerta
# // RENDER INTERACTIVELY //
# Render interactively. You can press "s" to take a screenshot
scene.render(interactive = True, camera = "sagittal", zoom = 1) | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.