hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c94aca271568ab00f3c86f9599a88f50e9eeab3a | 95 | py | Python | fruitsales/apps.py | khajime/fruit-sales-management-console | 4f802578cd9ddcdbbc3259263d0d19df11432a0c | [
"MIT"
] | null | null | null | fruitsales/apps.py | khajime/fruit-sales-management-console | 4f802578cd9ddcdbbc3259263d0d19df11432a0c | [
"MIT"
] | 16 | 2019-02-21T14:12:01.000Z | 2019-03-11T08:00:15.000Z | fruitsales/apps.py | khajime/fruit-sales-management-console | 4f802578cd9ddcdbbc3259263d0d19df11432a0c | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class FruitsalesConfig(AppConfig):
    """Django AppConfig registering the "fruitsales" application."""

    name = 'fruitsales'
| 15.833333 | 34 | 0.768421 |
c94dc603c09e41f347618a870bb8e3d545494ed0 | 61 | py | Python | run.py | Tokisaki-Kurumi001/ASMART-34 | 04ffbabe4a1c18f8ed68a2ee883145985fc5ec7f | [
"MIT"
] | 3 | 2021-04-17T08:34:08.000Z | 2021-04-17T08:57:23.000Z | run.py | Tokisaki-Kurumi001/ASMART-34 | 04ffbabe4a1c18f8ed68a2ee883145985fc5ec7f | [
"MIT"
] | null | null | null | run.py | Tokisaki-Kurumi001/ASMART-34 | 04ffbabe4a1c18f8ed68a2ee883145985fc5ec7f | [
"MIT"
] | null | null | null | import os
# Run the worker script through the shell, redirecting its stdout to log.txt.
# NOTE(review): assumes function_18351015.py lives in the current working
# directory and that "python" on PATH is the intended interpreter -- confirm.
os.system('python function_18351015.py > log.txt')
| 20.333333 | 50 | 0.770492 |
c950e89a11e706b3a1a0ba3575143820351f7247 | 3,337 | py | Python | upandas_test.py | kokes/upandas | f2150e5a74c815b27fd08fc841da01c3b455dadc | [
"MIT"
] | null | null | null | upandas_test.py | kokes/upandas | f2150e5a74c815b27fd08fc841da01c3b455dadc | [
"MIT"
] | null | null | null | upandas_test.py | kokes/upandas | f2150e5a74c815b27fd08fc841da01c3b455dadc | [
"MIT"
] | null | null | null | import sys, os
import upandas as upd
# Run a single Python script
# For many simple, single file projects, you may find it inconvenient
# to write a complete Dockerfile. In such cases, you can run a Python
# script by using the Python Docker image directly:
# versions to consider: 3 (600+ MB), slim (150 MB) alpine (90 MB)
# $ docker run -it --rm --name my-running-script -v "$PWD":/usr/src/myapp -w /usr/src/myapp python:3 python your-daemon-or-script.py
# $ docker run -it --rm -v "$PWD":/usr/src/upandas -w /usr/src/upandas python:alpine python upandas_test.py
if __name__ == '__main__':
    # Choose the environment to run the tests in BEFORE unittest parses argv.
    if len(sys.argv) < 2:
        print('no testing approach supplied, see...')
        sys.exit(1)
    env = sys.argv[1]
    if env == 'local':
        print('Testing locally')
    elif env == 'docker':
        # Re-invoke this very file inside a python:alpine container,
        # mounting the current directory, with the "local" mode.
        print('Using docker to test')
        ex = os.system(
            'docker run -it --rm -v "$PWD":/usr/src/upandas -w /usr/src/upandas '
            'python:alpine python upandas_test.py local')
        # Propagate the container's exit status.
        sys.exit(os.WEXITSTATUS(ex))
    elif env == 'virtualenv':
        raise NotImplementedError
    else:
        # NOTE(review): an unsupported environment only prints a warning and
        # then still falls through to run the tests locally -- confirm intended.
        print('Unsupported environment: {}'.format(env))
    sys.argv = sys.argv[:1] # strip our settings out
import unittest
import math
# pandas is an optional dependency used only for cross-checking results;
# the comparison tests below are skipped when it is unavailable.
skip_pandas_tests = True  # TODO: make this explicit in the sys.argv stuff above
try:
    import pandas as pd
    skip_pandas_tests = False
except ImportError:
    # Catch only a missing module -- the previous bare "except" would also
    # have hidden unrelated errors raised during pandas' own import.
    pass
# Series methods
# ==============
class TestSeriesInit(unittest.TestCase):
    """Constructing a Series from various list-shaped inputs."""

    # dict, list, single value, another series, iterator
    def test_basic_init(self):
        samples = [[1, 2, 3], [4, 5, 6],
                   list(range(1000)), [1, None, 2, None], []]
        for ds in samples:
            s = upd.Series(ds)
            self.assertEqual(len(s), len(ds))
            # test shapes
            self.assertEqual(len(s), s.shape[0])
            self.assertEqual(len(s.shape), 1)
            self.assertEqual(type(s.shape), tuple)
            # element-wise round trip through iteration
            for j, el in enumerate(s):
                self.assertEqual(el, ds[j])
            if not skip_pandas_tests:
                pass
                # TODO: add a function to compare pd.Series and upd.Series
                # spd = pd.Series(ds)
                # self.assertEqual([j for j in s], [j for j in spd])
class TestSeriesApply(unittest.TestCase):
    """Series.apply maps an element-wise callable over the values."""

    # TODO: args, kwargs?
    def test_apply(self):
        source = upd.Series([1, 2, 3])
        transformed = source.apply(lambda v: v * v - 3)
        self.assertEqual(transformed.values, [-2, 1, 6])
class TestSeriesCopy(unittest.TestCase):
    """Series.copy produces an independent top level but shares nested objects."""

    def test_copy(self):
        original = upd.Series([1, 2, 3])
        duplicate = original.copy()
        self.assertEqual(original.values, duplicate.values)
        duplicate[0] = 10
        # Mutating the copy must not affect the original's values.
        self.assertNotEqual(original.values,
                            duplicate.values)  # TODO: add comparisons of frames?

    def test_deep_copy(self):  # ...or lack thereof
        original = upd.Series([1, 2, {'foo': 'bar'}])
        duplicate = original.copy()
        duplicate[2]['foo'] = 'baz'
        # The copy is shallow, so the nested dict is shared between both.
        self.assertEqual(original[2]['foo'], duplicate[2]['foo'])
class TestSeriesValues(unittest.TestCase):
    """Series.values round-trips the list used to build the series."""

    def test_values(self):
        cases = (
            [1, 2, 3],
            [4, 5, 6],
            list(range(1000)),
            [1, None, 2, None],
            [],
        )
        for data in cases:
            self.assertEqual(upd.Series(data).values, data)
if __name__ == '__main__':
    # Reached only after the environment selector above has stripped its
    # own arguments, so unittest's argument parsing is not confused.
    unittest.main()
| 29.530973 | 132 | 0.574768 |
c953f88756774d3e9d070501efa3054134aaa4e2 | 6,555 | py | Python | prettyqt/widgets/lineedit.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 7 | 2019-05-01T01:34:36.000Z | 2022-03-08T02:24:14.000Z | prettyqt/widgets/lineedit.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 141 | 2019-04-16T11:22:01.000Z | 2021-04-14T15:12:36.000Z | prettyqt/widgets/lineedit.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 5 | 2019-04-17T11:48:19.000Z | 2021-11-21T10:30:19.000Z | from __future__ import annotations
from typing import Literal
from prettyqt import constants, core, gui, widgets
from prettyqt.qt import QtCore, QtWidgets
from prettyqt.utils import InvalidParamError, bidict
# Maps human-readable echo-mode names to the Qt enum values.
ECHO_MODE = bidict(
    normal=QtWidgets.QLineEdit.EchoMode.Normal,
    no_echo=QtWidgets.QLineEdit.EchoMode.NoEcho,
    password=QtWidgets.QLineEdit.EchoMode.Password,
    echo_on_edit=QtWidgets.QLineEdit.EchoMode.PasswordEchoOnEdit,
)

EchoModeStr = Literal["normal", "no_echo", "password", "echo_on_edit"]

# Maps human-readable action positions (icon placement) to the Qt enum values.
ACTION_POSITION = bidict(
    leading=QtWidgets.QLineEdit.ActionPosition.LeadingPosition,
    trailing=QtWidgets.QLineEdit.ActionPosition.TrailingPosition,
)

ActionPositionStr = Literal["leading", "trailing"]

# Re-base the Qt class so every QLineEdit instance picks up the prettyqt
# widgets.Widget mix-in helpers (e.g. set_background_color used below).
QtWidgets.QLineEdit.__bases__ = (widgets.Widget,)
class LineEdit(QtWidgets.QLineEdit):
    """QLineEdit subclass with extra signals, pickling support and
    string-based setters for the Qt enums."""

    # Emitted when the widget loses keyboard focus.
    focusLost = core.Signal()
    # Emitted when Enter/Return is pressed.
    enterPressed = core.Signal()
    # Emitted with the final text by _on_edit_complete().
    editComplete = core.Signal(str)
    # Mirrors textChanged under a snake_case name.
    value_changed = core.Signal(str)

    def __init__(
        self,
        default_value: str = "",
        read_only: bool = False,
        parent: QtWidgets.QWidget | None = None,
    ):
        super().__init__(default_value, parent)
        # Re-check validity (and recolor) on every change, and forward it.
        self.textChanged.connect(self._set_validation_color)
        self.textChanged.connect(self.value_changed)
        self.set_read_only(read_only)

    def __repr__(self):
        return f"{type(self).__name__}: {self.serialize_fields()}"

    def __setstate__(self, state):
        # Restore the widget attributes captured by serialize_fields().
        super().__setstate__(state)
        self.set_text(state["text"])
        self.setValidator(state["validator"])
        self.setInputMask(state["input_mask"])
        self.setMaxLength(state["max_length"])
        self.setPlaceholderText(state["placeholder_text"])
        self.setReadOnly(state["read_only"])
        self.setFrame(state["has_frame"])
        self.setClearButtonEnabled(state["clear_button_enabled"])
        # self.setAlignment(state["alignment"])
        self.set_cursor_move_style(state["cursor_move_style"])
        self.set_echo_mode(state["echo_mode"])
        self.setCursorPosition(state["cursor_position"])
        self.setDragEnabled(state["drag_enabled"])
        self.setModified(state["is_modified"])

    def __reduce__(self):
        return type(self), (), self.__getstate__()

    def __add__(self, other: str):
        # "widget + str" appends the text and returns the widget itself.
        self.append_text(other)
        return self

    def serialize_fields(self):
        # Snapshot of the picklable widget state; counterpart of __setstate__.
        return dict(
            text=self.text(),
            # alignment=self.alignment(),
            validator=self.validator(),
            max_length=self.maxLength(),
            read_only=self.isReadOnly(),
            input_mask=self.inputMask(),
            has_frame=self.hasFrame(),
            placeholder_text=self.placeholderText(),
            clear_button_enabled=self.isClearButtonEnabled(),
            cursor_move_style=self.get_cursor_move_style(),
            echo_mode=self.get_echo_mode(),
            cursor_position=self.cursorPosition(),
            drag_enabled=self.dragEnabled(),
            is_modified=self.isModified(),
        )

    def focusOutEvent(self, event):
        self.focusLost.emit()
        return super().focusOutEvent(event)

    def keyPressEvent(self, event):
        if event.key() in [QtCore.Qt.Key.Key_Enter, QtCore.Qt.Key.Key_Return]:
            self.enterPressed.emit()
        return super().keyPressEvent(event)

    def _on_edit_complete(self):
        self.editComplete.emit(self.text())

    def font(self) -> gui.Font:
        # Wrap the plain QFont in prettyqt's subclass.
        return gui.Font(super().font())

    def append_text(self, text: str):
        self.set_text(self.text() + text)

    def set_text(self, text: str):
        self.setText(text)

    def set_read_only(self, value: bool = True):
        """Set text to read-only.

        Args:
            value: True, for read-only, otherwise False
        """
        self.setReadOnly(value)

    def set_regex_validator(self, regex: str, flags=0) -> gui.RegularExpressionValidator:
        """Install a regular-expression validator and return it."""
        validator = gui.RegularExpressionValidator(self)
        validator.set_regex(regex, flags)
        self.set_validator(validator)
        return validator

    def set_range(self, lower: int | None, upper: int | None):
        """Restrict input to integers within [lower, upper]."""
        val = gui.IntValidator()
        val.set_range(lower, upper)
        self.set_validator(val)

    def set_validator(self, validator: gui.Validator):
        self.setValidator(validator)
        self._set_validation_color()

    def set_input_mask(self, mask: str):
        self.setInputMask(mask)

    def _set_validation_color(self, state: bool = True):
        # NOTE(review): "state" receives the textChanged payload and is
        # unused; the annotation "bool" looks wrong for a str signal.
        color = "orange" if not self.is_valid() else None
        self.set_background_color(color)

    def set_echo_mode(self, mode: EchoModeStr):
        """Set echo mode.

        Args:
            mode: echo mode to use

        Raises:
            InvalidParamError: invalid echo mode
        """
        if mode not in ECHO_MODE:
            raise InvalidParamError(mode, ECHO_MODE)
        self.setEchoMode(ECHO_MODE[mode])

    def get_echo_mode(self) -> EchoModeStr:
        """Return echo mode.

        Returns:
            echo mode
        """
        return ECHO_MODE.inverse[self.echoMode()]

    def set_cursor_move_style(self, style: constants.CursorMoveStyleStr):
        """Set cursor move style.

        Args:
            style: cursor move style to use

        Raises:
            InvalidParamError: invalid cursor move style
        """
        if style not in constants.CURSOR_MOVE_STYLE:
            raise InvalidParamError(style, constants.CURSOR_MOVE_STYLE)
        self.setCursorMoveStyle(constants.CURSOR_MOVE_STYLE[style])

    def get_cursor_move_style(self) -> constants.CursorMoveStyleStr:
        """Return cursor move style.

        Returns:
            cursor move style
        """
        return constants.CURSOR_MOVE_STYLE.inverse[self.cursorMoveStyle()]

    def add_action(
        self, action: QtWidgets.QAction, position: ActionPositionStr = "trailing"
    ):
        """Add an action icon at the given end of the line edit."""
        self.addAction(action, ACTION_POSITION[position])

    def set_value(self, value: str):
        self.setText(value)

    def get_value(self) -> str:
        return self.text()

    def is_valid(self) -> bool:
        return self.hasAcceptableInput()
if __name__ == "__main__":
app = widgets.app()
widget = LineEdit()
action = widgets.Action(text="hallo", icon="mdi.folder")
widget.add_action(action)
widget.setPlaceholderText("test")
widget.setClearButtonEnabled(True)
# widget.set_regex_validator("[0-9]+")
widget.setFont(gui.Font("Consolas"))
widget.show()
app.main_loop()
| 31.066351 | 89 | 0.653547 |
c9544ffadc07ec885bd33e7c84ffb14a0d5a171b | 555 | py | Python | puzzles/easy/puzzle8e.py | mhw32/Code-Boola-Python-Workshop | 08bc551b173ff372a267592f58586adb52c582e3 | [
"MIT"
] | null | null | null | puzzles/easy/puzzle8e.py | mhw32/Code-Boola-Python-Workshop | 08bc551b173ff372a267592f58586adb52c582e3 | [
"MIT"
] | null | null | null | puzzles/easy/puzzle8e.py | mhw32/Code-Boola-Python-Workshop | 08bc551b173ff372a267592f58586adb52c582e3 | [
"MIT"
] | null | null | null | # ------------------------------------
# CODE BOOLA 2015 PYTHON WORKSHOP
# Mike Wu, Jonathan Chang, Kevin Tan
# Puzzle Challenges Number 8
# ------------------------------------
# INSTRUCTIONS:
# Write a function that takes an integer
# as its argument and converts it to a
# string. Return the first character of
# of that string.
# EXAMPLE:
# select(12345) => "1"
# select(519) => "5"
# select(2) => "2"
# HINT:
# Use str() to convert an integer to a string.
# Remember that a string can be indexed
# just like a list!
def select(n):
    """Return the first character of *n* converted to a string.

    Examples:
        select(12345) -> "1"
        select(519)   -> "5"
        select(2)     -> "2"

    Note: for a negative integer the first character is the "-" sign.
    """
    # str() converts the integer; indexing a string yields its first char.
    return str(n)[0]
| 21.346154 | 46 | 0.585586 |
c95465582eabaa7004deb1d71c383aba26908941 | 1,086 | py | Python | nis_visualizeer/ukf-nis-vis.py | vikram216/unscented-kalman-filter | 1619fe365c73f198b39fa1de70fd5e203f8715a0 | [
"MIT"
] | null | null | null | nis_visualizeer/ukf-nis-vis.py | vikram216/unscented-kalman-filter | 1619fe365c73f198b39fa1de70fd5e203f8715a0 | [
"MIT"
] | null | null | null | nis_visualizeer/ukf-nis-vis.py | vikram216/unscented-kalman-filter | 1619fe365c73f198b39fa1de70fd5e203f8715a0 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
"""
A chi square (X2) statistic is used to investigate whether distributions
of categorical variables differ from one another. Here we consider 3 degrees
of freedom for our system. Plotted against 95% line"""
# Load the per-measurement NIS values written by the UKF.
lidar_nis = []
with open('NISvals_laser.txt') as f:
    for line in f:
        # NOTE(review): values are kept as strings after strip(); consider
        # float(line) so matplotlib plots numeric values -- confirm intended.
        lidar_nis.append(line.strip())

print("Number of LIDAR Measurements :\t", len(lidar_nis))

radar_nis = []
with open('NISvals_radar.txt') as f:
    for line in f:
        radar_nis.append(line.strip())

print("Number of RADAR Measurements :\t", len(radar_nis))

# Constant 95% chi-square reference line (3 degrees of freedom, see header).
k = [7.815 for x in range(len(lidar_nis))]

# We skip the first row to cut out the unrealistically high NIS value
# from the first measurement. The Kalman filter has not found its groove yet.
lidar_nis = lidar_nis[1:]
radar_nis = radar_nis[1:]

plt.plot(lidar_nis)
plt.plot(k)
plt.title("LIDAR NIS")
plt.xlabel("Measurement Instance")
plt.ylabel("NIS")
plt.show()

plt.plot(radar_nis)
plt.plot(k)
plt.title("RADAR NIS")
plt.xlabel("Measurement Instance")
plt.ylabel("NIS")
plt.ylim(0, 20)
plt.show()
| 24.681818 | 78 | 0.721915 |
c95546315e55dfb705f35c46c08aaa6f9bae96a5 | 695 | py | Python | benchmark/OfflineRL/offlinerl/config/algo/crr_config.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 50 | 2021-02-07T08:10:28.000Z | 2022-03-25T09:10:26.000Z | benchmark/OfflineRL/offlinerl/config/algo/crr_config.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 7 | 2021-07-29T14:58:31.000Z | 2022-02-01T08:02:54.000Z | benchmark/OfflineRL/offlinerl/config/algo/crr_config.py | ssimonc/NeoRL | 098c58c8e4c3e43e67803f6384619d3bfe7fce5d | [
"Apache-2.0"
] | 4 | 2021-04-01T16:30:15.000Z | 2022-03-31T17:38:05.000Z | import torch
from offlinerl.utils.exp import select_free_cuda

# Task / dataset selection.
task = "Hopper-v3"
task_data_type = "low"
task_train_num = 99

seed = 42

# Pick a free GPU when CUDA is available, otherwise fall back to CPU.
device = 'cuda'+":"+str(select_free_cuda()) if torch.cuda.is_available() else 'cpu'
# Filled in at runtime from the environment specs.
obs_shape = None
act_shape = None
max_action = None

# Network architecture.
hidden_features = 256
hidden_layers = 2
atoms = 21

# Advantage-weighting hyper-parameters.
advantage_mode = 'mean'
weight_mode = 'exp'
advantage_samples = 4
beta = 1.0

# Training schedule.
gamma = 0.99
batch_size = 1024
steps_per_epoch = 1000
max_epoch = 200

lr = 1e-4
update_frequency = 100

#tune
params_tune = {
    "beta" : {"type" : "continuous", "value": [0.0, 10.0]},
}

#tune
grid_tune = {
    "advantage_mode" : ['mean', 'max'],
    "weight_mode" : ['exp', 'binary'],
}
| 16.547619 | 83 | 0.680576 |
c9555f153510ab57941a2d63dc997b5c2a9d5575 | 8,325 | py | Python | cykel/models/cykel_log_entry.py | mohnbroetchen2/cykel_jenarad | 6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4 | [
"MIT"
] | null | null | null | cykel/models/cykel_log_entry.py | mohnbroetchen2/cykel_jenarad | 6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4 | [
"MIT"
] | null | null | null | cykel/models/cykel_log_entry.py | mohnbroetchen2/cykel_jenarad | 6ed9fa45d8b98e1021bc41a57e1250ac6f0cfcc4 | [
"MIT"
] | null | null | null | from django.contrib.admin.options import get_content_type_for_model
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.urls import reverse
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
# log texts that only contain {object}
LOG_TEXTS_BASIC = {
    "cykel.bike.rent.unlock": _("{object} has been unlocked"),
    "cykel.bike.rent.longterm": _("{object} has been running for a long time"),
    "cykel.bike.forsaken": _("{object} had no rent in some time"),
    "cykel.bike.missing_reporting": _("{object} (missing) reported its status again!"),
    "cykel.tracker.missing_reporting": _(
        "{object} (missing) reported its status again!"
    ),
    "cykel.tracker.missed_checkin": _("{object} missed its periodic checkin"),
}

# log texts with extra placeholders that CykelLogEntry.display() fills
# from the entry's JSON "data" field
LOG_TEXTS = {
    "cykel.bike.rent.finished.station": _(
        "{object} finished rent at Station {station} with rent {rent}"
    ),
    "cykel.bike.rent.finished.freefloat": _(
        "{object} finished rent freefloating at {location} with rent {rent}"
    ),
    "cykel.bike.rent.started.station": _(
        "{object} began rent at Station {station} with rent {rent}"
    ),
    "cykel.bike.rent.started.freefloat": _(
        "{object} began rent freefloating at {location} with rent {rent}"
    ),
    "cykel.bike.tracker.battery.critical": _(
        "{object} (on Bike {bike}) had critical battery voltage {voltage} V"
    ),
    "cykel.bike.tracker.battery.warning": _(
        "{object} (on Bike {bike}) had low battery voltage {voltage} V"
    ),
    "cykel.tracker.battery.critical": _(
        "{object} had critical battery voltage {voltage} V"
    ),
    "cykel.tracker.battery.warning": _("{object} had low battery voltage {voltage} V"),
    "cykel.bike.tracker.missed_checkin": _(
        "{object} (on Bike {bike}) missed its periodic checkin"
    ),
}
class CykelLogEntry(models.Model):
    """Append-only audit log entry attached to an arbitrary model instance."""

    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField(db_index=True)
    # Generic relation to the logged object (Bike, LocationTracker, Rent, ...).
    content_object = GenericForeignKey("content_type", "object_id")
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    action_type = models.CharField(max_length=200)
    # Free-form JSON payload; display() interprets it per action_type.
    data = models.JSONField(default=dict)

    class Meta:
        ordering = ("-timestamp",)
        verbose_name = "Log Entry"
        verbose_name_plural = "Log Entries"

    def delete(self, using=None, keep_parents=False):
        # Log entries are immutable by design: deleting is forbidden.
        raise TypeError("Logs cannot be deleted.")

    def __str__(self):
        return (
            f"CykelLogEntry(content_object={self.content_object}, "
            + f"action_type={self.action_type}, timestamp={self.timestamp})"
        )

    @staticmethod
    def create_unless_time(timefilter, **kwargs):
        """Create a log entry unless one with the same object and action
        already exists with a timestamp at or after *timefilter*."""
        obj = kwargs["content_object"]
        action_type = kwargs["action_type"]
        if not CykelLogEntry.objects.filter(
            content_type=get_content_type_for_model(obj),
            object_id=obj.pk,
            action_type=action_type,
            timestamp__gte=timefilter,
        ).exists():
            CykelLogEntry.objects.create(**kwargs)

    def display_object(self):
        """Return an HTML admin-change link for the related object, or ""."""
        from bikesharing.models import Bike, LocationTracker, Rent

        try:
            co = self.content_object
        except ObjectDoesNotExist:
            # The logged object has been removed; nothing to link to.
            return ""

        text = None
        data = None
        if isinstance(co, Bike):
            text = _("Bike {ref}")
            data = {
                "url": reverse(
                    "admin:%s_%s_change" % (co._meta.app_label, co._meta.model_name),
                    args=[co.id],
                ),
                "ref": co.bike_number,
            }
        if isinstance(co, LocationTracker):
            text = _("Tracker {ref}")
            data = {
                "url": reverse(
                    "admin:%s_%s_change" % (co._meta.app_label, co._meta.model_name),
                    args=[co.id],
                ),
                "ref": co.device_id,
            }
        if isinstance(co, Rent):
            text = _("Rent {ref}")
            data = {
                "url": reverse(
                    "admin:%s_%s_change" % (co._meta.app_label, co._meta.model_name),
                    args=[co.id],
                ),
                "ref": co.id,
            }

        if text and data:
            # Wrap the reference in a link, then substitute into the label.
            data["ref"] = format_html('<a href="{url}">{ref}</a>', **data)
            return format_html(text, **data)
        elif text:
            return text
        return ""

    def display(self):
        """Render a human-readable HTML description of this log entry."""
        from bikesharing.models import Bike, Location, Station

        if self.action_type in LOG_TEXTS_BASIC:
            return format_html(
                LOG_TEXTS_BASIC[self.action_type], object=self.display_object()
            )

        if self.action_type in LOG_TEXTS:
            fmt = LOG_TEXTS[self.action_type]
            data = {"object": self.display_object()}

            if self.action_type.startswith(
                "cykel.bike.tracker.battery."
            ) or self.action_type.startswith("cykel.tracker.battery."):
                voltage = self.data.get("voltage")
                if voltage:
                    data["voltage"] = voltage
                else:
                    data["voltage"] = "[unknown]"

            if self.action_type.startswith("cykel.bike.tracker."):
                bike_id = self.data.get("bike_id")
                if bike_id:
                    try:
                        bike = Bike.objects.get(pk=bike_id)
                        ref = bike.bike_number
                    except ObjectDoesNotExist:
                        # Fall back to the raw id when the bike is gone.
                        ref = bike_id
                    bike_url = reverse("admin:bikesharing_bike_change", args=[bike_id])
                    data["bike"] = format_html(
                        '<a href="{url}">{ref}</a>', url=bike_url, ref=ref
                    )
                else:
                    data["bike"] = "[unknown]"

            if self.action_type.startswith("cykel.bike.rent."):
                rent_id = self.data.get("rent_id")
                if rent_id:
                    rent_url = reverse("admin:bikesharing_rent_change", args=[rent_id])
                    data["rent"] = format_html(
                        '<a href="{url}">{ref}</a>', url=rent_url, ref=rent_id
                    )
                else:
                    data["rent"] = "[unknown]"

            if self.action_type.startswith(
                "cykel.bike.rent."
            ) and self.action_type.endswith(".station"):
                station_id = self.data.get("station_id")
                if station_id:
                    try:
                        station = Station.objects.get(pk=station_id)
                        ref = station.station_name
                    except ObjectDoesNotExist:
                        ref = station_id
                    station_url = reverse(
                        "admin:bikesharing_station_change", args=[station_id]
                    )
                    data["station"] = format_html(
                        '<a href="{url}">{ref}</a>', url=station_url, ref=ref
                    )
                else:
                    data["station"] = "[unknown]"

            if self.action_type.startswith(
                "cykel.bike.rent."
            ) and self.action_type.endswith(".freefloat"):
                location_id = self.data.get("location_id")
                if location_id:
                    try:
                        loc = Location.objects.get(pk=location_id)
                        ref = "{}, {}".format(loc.geo.y, loc.geo.x)
                    except ObjectDoesNotExist:
                        ref = location_id
                    location_url = reverse(
                        "admin:bikesharing_location_change", args=[location_id]
                    )
                    data["location"] = format_html(
                        '<a href="{url}">{ref}</a>', url=location_url, ref=ref
                    )
                else:
                    data["location"] = "[unknown]"

            return format_html(fmt, **data)

        # Unknown action types fall back to the raw identifier.
        return self.action_type
| 37.669683 | 87 | 0.538498 |
c9565831d1ae75fe2b15d03a39a78761d5e269d5 | 7,991 | py | Python | mlx/od/archive/ssd/test_utils.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | [
"Apache-2.0"
] | null | null | null | mlx/od/archive/ssd/test_utils.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | [
"Apache-2.0"
] | null | null | null | mlx/od/archive/ssd/test_utils.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | [
"Apache-2.0"
] | null | null | null | import unittest
import torch
from torch.nn.functional import binary_cross_entropy as bce, l1_loss
from mlx.od.ssd.utils import (
ObjectDetectionGrid, BoxList, compute_intersection, compute_iou, F1)
class TestIOU(unittest.TestCase):
    """Pairwise intersection / IoU computation on axis-aligned boxes."""

    @staticmethod
    def _boxes():
        # Three boxes vs two boxes; results are 3x2 pairwise matrices.
        left = torch.tensor(
            [[0, 0, 2, 2], [1, 1, 3, 3], [2, 2, 4, 4]], dtype=torch.float)
        right = torch.tensor(
            [[0, 0, 2, 2], [1, 1, 3, 3]], dtype=torch.float)
        return left, right

    def test_compute_intersection(self):
        left, right = self._boxes()
        expected = torch.tensor(
            [[4, 1], [1, 4], [0, 1]], dtype=torch.float)
        self.assertTrue(compute_intersection(left, right).equal(expected))

    def test_compute_iou(self):
        left, right = self._boxes()
        expected = torch.tensor(
            [[1, 1./7], [1./7, 1], [0, 1./7]], dtype=torch.float)
        self.assertTrue(compute_iou(left, right).equal(expected))
class TestBoxList(unittest.TestCase):
    """BoxList score filtering and non-maximum suppression."""

    def test_score_filter(self):
        boxes = torch.tensor([[0, 0, 2, 2],
                              [1, 1, 3, 3]], dtype=torch.float)
        labels = torch.tensor([0, 1])
        scores = torch.tensor([0.3, 0.7])
        bl = BoxList(boxes, labels, scores)
        # Only the box scoring above the 0.5 threshold survives.
        filt_bl = bl.score_filter(0.5)

        exp_bl = BoxList(torch.tensor([[1, 1, 3, 3]], dtype=torch.float),
                         torch.tensor([1]),
                         torch.tensor([0.7]))
        self.assertTrue(filt_bl.equal(exp_bl))

    def test_nms(self):
        boxes = torch.tensor([[0, 0, 10, 10],
                              [1, 1, 11, 11],
                              [9, 9, 19, 19],
                              [0, 0, 10, 10],
                              [20, 20, 21, 21]], dtype=torch.float)
        labels = torch.tensor([0, 0, 0, 1, 1])
        scores = torch.tensor([0.5, 0.7, 0.5, 0.5, 0.5])
        bl = BoxList(boxes, labels, scores)
        bl = bl.nms(0.5)

        # The label-0 duplicate of [0,0,10,10] is suppressed by the
        # higher-scoring [1,1,11,11]; the label-1 copy is kept, showing
        # suppression is applied per label.
        exp_boxes = torch.tensor([[1, 1, 11, 11],
                                  [9, 9, 19, 19],
                                  [0, 0, 10, 10],
                                  [20, 20, 21, 21]], dtype=torch.float)
        exp_labels = torch.tensor([0, 0, 1, 1])
        exp_scores = torch.tensor([0.7, 0.5, 0.5, 0.5])
        exp_bl = BoxList(exp_boxes, exp_labels, exp_scores)
        self.assertTrue(bl.equal(exp_bl))
class TestDetectorGrid(unittest.TestCase):
    """Round-tripping boxes through the detection grid encoding."""

    def setUp(self):
        grid_sz = 2
        anc_sizes = torch.tensor([
            [2, 0.5],
            [0.5, 2]])
        num_classes = 2
        self.grid = ObjectDetectionGrid(grid_sz, anc_sizes, num_classes)

    def test_decode(self):
        batch_sz = 1
        out = torch.zeros(self.grid.get_out_shape(batch_sz), dtype=torch.float)
        # y_offset, x_offset, y_scale, x_scale, c0, c1
        out[0, 1, :, 0, 0] = torch.tensor([0.5, 0, 1, 1, 0.1, 0.7])
        exp_boxes = torch.tensor([-0.25, -1.5, 0.25, 0.5])
        exp_labels = torch.ones((1, 8), dtype=torch.long)
        exp_labels[0, 1] = torch.tensor(1)
        exp_scores = torch.zeros((1, 8))
        exp_scores[0, 1] = torch.tensor(0.7)

        boxes, labels, scores = self.grid.decode(out)
        self.assertTrue(boxes[0, 1, :].equal(exp_boxes))
        self.assertTrue(labels.equal(exp_labels))
        self.assertTrue(scores.equal(exp_scores))

    def test_encode(self):
        exp_out = torch.zeros(self.grid.get_out_shape(1), dtype=torch.float)
        # y_offset, x_offset, y_scale, x_scale, c0, c1
        exp_out[0, 1, :, 0, 1] = torch.tensor([0, 0, 1, 0.5, 0, 1])
        boxes = torch.tensor([[[-0.75, 0, -0.25, 1]]])
        labels = torch.tensor([[1]])
        out = self.grid.encode(boxes, labels)
        self.assertTrue(out.equal(exp_out))

    def test_get_preds(self):
        # Use square anchors so this box encodes and decodes losslessly.
        grid_sz = 2
        anc_sizes = torch.tensor([
            [1., 1],
            [2, 2]])
        num_classes = 2
        grid = ObjectDetectionGrid(grid_sz, anc_sizes, num_classes)

        boxes = torch.tensor([[[0, 0, 0.5, 0.5]]])
        labels = torch.tensor([[1]])
        output = grid.encode(boxes, labels)
        b, l, s = grid.get_preds(output)
        self.assertTrue(b.equal(boxes))
        self.assertTrue(l.equal(labels))

    def test_compute_losses(self):
        gt_boxes = None  # (see below: gt encodes the target, out the prediction)
        boxes = torch.tensor([[[-0.75, 0, -0.25, 1]]])
        labels = torch.tensor([[1]])
        gt = self.grid.encode(boxes, labels)

        boxes = torch.tensor([[[-1., 0, 0, 1]]])
        labels = torch.tensor([[0]])
        out = self.grid.encode(boxes, labels)

        bl, cl = self.grid.compute_losses(out, gt)
        bl, cl = bl.item(), cl.item()

        # Box loss: L1 between the encoded anchor parameter vectors.
        exp_bl = l1_loss(torch.tensor([0, 0, 1, 0.5]),
                         torch.tensor([0, 0, 2, 0.5])).item()
        self.assertEqual(bl, exp_bl)

        # Class loss: two fully wrong class cells, averaged over all 16 cells.
        num_class_els = 16
        exp_cl = ((2 * bce(torch.tensor(1.), torch.tensor(0.))).item() /
                  num_class_els)
        self.assertEqual(cl, exp_cl)
class TestF1(unittest.TestCase):
    """F1 metric accumulation over batches of encoded detections."""

    def setUp(self):
        grid_sz = 2
        anc_sizes = torch.tensor([
            [1., 1],
            [2, 2]])
        num_classes = 3
        self.grid = ObjectDetectionGrid(grid_sz, anc_sizes, num_classes)
        self.f1 = F1(self.grid, score_thresh=0.3, iou_thresh=0.5)
        self.f1.on_epoch_begin()

    def test1(self):
        # Two images in each batch. Each image has:
        # Two boxes, both match.
        boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]],
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]]
        ])
        labels = torch.tensor([[1, 1], [1, 1]])
        output = self.grid.encode(boxes, labels)

        target_boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]],
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]]
        ])
        target_labels = torch.tensor([[1, 1], [1, 1]])
        target = (target_boxes, target_labels)

        # Simulate two batches
        self.f1.on_batch_end(output, target)
        self.f1.on_batch_end(output, target)
        metrics = self.f1.on_epoch_end({})
        # 8 true positives, no false positives or negatives -> perfect score.
        exp_f1 = self.f1.compute_f1(8, 0, 0)
        self.assertEqual(exp_f1, 1.0)
        self.assertEqual(exp_f1, metrics['last_metrics'][0])

    def test2(self):
        # Two boxes, one matches, the other doesn't overlap enough.
        boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5]]
        ])
        labels = torch.tensor([[1, 1]])
        output = self.grid.encode(boxes, labels)

        target_boxes = torch.tensor([
            [[0, 0, 0.1, 0.1], [-1, -1, -0.5, -0.5]],
        ])
        target_labels = torch.tensor([[1, 1]])
        target = (target_boxes, target_labels)

        self.f1.on_batch_end(output, target)
        metrics = self.f1.on_epoch_end({})
        # One TP, one FP (insufficient overlap) and one FN.
        exp_f1 = self.f1.compute_f1(1, 1, 1)
        self.assertEqual(exp_f1, 0.5)
        self.assertEqual(exp_f1, metrics['last_metrics'][0])

    def test3(self):
        # Three boxes, one matches, one overlaps but has the wrong label,
        # and one doesn't match.
        boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5], [-0.5, 0, 0, 0.5]]
        ])
        labels = torch.tensor([[1, 2, 1]])
        output = self.grid.encode(boxes, labels)

        target_boxes = torch.tensor([
            [[0, 0, 0.5, 0.5], [-1, -1, -0.5, -0.5], [-0.5, 0, -0.4, 0.1]]
        ])
        target_labels = torch.tensor([[1, 1, 1]])
        target = (target_boxes, target_labels)

        self.f1.on_batch_end(output, target)
        metrics = self.f1.on_epoch_end({})
        exp_f1 = self.f1.compute_f1(1, 2, 2)
        self.assertEqual(exp_f1, metrics['last_metrics'][0])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
c956809dc40104300810383514543a84d7e16eb4 | 3,284 | py | Python | src/utilsmodule/main.py | jke94/WilliamHill-WebScraping | d570ff7ba8a5c35d7c852327910d39b715ce5125 | [
"MIT"
] | null | null | null | src/utilsmodule/main.py | jke94/WilliamHill-WebScraping | d570ff7ba8a5c35d7c852327910d39b715ce5125 | [
"MIT"
] | 1 | 2020-10-13T15:44:40.000Z | 2020-10-13T15:44:40.000Z | src/utilsmodule/main.py | jke94/WilliamHill-WebScraping | d570ff7ba8a5c35d7c852327910d39b715ce5125 | [
"MIT"
] | null | null | null | '''
AUTOR: Javier Carracedo
Date: 08/10/2020
Auxiliar class to test methods from WilliamHillURLs.py
'''
import WilliamHillURLs
if __name__ == "__main__":
myVariable = WilliamHillURLs.WilliamHillURLs()
# Print all matches played actually.
for item in myVariable.GetAllMatchsPlayedActually(myVariable.URL_FootballOnDirect):
print(item)
'''
OUTPUT EXAMPLE at 08/10/2020 20:19:29:
Islas Feroe Sub 21 v España Sub 21: 90/1 | 15/2 | 1/40
Dornbirn v St Gallen: 90/1 | 15/2 | 1/40
Corellano v Peña Azagresa: 90/1 | 15/2 | 1/40
Esbjerg v Silkeborg: 90/1 | 15/2 | 1/40
Koge Nord v Ishoj: 90/1 | 15/2 | 1/40
Vasco da Gama Sub 20 v Bangu Sub 20: 90/1 | 15/2 | 1/40
Rangers de Talca v Dep. Valdivia: 90/1 | 15/2 | 1/40
San Marcos v Dep. Santa Cruz: 90/1 | 15/2 | 1/40
Melipilla v Puerto Montt: 90/1 | 15/2 | 1/40
Kray v TuRU Dusseldorf: 90/1 | 15/2 | 1/40
Siegen v Meinerzhagen: 90/1 | 15/2 | 1/40
1. FC M'gladbach v Kleve: 90/1 | 15/2 | 1/40
Waldgirmes v Turkgucu-Friedberg: 90/1 | 15/2 | 1/40
Zamalek v Wadi Degla: 90/1 | 15/2 | 1/40
Elva v Flora B: 90/1 | 15/2 | 1/40
Fujairah FC v Ajman: 90/1 | 15/2 | 1/40
Vanersborg v Ahlafors: 90/1 | 15/2 | 1/40
'''
    # Print the URL of every match currently being played.
    for item in myVariable.GetAllUrlMatches(myVariable.URL_FootballOnDirect):
        print(item)
'''OUTPUT EXAMPLE at 08/10/2020 20:19:29:
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701125/islas-feroe-sub-21-â-españa-sub-21
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701988/dornbirn-â-st-gallen
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18702077/corellano-â-peña-azagresa
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694620/esbjerg-â-silkeborg
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18702062/koge-nord-â-ishoj
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701883/vasco-da-gama-sub-20-â-bangu-sub-20
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694610/rangers-de-talca-â-dep-valdivia
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694611/san-marcos-â-dep-santa-cruz
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694612/melipilla-â-puerto-montt
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694624/kray-â-turu-dusseldorf
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694625/siegen-â-meinerzhagen
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694626/1-fc-mgladbach-â-kleve
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694627/waldgirmes-â-turkgucu-friedberg
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18694162/zamalek-â-wadi-degla
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701762/elva-â-flora-b
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701661/fujairah-fc-â-ajman
https://sports.williamhill.es/betting/es-es/fútbol/OB_EV18701852/vanersborg-â-ahlafors
'''
| 49.014925 | 109 | 0.670524 |
c9570eba69366671540e993ccc63b21a8b23a785 | 3,185 | py | Python | mys/cli/subparsers/install.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | mys/cli/subparsers/install.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | mys/cli/subparsers/install.py | nsauzede/mys | 5f5db80b25e44e3ab9c4b97cb9a0fd6fa3fc0267 | [
"MIT"
] | null | null | null | import glob
import os
import shutil
import sys
import tarfile
from tempfile import TemporaryDirectory
from ..utils import ERROR
from ..utils import Spinner
from ..utils import add_jobs_argument
from ..utils import add_no_ccache_argument
from ..utils import add_verbose_argument
from ..utils import box_print
from ..utils import build_app
from ..utils import build_prepare
from ..utils import read_package_configuration
from ..utils import run
def install_clean():
    """Delete the build directory of the package in the current working directory.

    Raises Exception when no package.toml is present (i.e. the current
    directory is not a Mys package).
    """
    if not os.path.exists('package.toml'):
        raise Exception('not a package')
    cleaning_spinner = Spinner(text='Cleaning')
    with cleaning_spinner:
        shutil.rmtree('build', ignore_errors=True)
def install_download(args):
    """Download the package 'mys-<args.package>' from the registry via pip."""
    pip_command = [sys.executable, '-m', 'pip', 'download', f'mys-{args.package}']
    run(pip_command, 'Downloading package', args.verbose)
def install_extract():
    """Extract the downloaded 'mys-*.tar.gz' archive into the current directory
    and remove the archive afterwards.

    Assumes install_download() has already placed a matching archive in the
    current working directory (raises IndexError otherwise).
    """
    archive = glob.glob('mys-*.tar.gz')[0]
    with Spinner(text='Extracting package'):
        with tarfile.open(archive) as fin:
            # NOTE(review): extractall() trusts the member paths inside the
            # downloaded archive (tarfile path traversal, CVE-2007-4559);
            # consider extractall(filter='data') on Python >= 3.12 — confirm
            # the registry archives are trusted.
            fin.extractall()
    os.remove(archive)
def install_build(args):
    """Build the application of the package in the current directory.

    Returns the parsed package configuration. Raises Exception when the
    package contains no application (src/main.mys missing).
    """
    config = read_package_configuration()
    is_application = build_prepare(args.verbose, 'speed', args.no_ccache, config)
    if not is_application:
        box_print(['There is no application to build in this package (src/main.mys ',
                   'missing).'],
                  ERROR)
        raise Exception()
    # NOTE(review): args.debug is not added by add_subparser() in this file —
    # presumably supplied by a parent parser; confirm it is always present.
    build_app(args.debug, args.verbose, args.jobs, is_application)
    return config
def install_install(root, _args, config):
    """Copy the built application binary into <root>/bin, named after the package."""
    app_name = config['package']['name']
    target_dir = os.path.join(root, 'bin')
    target_path = os.path.join(target_dir, app_name)
    with Spinner(text=f"Installing {app_name} in {target_dir}"):
        os.makedirs(target_dir, exist_ok=True)
        # shutil.copy == copyfile + copymode
        shutil.copy('build/app', target_path)
def install_from_current_dirctory(args, root):
    """Clean, build and install the package in the current directory under root."""
    # TODO(review): function name has a typo ('dirctory'); kept as-is because
    # do_install() calls it by this name.
    install_clean()
    config = install_build(args)
    install_install(root, args, config)
def install_from_registry(args, root):
    """Download args.package from the registry, build it in a temporary
    directory, and install its application under root.

    The original working directory is restored afterwards. Previously the
    process was left chdir'd into the (by then deleted) temporary directory,
    which breaks any subsequent cwd-relative operation and makes the
    TemporaryDirectory cleanup itself fail on Windows.
    """
    original_cwd = os.getcwd()
    try:
        with TemporaryDirectory() as tmp_dir:
            os.chdir(tmp_dir)
            install_download(args)
            install_extract()
            # the extracted package directory is named 'mys-<package>-<version>'
            os.chdir(glob.glob('mys-*')[0])
            config = install_build(args)
            install_install(root, args, config)
    finally:
        # never leave the caller inside a removed directory
        os.chdir(original_cwd)
def do_install(_parser, args, _mys_config):
    """Entry point of the 'install' subcommand: install from the current
    package when no package name was given, otherwise from the registry.
    """
    install_root = os.path.abspath(os.path.expanduser(args.root))
    if args.package is not None:
        install_from_registry(args, install_root)
    else:
        install_from_current_dirctory(args, install_root)
def add_subparser(subparsers):
subparser = subparsers.add_parser(
'install',
description='Install an application from local package or registry.')
add_verbose_argument(subparser)
add_jobs_argument(subparser)
add_no_ccache_argument(subparser)
subparser.add_argument('--root',
default='~/.local',
help='Root folder to install into (default: %(default)s.')
subparser.add_argument(
'package',
nargs='?',
help=('Package to install application from. Installs current package if '
'not given.'))
subparser.set_defaults(func=do_install)
| 28.4375 | 85 | 0.674725 |
c957b9e1d84b2cf858f2f0ed59b9eda407c2dff9 | 1,011 | py | Python | app/api/v2/models/sale.py | kwanj-k/storemanager-v2 | 89e9573543e32de2e8503dc1440b4ad907bb10b5 | [
"MIT"
] | 1 | 2020-02-29T20:14:32.000Z | 2020-02-29T20:14:32.000Z | app/api/v2/models/sale.py | kwanj-k/storemanager-v2 | 89e9573543e32de2e8503dc1440b4ad907bb10b5 | [
"MIT"
] | 5 | 2018-10-24T17:28:48.000Z | 2019-10-22T11:09:19.000Z | app/api/v2/models/sale.py | kwanj-k/storemanager-v2 | 89e9573543e32de2e8503dc1440b4ad907bb10b5 | [
"MIT"
] | null | null | null | """
A model class for Sale
"""
# local imports
from app.api.common.utils import dt
from app.api.v2.db_config import conn
from app.api.v2.models.cart import Cart
# cursor to perform database operations
cur = conn.cursor()
class Sale(Cart):
"""
Sale object which inherites some of its attributes from cart
"""
    def __init__(self, store_id, seller_id, product, number, amount):
        """Create a sale record for a store.

        Cart stores the seller/product/quantity/amount details; this class
        adds the store id and a creation timestamp.
        """
        super().__init__(
            seller_id=seller_id,
            product=product,
            number=number,
            amount=amount)
        # store in which the sale happened
        self.store_id = store_id
        # NOTE(review): dt is imported once at module load from
        # app.api.common.utils, so presumably this is a module-load-time
        # timestamp rather than the sale time — confirm against utils.dt.
        self.created_at = dt
def sell(self):
"""
The sell sql query
"""
sale = """INSERT INTO
sales (store_id,seller_id,product, number,amount,created_at)
VALUES
('%s','%s','%s','%s','%s','%s')""" \
% (self.store_id, self.seller_id, self.product, self.number, self.amount, self.created_at)
cur.execute(sale)
conn.commit()
| 25.275 | 106 | 0.578635 |
c9582e0280978de265a7060549f58e588eceb72b | 3,306 | py | Python | src/dembones/collector.py | TransactCharlie/dembones | b5540a89d4c6d535b589a1a2b06697569879bc05 | [
"MIT"
] | null | null | null | src/dembones/collector.py | TransactCharlie/dembones | b5540a89d4c6d535b589a1a2b06697569879bc05 | [
"MIT"
] | null | null | null | src/dembones/collector.py | TransactCharlie/dembones | b5540a89d4c6d535b589a1a2b06697569879bc05 | [
"MIT"
] | null | null | null | import aiohttp
from bs4 import BeautifulSoup
import asyncio
from dembones.webpage import WebPage
import dembones.urltools as ut
import logging
log = logging.getLogger(__name__)
class Collector:
    """Asynchronously crawl pages starting from a seed URL and build a
    url -> WebPage map of everything collected."""

    def __init__(self, max_concurrent_fetches=3, max_depth=3, fetch_timeout=5,
                 target_validator=ut.validate_same_domain_up_path):
        """
        Arguments:
            - max_concurrent_fetches : maximum number of simultaneous HTTP fetches
            - max_depth : maximum link-recursion depth from the seed URL
            - fetch_timeout : per-request timeout in seconds
            - target_validator : predicate (source_url, target_url) -> bool
                                 deciding which discovered links to follow
        """
        # url -> WebPage map (None acts as an "in flight" sentinel). Kept
        # per-instance so that successive or concurrent Collector objects do
        # not share crawl state — it was previously a mutable class attribute
        # shared by every instance.
        self.url_hash = {}
        self.semaphore = asyncio.Semaphore(max_concurrent_fetches)
        self.fetch_timeout = fetch_timeout
        self.max_depth = max_depth
        self.validate_targets = target_validator

    async def fetch(self, url, session):
        """Fetch url using session and return the raw response body."""
        async with session.get(url, timeout=self.fetch_timeout) as r:
            r = await r.read()
            log.debug(r)
            return r

    async def recurse_collect(self, url, session, depth):
        """Fetch url and Soup it. Then work out which links we need to recurse."""
        # Because we are scheduled at the mercy of the reactor loop, it's possible
        # that some other task is already fetching this page and awaiting the
        # result. Let's check!
        if url in self.url_hash:
            return
        # OK we are the only active task on this reactor working on this url.
        # Before we await the page, let other potential tasks know about it.
        self.url_hash[url] = None
        try:
            # the semaphore bounds the number of concurrent fetches
            async with self.semaphore:
                page = await self.fetch(url, session)
            log.info("Collected: Depth {}: Url {}".format(depth, url))
            wp = WebPage.from_soup(BeautifulSoup(page, "html.parser"), url)
            self.url_hash[url] = wp
            # if we haven't hit max_depth yet work out links to recurse over
            if depth < self.max_depth:
                # strip fragment identifiers (#...) before deduplicating
                stripped_targets = (ut.strip_fragment_identifier(t) for t in wp.links)
                # build a set of target urls that obey our restrictions
                valid_targets = set([
                    st for st in stripped_targets
                    if st not in self.url_hash
                    and self.validate_targets(url, st)
                ])
                # generate async tasks for the next depth level
                tasks = [self.recurse_collect(vt, session, depth + 1) for vt in valid_targets]
                return await asyncio.gather(*tasks)
        # There are a myriad of IO based exceptions that can happen - we don't
        # know all of them, and we want to continue processing other tasks.
        except Exception as e:
            log.error(e)
            # upgrade the sentinel entry to at least an (empty) WebPage object
            self.url_hash[url] = WebPage()

    async def start_recursive_collect(self, url, loop):
        """Start our collection using the event loop (loop)"""
        depth = 1
        async with aiohttp.ClientSession(loop=loop) as session:
            await self.recurse_collect(url, session, depth)

    def start_collection(self, url):
        """Crawl synchronously from url; return a dict of url -> WebPage dict."""
        loop = asyncio.get_event_loop()
        log.debug("Collector Event Loop Start")
        loop.run_until_complete(self.start_recursive_collect(url, loop))
        log.debug("Collector Event Loop Exit")
        return {url: wp.to_dict() for url, wp in self.url_hash.items()}
| 38.894118 | 95 | 0.629462 |
c9599538e684b00c1b9eb75ec04458b635c13ae8 | 501 | py | Python | py_tdlib/constructors/input_inline_query_result_video.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/input_inline_query_result_video.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/input_inline_query_result_video.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Type
class inputInlineQueryResultVideo(Type):
id = None # type: "string"
title = None # type: "string"
description = None # type: "string"
thumbnail_url = None # type: "string"
video_url = None # type: "string"
mime_type = None # type: "string"
video_width = None # type: "int32"
video_height = None # type: "int32"
video_duration = None # type: "int32"
reply_markup = None # type: "ReplyMarkup"
input_message_content = None # type: "InputMessageContent"
| 31.3125 | 60 | 0.688623 |
c959a09cafe37155453fcdb077c647271d246317 | 710 | py | Python | translation/eval_args.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | 10 | 2019-01-19T08:15:05.000Z | 2021-12-02T08:54:50.000Z | translation/eval_args.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | null | null | null | translation/eval_args.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | 2 | 2019-01-25T21:19:49.000Z | 2019-03-21T11:38:13.000Z | import argparse
import train_args
def get_arg_parser() -> argparse.ArgumentParser:
'''
A set of parameters for evaluation
'''
parser = train_args.get_arg_parser()
parser.add_argument('--load_path', type=str, help='the path of the model to test')
parser.add_argument('--eval_train', action='store_true', help='eval on the train set')
parser.add_argument('--eval_test', action='store_true', help='eval on the test set')
parser.add_argument('--eval_fast', action='store_true', help='eval quickly if implemented and supported (Greedy)')
parser.add_argument('--output_file', type=str, default=None, help='if specified will store the translations in this file')
return parser | 50.714286 | 126 | 0.723944 |
c959fbbb426057adb9170ca9df4b29dd550126f4 | 43,792 | py | Python | src/fidelity_estimation_pauli_sampling.py | akshayseshadri/minimax-fidelity-estimation | 07ff539dc5ea8280bc4f33444da3d6a90c606833 | [
"MIT"
] | 1 | 2021-12-16T14:23:46.000Z | 2021-12-16T14:23:46.000Z | src/fidelity_estimation_pauli_sampling.py | akshayseshadri/minimax-fidelity-estimation | 07ff539dc5ea8280bc4f33444da3d6a90c606833 | [
"MIT"
] | null | null | null | src/fidelity_estimation_pauli_sampling.py | akshayseshadri/minimax-fidelity-estimation | 07ff539dc5ea8280bc4f33444da3d6a90c606833 | [
"MIT"
] | null | null | null | """
Creates a fidelity estimator for any pure state, using randomized Pauli measurement strategy.
Author: Akshay Seshadri
"""
import warnings
import numpy as np
import scipy as sp
from scipy import optimize
import project_root # noqa
from src.optimization.proximal_gradient import minimize_proximal_gradient_nesterov
from src.utilities.qi_utilities import generate_random_state, generate_special_state, generate_Pauli_operator, generate_POVM, embed_hermitian_matrix_real_vector_space
from src.utilities.noise_process import depolarizing_channel
from src.utilities.quantum_measurements import Measurement_Manager
from src.fidelity_estimation import Fidelity_Estimation_Manager
def project_on_box(v, l, u):
    """
    Project the point v \in R^n onto the box C = {x \in R^n | l <= x <= u}, where the
    inequalities x >= l and x <= u hold componentwise (i.e., x_k >= l_k and x_k <= u_k).

    The projection onto the box is given componentwise by
        \Pi(v)_k = l_k  if v_k <= l_k
                   v_k  if l_k <= v_k <= u_k
                   u_k  if v_k >= u_k
    which can be written compactly as \Pi(v)_k = min(max(v_k, l_k), u_k).
    Entries of l and u may be -\infty and \infty, respectively.
    """
    # np.clip(v, l, u) is documented as equivalent to np.minimum(np.maximum(v, l), u)
    return np.clip(v, l, u)
class Pauli_Sampler_Fidelity_Estimation_Manager():
"""
Computes the Juditsky & Nemirovski estimator and risk for pure target states when measurements
are performed as per the randomized Pauli measurement strategy described in Box II.1 of PRA submission.
In general, this involves finding a saddle point of the function
\Phi_r(sigma_1, sigma_2; phi, alpha) = Tr(rho sigma_1) - Tr(rho sigma_2)
+ \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(-phi^{i}_k/alpha) (p^{i}_1)_k)
+ \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(phi^{i}_k/alpha) (p^{i}_2)_k)
+ 2 alpha r
where
(p^{i}_1)_k = (Tr(E^(i)_k sigma_1) + \epsilon_o/Nm) / (1 + \epsilon_o) and
(p^{i}_2)_k = (Tr(E^{i}_k sigma_2) + \epsilon_o/Nm) / (1 + \epsilon_o)
are the probability distributions corresponding to the ith POVM {E^{i}_k}_{k = 1}^{N_i} with N_i elements.
R_i > 0 is a parameter that denotes the number of observations of the ith type of measurement (i.e., ith POVM).
There are a total of N POVMs.
X is the set of density matrices, rho is the "target" density matrix. r > 0 is a parameter.
Then, given the saddle point sigma_1*, sigma_2*, phi*, alpha*, we can construct an estimator
\hat{F}(\omega^{1}_1, ..., \omega^{1}_{R_1}, ... \omega^{N}_1, ..., \omega^{N}_{R_N})
= \sum_{i = 1}^N \sum_{l = 1}^{R_i} phi^{i}*(\omega^{i}_l) + c
where the constant 'c' is given by the optimization problem
c = 0.5 \max_{sigma_1} [Tr(rho sigma_1) + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(-phi^{i}_k/alpha) (p^{i}_1)_k)]
- 0.5 \max_{sigma_2} [-Tr(rho sigma_2) + \sum_{i = 1}^N alpha R_i log(\sum_{k = 1}^{N_i} exp(phi^{i}_k/alpha) (p^{i}_2)_k)]
The saddle point value \Phi*(r) gives an upper bound for the confidence interval within which the error lies.
The above procedure described can be expensive in large dimensions. For the case of randomized Pauli measurement (RPM) strategy,
the algorithms are specialized so that very large dimensions can be handled.
For arbitrary pure target states, the RPM strategy corresponds to randomly sampling Pauli operators according to some predetermined
sampling probability, measuring these Pauli operators, and recording their outcomes (+1 or -1 eigenavalue).
For stabilizer states, this measurement strategy reduces to uniformly randomly sampling from the stabilizer group (all elements
except the identity) and measuring them.
"""
    def __init__(self, n: int, R: int, NF: float, epsilon: float, epsilon_o: float, tol: float = 1e-6, random_init: bool = False, print_progress: bool = True):
        """
        Assigns values to parameters and defines and initializes functions.

        The estimator depends on the dimension of the target state, the number of repetitions
        of the measurement, a normalization factor, and the confidence level. It is independent
        of the actual target state used for the RPM strategy, except through the normalization
        factor NF described below.

        The small parameter epsilon_o required to formalize Juditsky & Nemirovski's approach is
        used only in the optimization for finding alpha. It is not used in finding the optimal
        sigma_1 and sigma_2 because those are computed "by hand".

        Arguments:
            - n              : dimension of the system
            - R              : total number of repetitions used
            - NF             : the normalization factor, NF = \sum_i |tr(W_i rho)|, where the sum
                               is over all non-identity Paulis and rho is the target state
            - epsilon        : 1 - confidence level, should be between 0 and 0.25, end points excluded
            - epsilon_o      : constant to prevent zero probabilities in Born's rule
            - tol            : tolerance used by the optimization algorithms
            - random_init    : if True, a random initial condition is used for the optimization
            - print_progress : if True, the progress of optimization is printed
        """
        # confidence level
        self.epsilon = epsilon
        # obtain 'r' from \epsilon
        self.r = np.log(2./epsilon)
        # constant to keep the probabilities in Born rule positive
        self.epsilon_o = epsilon_o
        # dimension of the system
        self.n = n
        # number of repetitions of the (minimax optimal) measurement
        self.R = R
        # the normalization factor, NF = \sum_i |tr(W_i rho)|; state dependent
        self.NF = NF
        # quantities defining the two-outcome effective POVM {Omega, Delta_Omega}
        self.omega1 = 0.5 * (n + NF - 1) / NF
        self.omega2 = 0.5 * (NF - 1) / NF
        # lower bound for the (classical) fidelity, used in the theory for optimization
        self.gamma = (epsilon/2)**(2/R)
        # minimum number of repetitions required for a risk less than 0.5
        self.Ro = np.ceil(np.log(2/epsilon) / np.abs(np.log(np.sqrt(self.omega1 * self.omega2) + np.sqrt(np.abs((1 - self.omega1) * (1 - self.omega2))))))
        # if gamma is not large enough, we have a risk of 0.5
        if R <= self.Ro:
            # NOTE(review): MinimaxOptimizationWarning is not defined/imported in the
            # visible part of this module — confirm it is defined elsewhere in the
            # file, otherwise this line raises NameError instead of warning.
            warnings.warn("The number of repetitions are very low. Consider raising the number of repetitions to at least %d." %self.Ro, MinimaxOptimizationWarning)
        # tolerance for all the computations
        self.tol = tol
        # initialization for maximize_Phi_r_density_matrices_multiple_measurements (to be used specifically for find_alpha_saddle_point_fidelity_estimation)
        if not random_init:
            # we choose lambda_1 = lambda_2 = 0.9, which corresponds to sigma_1 = 0.9 rho + 0.1 rho_1_perp, sigma_2 = 0.9 rho + 0.1 rho_2_perp
            self.mpdm_lambda_ds_o = np.array([0.9, 0.9])
        else:
            # take lambda_1 and lambda_2 as some random number between 0 and 1
            self.mpdm_lambda_ds_o = np.random.random(size = 2)
        # determine whether to print progress
        self.print_progress = print_progress
        # determine whether the optimization achieved the tolerance
        self.success = True
###----- Finding x, y maximum and alpha minimum of \Phi_r
    def maximize_Phi_r_alpha_density_matrices(self, alpha: float):
        """
        Solves the inner maximization
            \max_{sigma_1, sigma_2 \in X} \Phi_r_alpha(sigma_1, sigma_2)
                = -\min_{sigma_1, sigma_2 \in X} -\Phi_r_alpha(sigma_1, sigma_2)
        for a fixed number alpha > 0, where
            \Phi_r_alpha(sigma_1, sigma_2) = Tr(rho sigma_1) - Tr(rho sigma_2)
                + 2 alpha \sum_{i = 1}^N R_i log(\sum_{k = 1}^{N_i} \sqrt{(p^{i}_1)_k (p^{i}_2)_k})
        and p^{i}_1, p^{i}_2 are the epsilon_o-regularized Born probability distributions
        of the ith POVM under sigma_1 and sigma_2, respectively.

        The density matrices are parametrized as the convex combinations
            sigma_1 = lambda_1 rho + (1 - lambda_1) rho_1_perp
            sigma_2 = lambda_2 rho + (1 - lambda_2) rho_2_perp
        with 0 <= lambda_1, lambda_2 <= 1, and rho_1_perp, rho_2_perp density matrices
        supported on the orthogonal complement of the target state rho. For the RPM
        strategy the effective POVM has two elements {Omega, Delta_Omega}, whose Born
        probabilities are affine in lambda_1 (respectively lambda_2):
            Tr(Omega sigma_1)       = omega_1 lambda_1 + omega_2 (1 - lambda_1)
            Tr(Delta_Omega sigma_1) = (1 - omega_1) lambda_1 + (1 - omega_2) (1 - lambda_1)
        This reduces the optimization to two dimensions, irrespective of the dimension
        of rho. epsilon_o is included in the Born probabilities to avoid zero-division
        in the derivative of Phi_r. The maximization is performed using Nesterov's
        accelerated proximal gradient method.

        Returns:
            (lambda_1_opt, lambda_2_opt, maximum value of \Phi_r_alpha)
        """
        # we work with direct sum lambda_ds = (lambda_1, lambda_2) for use in pre-written algorithms
        # the objective function (we work with negative of \Phi_r_alpha so that we can minimize instead of maximize)
        def f(lambda_ds):
            lambda_1 = lambda_ds[0]
            lambda_2 = lambda_ds[1]
            # start with the terms that don't depend on POVMs
            f_val = -lambda_1 + lambda_2
            # number of repetitions of the POVM measurement
            R = self.R
            # the probability distributions corresponding to the minimax optimal POVM:
            # p_1^{i}(k) = (<E^{i}_k, sigma_1> + \epsilon_o/Ni)/(1 + \epsilon_o) and
            # p_2^{i}(k) = (<E^{i}_k, sigma_2> + \epsilon_o/Ni)/(1 + \epsilon_o)
            p_1 = (np.array([self.omega1 * lambda_1 + self.omega2 * (1 - lambda_1), (1 - self.omega1) * lambda_1 + (1 - self.omega2) * (1 - lambda_1)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            p_2 = (np.array([self.omega1 * lambda_2 + self.omega2 * (1 - lambda_2), (1 - self.omega1) * lambda_2 + (1 - self.omega2) * (1 - lambda_2)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            # subtract 2 alpha R log(Hellinger affinity of p_1 and p_2)
            f_val = f_val - 2*alpha * R * np.log(np.sqrt(p_1).dot(np.sqrt(p_2)))
            return f_val
        # gradient of f with respect to (lambda_1, lambda_2)
        def gradf(lambda_ds):
            lambda_1 = lambda_ds[0]
            lambda_2 = lambda_ds[1]
            # start with the terms that don't depend on POVMs
            # gradient with respect to lambda_1
            gradf_lambda_1_val = -1
            # gradient with respect to lambda_2
            gradf_lambda_2_val = 1
            # number of repetitions of the POVM measurement
            R = self.R
            # the probability distributions corresponding to the POVM:
            # p_1^{i}(k) = (<E^{i}_k, sigma_1> + \epsilon_o/Nm)/(1 + \epsilon_o) and
            # p_2^{i}(k) = (<E^{i}_k, sigma_2> + \epsilon_o/Nm)/(1 + \epsilon_o)
            p_1 = (np.array([self.omega1 * lambda_1 + self.omega2 * (1 - lambda_1), (1 - self.omega1) * lambda_1 + (1 - self.omega2) * (1 - lambda_1)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            p_2 = (np.array([self.omega1 * lambda_2 + self.omega2 * (1 - lambda_2), (1 - self.omega1) * lambda_2 + (1 - self.omega2) * (1 - lambda_2)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
            # Hellinger affinity between p_1 and p_2
            AffH = np.sqrt(p_1).dot(np.sqrt(p_2))
            # gradient with respect to lambda_1
            gradf_lambda_1_val = gradf_lambda_1_val - alpha * R * (self.omega1 - self.omega2) * np.sqrt(p_2/p_1).dot(np.array([1, -1]))/(AffH * (1. + self.epsilon_o))
            # gradient with respect to lambda_2
            gradf_lambda_2_val = gradf_lambda_2_val - alpha * R * (self.omega1 - self.omega2) * np.sqrt(p_1/p_2).dot(np.array([1, -1]))/(AffH * (1. + self.epsilon_o))
            # gradient with respect to lambda_ds
            gradf_val = np.array([gradf_lambda_1_val, gradf_lambda_2_val])
            return gradf_val
        # the other part of the objective function is an indicator function on X x X, so it is set to zero because all iterates in Nesterov's
        # second method are inside the domain
        P = lambda lambda_ds: 0.
        # proximal operator of an indicator function is a projection
        def prox_lP(lambda_ds, l, tol):
            # we project each component of lambda_ds into the unit interval [0, 1]
            lambda_1_projection = project_on_box(lambda_ds[0], 0, 1)
            lambda_2_projection = project_on_box(lambda_ds[1], 0, 1)
            lambda_ds_projection = np.array([lambda_1_projection, lambda_2_projection])
            return lambda_ds_projection
        # perform the minimization using Nesterov's second method (accelerated proximal gradient)
        lambda_ds_opt, error = minimize_proximal_gradient_nesterov(f, P, gradf, prox_lP, self.mpdm_lambda_ds_o, tol = self.tol, return_error = True)
        # check if tolerance is satisfied
        if error > self.tol:
            self.success = False
            # NOTE(review): MinimaxOptimizationWarning is not defined in the visible
            # part of this module — confirm it is defined elsewhere in the file.
            warnings.warn("The tolerance for the optimization was not achieved. The estimates may be unreliable. Consider using a random initial condition by setting random_init = True.", MinimaxOptimizationWarning)
        # store the optimal point as initial condition for future use (warm start)
        self.mpdm_lambda_ds_o = lambda_ds_opt
        # obtain the density matrices at the optimum
        self.lambda_1_opt = lambda_ds_opt[0]
        self.lambda_2_opt = lambda_ds_opt[1]
        # -f gives back the (maximized) value of \Phi_r_alpha
        return (self.lambda_1_opt, self.lambda_2_opt, -f(lambda_ds_opt))
    def find_density_matrices_alpha_saddle_point(self):
        """
        Solves the optimization problem
            \min_{alpha > 0} (alpha r + 0.5 \inf_phi bar{\Phi_r}(phi, alpha))
        where, using the closed form of the infimum over phi,
            \inf_phi bar{\Phi_r}(phi, alpha)
                = \max_{sigma_1, sigma_2 \in X} [Tr(rho sigma_1) - Tr(rho sigma_2)
                    + 2 alpha \sum_{i = 1}^N R_i log(\sum_{k = 1}^{N_i} \sqrt{(p^{i}_1)_k (p^{i}_2)_k})]
        with p^{i}_1, p^{i}_2 the regularized Born distributions of the ith POVM under
        sigma_1 and sigma_2. The inner maximization is delegated to
        maximize_Phi_r_alpha_density_matrices; the outer scalar minimization over alpha
        is done with scipy's bounded Brent method. Note that the inner maximum is
        always >= 0 since \Phi_r_alpha(sigma_1, sigma_1) = 0.

        Side effects: sets self.alpha_opt, self.Phi_r_bar_alpha_opt (the risk),
        self.lambda_1_opt and self.lambda_2_opt.

        Returns:
            (lambda_1_opt, lambda_2_opt, alpha_opt)
        """
        # print progress, if required
        if self.print_progress:
            print("Beginning optimization".ljust(22), end = "\r", flush = True)
        # scalar objective for the outer minimization over alpha
        def Phi_r_bar_alpha(alpha):
            Phi_r_bar_alpha_val = alpha*self.r + 0.5*self.maximize_Phi_r_alpha_density_matrices(alpha = alpha)[2]
            return Phi_r_bar_alpha_val
        # perform the minimization
        alpha_optimization_result = sp.optimize.minimize_scalar(Phi_r_bar_alpha, bounds = (1e-16, 1e3), method = 'bounded')
        # value of alpha at optimum
        self.alpha_opt = alpha_optimization_result.x
        # value of objective function at optimum: gives the risk
        self.Phi_r_bar_alpha_opt = alpha_optimization_result.fun
        # print progress, if required
        if self.print_progress:
            print("Optimization complete".ljust(22))
        # check if alpha optimization was successful
        if not alpha_optimization_result.success:
            self.success = False
            # NOTE(review): MinimaxOptimizationWarning is not defined in the visible
            # part of this module — confirm it is defined elsewhere in the file.
            warnings.warn("The optimization has not converge properly to the saddle-point. The estimates may be unreliable. Consider using a random initial condition by setting random_init = True.", MinimaxOptimizationWarning)
        return (self.lambda_1_opt, self.lambda_2_opt, self.alpha_opt)
###----- Finding x, y maximum and alpha minimum of \Phi_r
###----- Constructing the fidelity estimator
    def find_fidelity_estimator(self):
        """
        Constructs an estimator for the fidelity between the pure target state rho and
        an unknown state sigma, following Juditsky & Nemirovski.

        First, the saddle point sigma_1*, sigma_2*, phi*, alpha* of \Phi_r is found
        (see the class docstring for the definition of \Phi_r). Then the estimator is
            \hat{F}(\omega_1, ..., \omega_R) = \sum_{l = 1}^{R} phi*(\omega_l) + c
        where c = 0.5 (Tr(rho sigma_1*) + Tr(rho sigma_2*)) = 0.5 (lambda_1* + lambda_2*)
        for the two-outcome RPM POVM. The outcomes are labelled \Omega = {0, 1}
        (Python is zero-indexed): 0 for the +1 eigenvalue, 1 for the -1 eigenvalue.

        Side effects: sets self.phi_opt, self.c and self.estimator.

        Returns:
            (estimator, Phi_r_opt) — the estimator function and the saddle-point value
            of \Phi_r, which bounds the width of the confidence interval (the risk).
        """
        # find x, y, and alpha components of the saddle point
        lambda_1_opt, lambda_2_opt, alpha_opt = self.find_density_matrices_alpha_saddle_point()
        # the saddle point value of \Phi_r
        Phi_r_opt = self.Phi_r_bar_alpha_opt
        # construct (phi/alpha)* at saddle point using lambda_1* and lambda_2*
        # the probability distributions corresponding to sigma_1*, sigma_2*:
        # p^{i}_1(k) = (<E^{i}_k, sigma_1*> + \epsilon_o/Ni)/(1 + \epsilon_o) and
        # p^{i}_2(k) = (<E^{i}_k, sigma_2*> + \epsilon_o/Ni)/(1 + \epsilon_o)
        p_1_opt = (np.array([self.omega1 * lambda_1_opt + self.omega2 * (1 - lambda_1_opt), (1 - self.omega1) * lambda_1_opt + (1 - self.omega2) * (1 - lambda_1_opt)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
        p_2_opt = (np.array([self.omega1 * lambda_2_opt + self.omega2 * (1 - lambda_2_opt), (1 - self.omega1) * lambda_2_opt + (1 - self.omega2) * (1 - lambda_2_opt)]) + self.epsilon_o/2) / (1. + self.epsilon_o)
        # (phi/alpha)* at the saddle point
        phi_alpha_opt = 0.5*np.log(p_1_opt/p_2_opt)
        # obtain phi* at the saddle point
        self.phi_opt = phi_alpha_opt * self.alpha_opt
        # find the constant in the estimator
        # c = 0.5 (Tr(rho sigma_1*) + Tr(rho sigma_2*)) = 0.5 (lambda_1* + lambda_2*)
        self.c = 0.5*(lambda_1_opt + lambda_2_opt)
        # build the estimator
        def estimator(data):
            """
            Given R independent and identically distributed elements from \Omega = {0, 1}
            (two possible outcomes) sampled as per p_{A(sigma)}, gives the estimate for
            the fidelity F(rho, sigma) = Tr(rho sigma):
                \hat{F}(\omega_1, ..., \omega_R) = \sum_{l = 1}^{R} phi*(\omega_l) + c
            """
            # if a list of lists is provided (the convention used by
            # Fidelity_Estimation_Manager), unwrap the inner list of outcomes
            if type(data[0]) in [list, tuple, np.ndarray]:
                data = data[0]
            # ensure that data has exactly R elements (R repetitions), because the estimator is built for just that case
            if len(data) != self.R:
                raise ValueError("The estimator is built to handle only %d outcomes, while %d outcomes have been supplied." %(self.R, len(data)))
            # start with the terms that don't depend on the POVMs
            estimate = self.c
            # build the estimate using the phi* component at the saddle point, accounting for data from the POVM
            estimate = estimate + np.sum([self.phi_opt[l] for l in data])
            return estimate
        self.estimator = estimator
        return (estimator, Phi_r_opt)
###----- Constructing the fidelity estimator
def generate_sampled_pauli_measurement_outcomes(rho, sigma, R, num_povm_list, epsilon_o, flip_outcomes = False):
    """
    Generates the outcomes (index pointing to appropriate POVM element) for a Pauli sampling measurement strategy.
    The strategy involves sampling the non-identity Pauli group elements, measuring them, and only using the
    eigenvalue (either +1 or -1) of the measured outcome.
    The sampling is done as per the probability distribution p_i = |tr(W_i rho)| / \sum_i |tr(W_i rho)|.
    We represent this procedure by an effective POVM containing two elements.
    If outcome eigenvalue is +1, that corresponds to index 0 of the effective POVM, while eigenvalue -1 corresponds to index 1 of the effective POVM.
    If flip_outcomes is True, we measure the Paulis (without their sign), and later flip the measurement outcomes (+1 <-> -1) as necessary. If not, we directly
    measure negative of the Pauli operator.
    The function requires the target state (rho) and the actual state "prepared in the lab" (sigma) as inputs.
    The states (density matrices) are expected to be flattened in row-major style.

    Parameters
    ----------
    rho : flattened (row-major) density matrix of the target state
    sigma : flattened (row-major) density matrix of the state actually measured
    R : total number of Pauli measurement repetitions (shots) to sample
    num_povm_list : int or sequence of ints
        number of POVM elements per sampled Pauli measurement; 2 selects
        eigenspace projectors, n selects rank-1 eigenbasis projectors
    epsilon_o : small error parameter forwarded to perform_measurements
    flip_outcomes : bool
        if True, measure Paulis without their sign and flip outcomes afterwards

    Returns
    -------
    effective_outcomes : list of int
        one entry per repetition; 0 means the +1 eigenvalue was observed,
        1 means the -1 eigenvalue was observed (effective two-element POVM)
    """
    # dimension of the system; rho is expected to be flattened, but this expression is agnostic to that
    n = int(np.sqrt(rho.size))
    # number of qubits
    nq = int(np.log2(n))
    if 2**nq != n:
        raise ValueError("Pauli measurements possible only in systems of qubits, i.e., the dimension should be a power of 2")
    # ensure that the states are flattened
    rho = rho.ravel()
    sigma = sigma.ravel()
    # index of each Pauli of which weights need to be computed
    pauli_index_list = range(1, 4**nq)
    # find Tr(rho W) for each Pauli operator W (identity excluded); this is only a heuristic weight if rho is not pure
    # these are not the same as Flammia & Liu weights
    # computing each Pauli operator individually (as opposed to computing a list of all Pauli operators at once) is a little slower, but can handle more number of qubits
    pauli_weight_list = [np.real(np.conj(rho).dot(generate_Pauli_operator(nq = nq, index_list = pauli_index, flatten = True)[0])) for pauli_index in pauli_index_list]
    # phase of each pauli operator (either +1 or -1)
    pauli_phase_list = [np.sign(pauli_weight) for pauli_weight in pauli_weight_list]
    # set of pauli operators along with their phases from which we will sample
    pauli_measurements = list(zip(pauli_index_list, pauli_phase_list))
    # probability distribution for with which the Paulis should be sampled
    pauli_sample_prob = np.abs(pauli_weight_list)
    # normalization factor for pauli probability
    NF = np.sum(pauli_sample_prob)
    # normalize the sampling probability
    pauli_sample_prob = pauli_sample_prob / NF
    # the effective POVM for minimax optimal strategy consists of just two POVM elements
    # however, the actual measurements performed are 'R' Pauli measurements which are uniformly sampled from the pauli operators
    # np.random.choice doesn't allow list of tuples directly, so indices are sampled instead
    # see https://stackoverflow.com/questions/30821071/how-to-use-numpy-random-choice-in-a-list-of-tuples/55517163
    uniformly_sampled_indices = np.random.choice(len(pauli_measurements), size = int(R), p = pauli_sample_prob)
    pauli_to_measure_with_repetitions = [pauli_measurements[index] for index in uniformly_sampled_indices]
    # unique Pauli measurements to be performed, with phase
    pauli_to_measure = sorted(list(set(pauli_to_measure_with_repetitions)), key = lambda x: x[0])
    # get the number of repetitions to be performed for each unique Pauli measurement (i.e., number of duplicates)
    R_list, _ = np.histogram([pauli_index for (pauli_index, _) in pauli_to_measure_with_repetitions], bins = [pauli_index for (pauli_index, _) in pauli_to_measure] + [pauli_to_measure[-1][0] + 1], density = False)
    # list of number of POVM elements for each (type of) measurement
    # if a number is provided, a list (of integers) is created from it
    if type(num_povm_list) not in [list, tuple, np.ndarray]:
        num_povm_list = [int(num_povm_list)] * len(R_list)
    else:
        num_povm_list = [int(num_povm) for num_povm in num_povm_list]
    # generate POVMs for measurement
    POVM_list = [None] * len(R_list)
    for (count, num_povm) in enumerate(num_povm_list):
        # index of pauli operator to measure, along with the phase
        pauli, phase = pauli_to_measure[count]
        if flip_outcomes:
            # don't include the phase while measuring
            # the phase is incorporated after the measurement outcomes are obtained
            phase = 1
        # generate POVM depending on whether projectors on subpace or projectors on each eigenvector is required
        # note that when n = 2, subspace and eigenbasis projectors match, in which case we give precedence to eigenbasis projection
        # this is because in the next block after measurements are generated, we check if num_povm is n and if that's true include phase
        # but if subspace was used first, then phase would already be included and this would be the same operation twice
        # so we use check for eigenbasis projection first
        if num_povm == n:
            # ensure that the supplied Pauli operator is a string composed of 0, 1, 2, 3
            if type(pauli) in [int, np.int64]:
                if pauli > 4**nq - 1:
                    raise ValueError("Each pauli must be a number between 0 and 4^{nq} - 1")
                # make sure pauli is a string
                pauli = np.base_repr(pauli, base = 4)
                # pad pauli with 0s on the left so that the total string is of size nq (as we need a Pauli operator acting on nq qubits)
                pauli = pauli.rjust(nq, '0')
            elif type(pauli) == str:
                # get the corresponding integer
                pauli_num = np.array(list(pauli), dtype = 'int')
                pauli_num = pauli_num.dot(4**np.arange(len(pauli) - 1, -1, -1))
                if pauli_num > 4**nq - 1:
                    raise ValueError("Each pauli must be a number between 0 and 4^{nq} - 1")
                # pad pauli with 0s on the left so that the total string is of size nq (as we need a Pauli operator acting on nq qubits)
                pauli = pauli.rjust(nq, '0')
            # we take POVM elements as rank 1 projectors on to the (orthonormal) eigenbasis of the Pauli operator specified by 'pauli' string
            # - first create the computation basis POVM and then use the Pauli operator strings to get the POVM in the respective Pauli basis
            computational_basis_POVM = generate_POVM(n = n, num_povm = n, projective = True, pauli = None, flatten = False, isComplex = True, verify = False)
            # - to get Pauli X basis, we can rotate the computational basis using Hadamard
            # - to get Pauli Y basis, we can rotate the computational basis using a matrix similar to Hadamard
            # use a dictionary to make these mappings
            comp_basis_transform_dict = {'0': np.eye(2, dtype = 'complex128'), '1': np.array([[1., 1.], [1., -1.]], dtype = 'complex128')/np.sqrt(2),\
                                            '2': np.array([[1., 1.], [1.j, -1.j]], dtype = 'complex128')/np.sqrt(2), '3': np.eye(2, dtype = 'complex128')}
            transform_matrix = np.eye(1)
            # pauli contains tensor product of nq 1-qubit Pauli operators, so parse through them to get a unitary mapping computational basis to Pauli eigenbasis
            for ithpauli in pauli:
                transform_matrix = np.kron(transform_matrix, comp_basis_transform_dict[ithpauli])
            # create the POVM by transforming the computational basis to given Pauli basis
            # the phase doesn't matter when projecting on to the eigenbasis; the eigenvalues are +1, -1 or +i, -i, depending on the phase but we can infer that upon measurement
            POVM = [transform_matrix.dot(Ei).dot(np.conj(transform_matrix.T)).ravel() for Ei in computational_basis_POVM]
        elif num_povm == 2:
            # the Pauli operator that needs to be measured
            Pauli_operator = phase * generate_Pauli_operator(nq, pauli)[0]
            # if W is the Pauli operator and P_+ and P_- are projectors on to the eigenspaces corresponding to +1 (+i) & -1 (-i) eigenvalues, then
            # l P_+ - l P_- = W, and P_+ + P_- = \id. We can solve for P_+ and P_- from this. l \in {1, i}, depending on the pase.
            # l = 1 or i can be obtained from the phase as sgn(phase) * phase, noting that phase is one of +1, -1, +i or -i
            P_plus = 0.5*(np.eye(n, dtype = 'complex128') + Pauli_operator / (phase * np.sign(phase)))
            P_minus = 0.5*(np.eye(n, dtype = 'complex128') - Pauli_operator / (phase * np.sign(phase)))
            POVM = [P_plus.ravel(), P_minus.ravel()]
        else:
            raise ValueError("Pauli measurements with only 2 or 'n' POVM elements are supported")
        # store the POVM for measurement
        POVM_list[count] = POVM
    # initiate the measurements
    # NOTE(review): Measurement_Manager attributes are assigned directly rather than
    # through its constructor - assumes perform_measurements only reads n, N,
    # POVM_mat_list and N_list; confirm against Measurement_Manager's implementation
    measurement_manager = Measurement_Manager(random_seed = None)
    measurement_manager.n = n
    measurement_manager.N = len(POVM_list)
    measurement_manager.POVM_mat_list = [np.vstack(POVM) for POVM in POVM_list]
    measurement_manager.N_list = [len(POVM) for POVM in POVM_list]
    # perform the measurements
    data_list = measurement_manager.perform_measurements(sigma, R_list, epsilon_o, num_sets_outcomes = 1, return_outcomes = True)[0]
    # convert the outcomes of the Pauli measurements to those of the effective POVM
    effective_outcomes = list()
    for (count, data) in enumerate(data_list):
        num_povm = num_povm_list[count]
        pauli_index, phase = pauli_to_measure[count]
        if flip_outcomes:
            # store the actual phase for later use
            actual_phase = int(phase)
            # Pauli were measured without the phase, so do the conversion of outcomes to those of effective POVM with that in mind
            phase = 1
        # for num_povm = 2, there is nothing to do because outcome '0' corresponds to +1 eigenvalue and outcome 1 corresponds to -1 eigenvalue
        # if flip_outcomes is False, then these are also the outcomes for the effective POVM because phase was already accounted for during measurement
        # if flip_outcomes is True, then we will later flip the outcome index (0 <-> 1) to account for the phase
        # for num_povm = n, we need to figure out the eigenvalue corresponding to outcome (an index from 0 to n - 1, pointing to the basis element)
        # we map +1 value to 0 and -1 eigenvalue to 1, which corresponds to the respective indices of elements in the effective POVM
        if num_povm == n:
            # all Paulis have eigenvalues 1, -1, but we are doing projective measurements onto the eigenbasis of Pauli operators
            # so, half of them will have +1 eigenvalue, the other half will have -1 eigenvalue
            # we are mapping the computational basis to the eigenbasis of the Pauli operator to perform the measurement
            # 0 for the ith qubit goes to the +1 eigenvalue eigenstate of the ith Pauli, and
            # 1 for the ith qubit goes to the -1 eigenvalue eigenstate of the ith Pauli
            # the exception is when the ith Pauli is identity, where the eigenstate is as described above but eigenvalue is always +1
            # therefore, we assign an "eigenvalue weight" of 1 to non-identity 1-qubit Paulis (X, Y, Z) and an "eigenvalue weight" of 0 to the 1-qubit identity
            # we then write the nq-qubit Pauli string W as an array of above weights w_1w_2...w_nq, where w_i is the "eigenvalue weight" of the ith Pauli in W
            # then the computational basis state |i_1i_2...i_nq> has the eigenvalue (-1)^(i_1*w_1 + ... + i_nq*w_nq) when it has been transformed to an
            # however, if the Pauli operator has a non-identity phase, the +1 and -1 eigenvalue are appropriately changed
            # the general expression for eigenvalue takes the form phase * (-1)^(i_1*w_1 + ... + i_nq*w_nq)
            # eigenstate of the Pauli operator W (using the transform_matrix defined in qi_utilities.generate_POVM)
            # so given a pauli index (a number from 0 to 4^nq - 1), obtain the array of "eigenvalue weight" representing the Pauli operator as described above
            # for this, convert the pauli index to an array of 0, 1, 2, 3 representing the Pauli operator (using np.base_repr, np.array), then set non-zero elements to 1 (using np.where)
            pauli_eigval_weight = lambda pauli_index: np.where(np.array(list(np.base_repr(pauli_index, base = 4).rjust(nq, '0')), dtype = 'int8') == 0, 0, 1)
            # get array of 0, 1 representing the computational basis element from the index (a number from 0 to 2^nq - 1) of the computational basis
            computational_basis_array = lambda computational_basis_index: np.array(list(np.base_repr(computational_basis_index, base = 2).rjust(nq, '0')), dtype = 'int8')
            # for the eigenvalues from the (computational basis) index of the outcome for each pauli measurement performed
            # to convert the eigenvalue (+1 or -1) to index (0 or 1, respectively), we do the operation (1 - e) / 2, where e is the eigenvalue
            # type-casted to integers because an index is expected as for each outcome
            data = [int(np.real( (1 - phase*(-1)**(computational_basis_array(outcome_index).dot(pauli_eigval_weight(pauli_index)))) / 2 )) for outcome_index in data]
        if flip_outcomes and actual_phase == -1:
            # now that we have the data for the effective POVM (without considering the phase), we can flip the outcomes as necessary
            data = [1 - outcome_index for outcome_index in data]
        # include this in the list of outcomes for the effective measurement
        effective_outcomes.extend(data)
    return effective_outcomes
def fidelity_estimation_pauli_random_sampling(target_state = 'random', nq = 2, num_povm_list = 2, R = 100, epsilon = 0.05, risk = None, epsilon_o = 1e-5, noise = True,\
                                              noise_type = 'depolarizing', state_args = None, flip_outcomes = False, tol = 1e-6, random_seed = 1, verify_estimator = False,\
                                              print_result = True, write_to_file = False, dirpath = './Data/Computational/', filename = 'temp'):
    """
    Generates the target_state defined by 'target_state' and state_args, and finds an estimator for fidelity using Juditsky & Nemirovski's approach for a specific measurement scheme
    involving random sampling of Pauli operators.
    The specialized approach allows for computation of the estimator for very large dimensions.
    The random sampling is done as per the probability distribution p_i = |tr(W_i rho)| / \sum_i |tr(W_i rho)|, where W_i is the ith Pauli operator and rho is the target state.
    This random sampling is accounted for by a single POVM, so number of types of measurement (N) is just one.
    The estimator and the risk only depend on the dimension, the number of repetitions, the confidence level, and the normalization factor NF = \sum_i |tr(W_i rho)|.
    If risk is a number less than 0.5, the number of repetitions of the minimax optimal measurement is chosen so that the risk of the estimator is less than or equal to the given risk.
    The argument R is ignored in this case.
    Checks are not performed to ensure that the given set of generators indeed form generators.
    If verify_estimator is true, the estimator constructed for the special case of randomized Pauli measurement strategy is checked with the general construction
    for Juditsky & Nemirovski's estimator.

    NOTE(review): the parameters noise_type, write_to_file, dirpath and filename are
    accepted but never used in this function body (depolarizing noise is hard-coded
    and nothing is written to disk) - confirm whether this was intended.

    Returns the Pauli_Sampler_Fidelity_Estimation_Manager instance (and, when
    verify_estimator is True, also the general Fidelity_Estimation_Manager_Corrected).
    """
    # set the random seed once here and nowhere else
    if random_seed:
        np.random.seed(int(random_seed))
    # number of qubits
    nq = int(nq)
    # dimension of the system
    n = int(2**nq)
    ### create the states
    # create the target state from the specified generators
    target_state = str(target_state).lower()
    if target_state in ['ghz', 'w', 'cluster']:
        state_args_dict = {'ghz': {'d': 2, 'M': nq}, 'w': {'nq': nq}, 'cluster': {'nq': nq}}
        rho = generate_special_state(state = target_state, state_args = state_args_dict[target_state], density_matrix = True,\
                                        flatten = True, isComplex = True)
    elif target_state == 'stabilizer':
        generators = state_args['generators']
        # if generators are specified using I, X, Y, Z, convert them to 0, 1, 2, 3
        generators = [g.lower().translate(str.maketrans('ixyz', '0123')) for g in generators]
        rho = generate_special_state(state = 'stabilizer', state_args = {'nq': nq, 'generators': generators}, density_matrix = True, flatten = True, isComplex = True)
    elif target_state == 'random':
        rho = generate_random_state(n = n, pure = True, density_matrix = True, flatten = True, isComplex = True, verify = False, random_seed = None)
    else:
        raise ValueError("Please specify a valid target state. Currently supported arguments are GHZ, W, Cluster, stabilizer and random.")
    # apply noise to the target state to create the actual state ("prepared in the lab")
    if not ((noise is None) or (noise is False)):
        # the target state decoheres due to noise
        if type(noise) in [int, float]:
            if not (noise >= 0 and noise <= 1):
                raise ValueError("noise level must be between 0 and 1")
            sigma = depolarizing_channel(rho, p = noise)
        else:
            # noise given as a truthy non-number (e.g. True): use a default level of 0.1
            sigma = depolarizing_channel(rho, p = 0.1)
    else:
        # NOTE(review): with noise None/False the "lab" state is a fresh random mixed
        # state, completely unrelated to the target state - confirm this is intended
        sigma = generate_random_state(n, pure = False, density_matrix = True, flatten = True, isComplex = True, verify = False,\
                                        random_seed = None)
    ### generate the measurement outcomes for the effective (minimax optimal) POVM
    # calculate the normalization factor
    # computing each Pauli operator individually (as opposed to computing a list of all Pauli operators at once) is a little slower, but can handle more number of qubits
    NF = np.sum([np.abs(np.conj(rho).dot(generate_Pauli_operator(nq = nq, index_list = pauli_index, flatten = True)[0])) for pauli_index in range(1, 4**nq)])
    # if risk is given, then choose the number of repetitions to achieve that risk (or a slightly lower risk)
    if risk is not None:
        if risk < 0.5:
            R = int(np.ceil(2*np.log(2/epsilon) / np.abs(np.log(1 - (n/NF)**2 * risk**2))))
        else:
            raise ValueError("Only risk < 0.5 can be achieved by choosing appropriate number of repetitions of the minimax optimal measurement.")
    effective_outcomes = generate_sampled_pauli_measurement_outcomes(rho, sigma, R, num_povm_list, epsilon_o, flip_outcomes)
    ### obtain the fidelity estimator
    PSFEM = Pauli_Sampler_Fidelity_Estimation_Manager(n, R, NF, epsilon, epsilon_o, tol)
    # the parameter 'risk' is rebound here with the achieved risk of the constructed estimator
    fidelity_estimator, risk = PSFEM.find_fidelity_estimator()
    # obtain the estimate
    estimate = fidelity_estimator(effective_outcomes)
    # verify the estimator created for the specialized case using the general approach
    if verify_estimator:
        # the effective POVM for the optimal measurement strategy is simply {omega_1 rho + omega_2 Delta_rho, (1 - omega_1) rho + (1 - omega_2) Delta_rho},
        # where omega_1 = (n + NF - 1)/2NF, omega_2 = (NF - 1)/2NF, and Delta_rho = I - rho
        omega1 = 0.5 * (n + NF - 1) / NF
        omega2 = 0.5 * (1 - 1/NF)
        Delta_rho = np.eye(2**nq).ravel() - rho
        POVM_list = [[omega1 * rho + omega2 * Delta_rho, (1 - omega1) * rho + (1 - omega2) * Delta_rho]]
        # Juditsky & Nemirovski estimator
        FEMC = Fidelity_Estimation_Manager_Corrected(R, epsilon, rho, POVM_list, epsilon_o, tol)
        fidelity_estimator_general, risk_general = FEMC.find_fidelity_estimator()
        # matrices at optimum
        sigma_1_opt, sigma_2_opt = embed_hermitian_matrix_real_vector_space(FEMC.sigma_1_opt, reverse = True, flatten = True), embed_hermitian_matrix_real_vector_space(FEMC.sigma_2_opt, reverse = True, flatten = True)
        # constraint at optimum
        constraint_general = np.real(np.sum([np.sqrt((np.conj(Ei).dot(sigma_1_opt) + epsilon_o/2)*(np.conj(Ei).dot(sigma_2_opt) + epsilon_o/2)) / (1 + epsilon_o) for Ei in POVM_list[0]]))
    if print_result:
        print("True fidelity", np.real(np.conj(rho).dot(sigma)))
        print("Estimate", estimate)
        print("Risk", risk)
        print("Repetitions", R)
        # print results from the general approach
        if verify_estimator:
            print("Risk (general)", risk_general)
            print("Constraint (general)", constraint_general, "Lower constraint bound", (epsilon / 2)**(1/R))
    if not verify_estimator:
        return PSFEM
    else:
        return (PSFEM, FEMC)
| 59.258457 | 226 | 0.643999 |
c95c3a9b1e12620c6fdf7ce0fba7e46782237c62 | 2,054 | py | Python | until.py | zlinao/COMP5212-project1 | fa6cb10d238de187fbb891499916c6b44a0cd7b7 | [
"Apache-2.0"
] | 3 | 2018-09-19T11:46:53.000Z | 2018-10-09T04:48:28.000Z | until.py | zlinao/COMP5212-project1 | fa6cb10d238de187fbb891499916c6b44a0cd7b7 | [
"Apache-2.0"
] | null | null | null | until.py | zlinao/COMP5212-project1 | fa6cb10d238de187fbb891499916c6b44a0cd7b7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 10:29:52 2018
@author: lin
"""
import numpy as np
import matplotlib.pyplot as plt
def accuracy(x, y, model):
    """Fraction of samples whose thresholded prediction matches the label.

    The model's predicted probabilities are binarized at 0.5 (>= 0.5 -> 1,
    otherwise 0) and compared element-wise against y.
    """
    probs = model.predict(x, y)
    preds = (probs >= 0.5).astype(probs.dtype)
    return np.sum(preds == y) / len(preds)
# Pre-load the five benchmark datasets (compressed numpy .npz archives).
# NOTE(review): these run at import time and require ./datasets to exist;
# the functions above/below take the dataset as a parameter instead.
data1 = np.load("datasets/breast-cancer.npz")
data2 = np.load("datasets/diabetes.npz")
data3 = np.load("datasets/digit.npz")
data4 = np.load("datasets/iris.npz")
data5 = np.load("datasets/wine.npz")
def run_epoch(data, model, batch_size, lr):
    """Run one training pass over data["train_X"]/data["train_Y"] in mini-batches.

    Parameters: data is a dict-like holding "train_X" (2-D array of samples)
    and "train_Y" (labels); model must provide train(X, y, lr) returning
    per-sample probabilities in (0, 1); batch_size is the mini-batch size;
    lr is forwarded to the model.

    Returns (loss_avg, acc): mean binary cross-entropy per sample over the
    epoch, and the training-set accuracy computed after the epoch.
    """
    n_samples = len(data["train_X"])
    # Ceil division fixes an off-by-one: the old `// batch_size + 1` issued an
    # extra *empty* batch whenever n_samples was an exact multiple of batch_size.
    epoch_size = (n_samples + batch_size - 1) // batch_size
    loss_total = 0
    for step in range(epoch_size):
        # Slicing past the end is safe in numpy, so the final (possibly short)
        # batch needs no special case.
        input_data = data["train_X"][step * batch_size:(step + 1) * batch_size, :]
        labels = data["train_Y"][step * batch_size:(step + 1) * batch_size]
        a = model.train(input_data, labels, lr)
        # summed binary cross-entropy for this batch
        loss = -np.sum(labels * np.log(a) + (1 - labels) * np.log(1 - a))
        loss_total += loss
    loss_avg = loss_total / n_samples
    acc = accuracy(data["train_X"], data["train_Y"], model)
    return loss_avg, acc
def plot_loss_acc(loss, acc, i):
    """Plot per-epoch training loss and accuracy curves for dataset i+1.

    Two dedicated figure numbers are used per dataset (2i+1 for loss,
    2i+2 for accuracy) so calls for different datasets do not overwrite
    each other's figures.
    """
    plt.figure(1 + 2 * i)
    plt.plot(loss, label='loss per epoch')
    plt.title("dataset" + str(i + 1) + " training loss")
    plt.legend()
    plt.xlabel('epoch_num')
    plt.figure(2 + 2 * i)
    # fixed label typo: 'accuray' -> 'accuracy'
    plt.plot(acc, color='orange', label='accuracy per epoch')
    plt.title("dataset" + str(i + 1) + " training accuracy")
    plt.legend()
    plt.xlabel('epoch_num')
def sigmoid(x):
    """Logistic function 1 / (1 + e^(-x)); works element-wise on arrays."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def choose_dataset(choice, config1):
    """Apply the dataset preset selected by choice (1-5) to config1.

    Invokes the matching preset method on config1 and returns config1;
    an unknown choice prints a usage hint and leaves config1 untouched.
    """
    method_by_choice = {
        1: 'cancer',
        2: 'diabetes',
        3: 'digit',
        4: 'iris',
        5: 'wine',
    }
    method_name = method_by_choice.get(choice)
    if method_name is None:
        print("please choose the dataset number : 1-5")
    else:
        getattr(config1, method_name)()
    return config1
| 25.358025 | 79 | 0.601266 |
c960f97df84624c96f4c85fc91f46edd0a467d9e | 11,996 | py | Python | dumpfreeze/main.py | rkcf/dumpfreeze | e9b18e4bc4574ff3b647a075cecd72977dc8f59a | [
"MIT"
] | 1 | 2020-01-30T17:59:50.000Z | 2020-01-30T17:59:50.000Z | dumpfreeze/main.py | rkcf/dumpfreeze | e9b18e4bc4574ff3b647a075cecd72977dc8f59a | [
"MIT"
] | null | null | null | dumpfreeze/main.py | rkcf/dumpfreeze | e9b18e4bc4574ff3b647a075cecd72977dc8f59a | [
"MIT"
] | null | null | null | # dumpfreeze
# Create MySQL dumps and backup to Amazon Glacier
import os
import logging
import datetime
import click
import uuid
import sqlalchemy as sa
from dumpfreeze import backup as bak
from dumpfreeze import aws
from dumpfreeze import inventorydb
from dumpfreeze import __version__
# Module-level logger; verbosity is configured in main() via logging.basicConfig
logger = logging.getLogger(__name__)
def abort_if_false(ctx, param, value):
    """Click option callback: abort the running command when *value* is falsy.

    Used as the confirmation callback for --yes prompts; *param* is unused.
    """
    if value:
        return
    ctx.abort()
@click.group()
@click.option('-v', '--verbose', count=True)
@click.option('--local-db', default='~/.dumpfreeze/inventory.db')
@click.version_option(__version__, prog_name='dumpfreeze')
@click.pass_context
def main(ctx, verbose, local_db):
    """ Create and manage MySQL dumps locally and on AWS Glacier """
    # Set logger verbosity: -v ERROR, -vv INFO, -vvv DEBUG, default CRITICAL
    # NOTE(review): more than three -v flags also falls through to CRITICAL - confirm intended
    if verbose == 1:
        logging.basicConfig(level=logging.ERROR)
    elif verbose == 2:
        logging.basicConfig(level=logging.INFO)
    elif verbose == 3:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.CRITICAL)
    # Check if db exists, if not create it
    expanded_db_path = os.path.expanduser(local_db)
    if not os.path.isfile(expanded_db_path):
        inventorydb.setup_db(expanded_db_path)
    # Create db session factory and share it with all subcommands via the click context
    db_engine = sa.create_engine('sqlite:///' + expanded_db_path)
    Session = sa.orm.sessionmaker(bind=db_engine)
    ctx.obj['session_maker'] = Session
    return
# Backup operations
@click.group()
@click.pass_context
def backup(ctx):
    """ Operations on local backups """
    # Container group only; subcommands are registered via @backup.command below
    pass
@backup.command('create')
@click.option('--user', default='root', help='Database user')
@click.option('--backup-dir',
              default=os.getcwd(),
              help='Backup storage directory')
@click.argument('database')
@click.pass_context
def create_backup(ctx, database, user, backup_dir):
    """ Create a mysqldump backup

    Dumps DATABASE into --backup-dir under a freshly generated UUID,
    records the backup in the local inventory db and echoes the UUID.
    """
    backup_uuid = uuid.uuid4().hex
    try:
        bak.create_dump(database, user, backup_dir, backup_uuid)
    except Exception as e:
        # dump creation failed: log and exit non-zero
        logger.critical(e)
        raise SystemExit(1)
    today = datetime.date.isoformat(datetime.datetime.today())
    # Insert backup info into backup inventory db
    backup_info = inventorydb.Backup(id=backup_uuid,
                                     database_name=database,
                                     backup_dir=backup_dir,
                                     date=today)
    local_db = ctx.obj['session_maker']()
    backup_info.store(local_db)
    # Echo the UUID so scripts can capture it
    click.echo(backup_uuid)
@backup.command('upload')
@click.option('--vault', required=True, help='Vault to upload to')
@click.argument('backup_uuid', metavar='UUID')
@click.pass_context
def upload_backup(ctx, vault, backup_uuid):
    """ Upload a local backup dump to AWS Glacier

    Looks up the backup by UUID in the local inventory, uploads its .sql
    file to --vault, records the resulting archive in the inventory and
    echoes the new archive UUID.
    """
    # Get backup info
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Backup)
        backup_info = query.filter_by(id=backup_uuid).one()
    except Exception as e:
        # lookup failed (unknown UUID or db error): log, roll back and exit
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # NOTE(review): backup_info is read after its session is closed - relies on
    # the attributes being already loaded on the detached instance; confirm
    # Construct backup path
    backup_file = backup_info.id + '.sql'
    backup_path = os.path.join(backup_info.backup_dir, backup_file)
    # Upload backup_file to Glacier
    try:
        upload_response = aws.glacier_upload(backup_path, vault)
    except Exception as e:
        logger.critical(e)
        raise SystemExit(1)
    archive_uuid = uuid.uuid4().hex
    # Insert archive info into archive inventory db
    archive_info = inventorydb.Archive(id=archive_uuid,
                                       aws_id=upload_response['archiveId'],
                                       location=upload_response['location'],
                                       vault_name=vault,
                                       database_name=backup_info.database_name,
                                       date=backup_info.date)
    local_db = ctx.obj['session_maker']()
    archive_info.store(local_db)
    # Echo the archive UUID so scripts can capture it
    click.echo(archive_uuid)
@backup.command('restore')
@click.option('--user', default='root', help='Database user')
@click.argument('backup_uuid', metavar='UUID')
@click.pass_context
def restore_backup(ctx, user, backup_uuid):
    """ Restore a backup to the database

    Looks up the backup by UUID in the local inventory and restores it via
    bak.restore_dump using the recorded database name and directory.
    """
    # Get backup info
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Backup)
        backup_info = query.filter_by(id=backup_uuid).one()
    except Exception as e:
        # lookup failed (unknown UUID or db error): log, roll back and exit
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Restore backup to database
    bak.restore_dump(backup_info.database_name,
                     user,
                     backup_info.backup_dir,
                     backup_info.id)
@backup.command('delete')
@click.argument('backup_uuid', metavar='UUID')
@click.option('--yes',
              '-y',
              is_flag=True,
              callback=abort_if_false,
              expose_value=False,
              prompt='Delete backup?',)
@click.pass_context
def delete_backup(ctx, backup_uuid):
    """ Delete a local dump backup

    Prompts for confirmation (answering no aborts via abort_if_false),
    removes the .sql file from disk and drops the inventory record,
    echoing the deleted backup's UUID.
    """
    # Get backup info
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Backup)
        backup_info = query.filter_by(id=backup_uuid).one()
    except Exception as e:
        # lookup failed (unknown UUID or db error): log, roll back and exit
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Construct backup path
    backup_file = backup_info.id + '.sql'
    backup_path = os.path.join(backup_info.backup_dir, backup_file)
    # Delete file
    os.remove(backup_path)
    # Remove from db
    local_db = ctx.obj['session_maker']()
    backup_info.delete(local_db)
    click.echo(backup_info.id)
@backup.command('list')
@click.pass_context
def list_backup(ctx):
    """ Return a list of all local backups

    Prints a left-justified table (UUID, DATABASE, LOCATION, DATE) of every
    backup recorded in the local inventory db.
    """
    # Get Inventory
    local_db = ctx.obj['session_maker']()
    try:
        backups = local_db.query(inventorydb.Backup).all()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # do some formatting for printing
    formatted = []
    for backup in backups:
        formatted.append([backup.id,
                          backup.database_name,
                          backup.backup_dir,
                          backup.date])
    # Add header
    formatted.insert(0, ['UUID', 'DATABASE', 'LOCATION', 'DATE'])
    # Calculate widths: widest cell per column (header included)
    widths = [max(map(len, column)) for column in zip(*formatted)]
    # Print inventory
    for row in formatted:
        print("  ".join((val.ljust(width)
                         for val, width in zip(row, widths))))
# Archive operations
@click.group()
@click.pass_context
def archive(ctx):
    """ Operations on AWS Glacier Archives """
    # Container group only; subcommands are registered via @archive.command below
    pass
@archive.command('delete')
@click.argument('archive_uuid', metavar='UUID')
@click.option('--yes',
              '-y',
              is_flag=True,
              callback=abort_if_false,
              expose_value=False,
              prompt='Delete archive?')
@click.pass_context
def delete_archive(ctx, archive_uuid):
    """ Delete an archive on AWS Glacier

    Prompts for confirmation (answering no aborts via abort_if_false),
    requests deletion on AWS and drops the inventory record, echoing the
    deleted archive's UUID.
    """
    # Get archive info
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Archive)
        archive_info = query.filter_by(id=archive_uuid).one()
    except Exception as e:
        # lookup failed (unknown UUID or db error): log, roll back and exit
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Send delete job to AWS
    aws.delete_archive(archive_info)
    # Remove from db
    local_db = ctx.obj['session_maker']()
    archive_info.delete(local_db)
    click.echo(archive_uuid)
@archive.command('retrieve')
@click.argument('archive_uuid', metavar='UUID')
@click.pass_context
def retrieve_archive(ctx, archive_uuid):
    """ Initiate an archive retrieval from AWS Glacier

    Starts an asynchronous retrieval job on AWS and records it locally;
    poll-jobs later downloads the data once the job completes.
    """
    # Get archive info
    local_db = ctx.obj['session_maker']()
    try:
        query = local_db.query(inventorydb.Archive)
        archive_info = query.filter_by(id=archive_uuid).one()
    except Exception as e:
        # lookup failed (unknown UUID or db error): log, roll back and exit
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Initiate archive retrieval job
    job_response = aws.retrieve_archive(archive_info)
    # Insert job info into the job inventory db
    job_info = inventorydb.Job(account_id=job_response[0],
                               vault_name=job_response[1],
                               id=job_response[2])
    local_db = ctx.obj['session_maker']()
    job_info.store(local_db)
@archive.command('list')
@click.pass_context
def list_archive(ctx):
    """ Return a list of uploaded archives

    Prints a left-justified table (UUID, VAULT, DATABASE, DATE) of every
    archive recorded in the local inventory db.
    """
    # Get inventory
    local_db = ctx.obj['session_maker']()
    try:
        archives = local_db.query(inventorydb.Archive).all()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # do some formatting for printing
    formatted = []
    for archive in archives:
        formatted.append([archive.id,
                          archive.vault_name,
                          archive.database_name,
                          archive.date])
    # Add header
    formatted.insert(0, ('UUID', 'VAULT', 'DATABASE', 'DATE'))
    # Calculate widths: widest cell per column (header included)
    widths = [max(map(len, column)) for column in zip(*formatted)]
    # Print inventory
    for row in formatted:
        print("  ".join((val.ljust(width)
                         for val, width in zip(row, widths))))
@click.command('poll-jobs')
@click.pass_context
def poll_jobs(ctx):
    """ Check each job in job list, check for completion,
    and download job data

    For every completed retrieval job: downloads the archive data, writes it
    as a new <uuid>.sql backup in the current directory, records the backup
    in the inventory, drops the finished job and echoes the new backup UUID.
    """
    # Get job list
    local_db = ctx.obj['session_maker']()
    try:
        job_list = local_db.query(inventorydb.Job).all()
    except Exception as e:
        logger.critical(e)
        local_db.rollback()
        raise SystemExit(1)
    finally:
        local_db.close()
    # Check for job completion
    for job in job_list:
        logger.info('Checking job %s for completion', job.id)
        if aws.check_job(job):
            logger.info('Job %s complete, getting data', job.id)
            # Pull archive data
            backup_data = aws.get_archive_data(job)
            # Store backup data as new file
            backup_dir = os.getcwd()
            backup_uuid = uuid.uuid4().hex
            backup_file = backup_uuid + '.sql'
            backup_path = os.path.join(backup_dir, backup_file)
            with open(backup_path, 'w') as f:
                f.write(backup_data)
            # Get corresponding archive data (matched by the AWS archive id)
            archive_id = aws.get_job_archive(job)
            local_db = ctx.obj['session_maker']()
            try:
                query = local_db.query(inventorydb.Archive)
                archive_info = query.filter_by(aws_id=archive_id).one()
            except Exception as e:
                logger.critical(e)
                local_db.rollback()
                raise SystemExit(1)
            finally:
                local_db.close()
            database_name = archive_info.database_name
            backup_date = archive_info.date
            # Insert backup info into backup inventory db
            backup_info = inventorydb.Backup(id=backup_uuid,
                                             database_name=database_name,
                                             backup_dir=backup_dir,
                                             date=backup_date)
            local_db = ctx.obj['session_maker']()
            backup_info.store(local_db)
            # Delete job from db
            local_db = ctx.obj['session_maker']()
            job.delete(local_db)
            click.echo(backup_uuid)
# Attach the command groups and the standalone poll-jobs command to the CLI root
main.add_command(backup)
main.add_command(archive)
main.add_command(poll_jobs, name='poll-jobs')

if __name__ == '__main__':
    # Entry-point guard: run the CLI only when executed as a script, so that
    # importing this module (e.g. from tests or setuptools entry points) does
    # not immediately invoke click and exit the interpreter.
    main(obj={})
| 29.766749 | 79 | 0.614288 |
c96260912cab6b5833f970ad06a26821cebe5439 | 886 | py | Python | 01-tapsterbot/click-accuracy/makeTestData.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | 01-tapsterbot/click-accuracy/makeTestData.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | 01-tapsterbot/click-accuracy/makeTestData.py | AppTestBot/AppTestBot | 035e93e662753e50d7dcc38d6fd362933186983b | [
"Apache-2.0"
] | null | null | null | import csv
# Parsed CLI arguments; assigned in the __main__ block below (unused by main())
FLAGS = None
def main():
    """Write the full grid of (h, w) click coordinates to dataset/test.csv.

    Sweeps w from -60 to 60 in steps of 7.5 and, for each w, h from -28 to
    26 in steps of 6.75, writing one "h,w" row per grid point.
    """
    with open('dataset/test.csv', 'w') as csv_file:
        grid_writer = csv.writer(csv_file, delimiter=',')
        col = -60
        while col <= 60:
            row = -28
            while row <= 26:
                grid_writer.writerow([row, col])
                row += 6.75
            col += 7.5
if __name__ == "__main__":
    # CLI entry point: parse optional grid dimensions, then build the csv.
    import argparse
    parser = argparse.ArgumentParser(description='make coordinate.csv for data')
    parser.add_argument('--width', '-w', type=int,
                        required=False,
                        help='input width')
    parser.add_argument('--height', '-t', type=int,
                        required=False,
                        help='input height')
    # NOTE(review): FLAGS is populated here but main() uses hard-coded bounds
    # (-60..60, -28..26); the FLAGS-based loops in main() are commented out -
    # confirm whether --width/--height were meant to take effect.
    FLAGS = parser.parse_args()
    main()
| 28.580645 | 80 | 0.497743 |
c96277ac68a88dc09c944967b21d05e1368096d4 | 3,546 | py | Python | CreateBigDataFrame.py | ezsolti/MVA_PWR_data | 3e64c5b1bd643d5ba5d6e275b426d601cff7b270 | [
"MIT"
] | 2 | 2022-02-04T10:47:37.000Z | 2022-03-15T13:03:19.000Z | CreateBigDataFrame.py | ezsolti/MVA_PWR_data | 3e64c5b1bd643d5ba5d6e275b426d601cff7b270 | [
"MIT"
] | null | null | null | CreateBigDataFrame.py | ezsolti/MVA_PWR_data | 3e64c5b1bd643d5ba5d6e275b426d601cff7b270 | [
"MIT"
] | 1 | 2022-01-13T15:55:17.000Z | 2022-01-13T15:55:17.000Z | """
Script to create dataframe from serpent bumat files
including all the nuclides.
Zsolt Elter 2019
"""
import json
import os
with open ('nuclides.json') as json_file:
nuclidesDict = json.load(json_file)
#final name of the file
dataFrame='PWR_UOX-MOX_BigDataFrame-SF-GSRC-noReactorType.csv'
def readInventory(filename):
"""Function to read Serpent bumat files
Parameter
---------
filename : str
path to the bumatfile to be read
Returns
-------
inventory : dict
dictionary to store the inventory. keys are ZAID identifiers (str), values
are atom densities (str) in b^{-1}cm^{-1}
"""
mat=open(filename)
matfile=mat.readlines()
mat.close()
inventory={}
for line in matfile[6:]:
x=line.strip().split()
inventory[x[0][:-4]]=x[1]
return inventory
#header of file
dataFrameStr=',BU,CT,IE,fuelType,TOT_SF,TOT_GSRC,TOT_A,TOT_H'
for nuclIDi in nuclidesDict.values():
dataFrameStr=dataFrameStr+',%s'%nuclIDi #here we add the nuclide identifier to the header!
dataFrameStr=dataFrameStr+'\n'
#header ends
f = open(dataFrame,'w')
f.write(dataFrameStr)
f.close()
#let's open the file linking to the outputs
csv=open('file_log_PWR_UOX-MOX.csv').readlines()
depfileOld=''
for line in csv[1:]:
x=line.strip().split(',')
####SFRATE AND GSRC
if x[4]=='UOX':
deppath='/UOX/serpent_files/' #since originally I have not included a link to the _dep.m file, here I had to fix that
depfileNew='%s/IE%d/BU%d/sPWR_IE_%d_BU_%d_dep.m'%(deppath,10*float(x[3]),10*float(x[1]),10*float(x[3]),10*float(x[1])) #and find out from the BIC parameters
else: #the path to the _dep.m file...
deppath='/MOX/serpent_files/'
depfileNew='%s/IE%d/BU%d/sPWR_MOX_IE_%d_BU_%d_dep.m'%(deppath,10*float(x[3]),10*float(x[1]),10*float(x[3]),10*float(x[1]))
if depfileNew != depfileOld: #of course there is one _dep.m file for all the CT's for a given BU-IE, so we keep track what to open. And we only do it once
#things we grep here are lists!
TOTSFs=os.popen('grep TOT_SF %s -A 2'%depfileNew).readlines()[2].strip().split() #not the most time efficient greping, but does the job
TOTGSRCs=os.popen('grep TOT_GSRC %s -A 2'%depfileNew).readlines()[2].strip().split()
TOTAs=os.popen('grep "TOT_A =" %s -A 2'%depfileNew).readlines()[2].strip().split() #TOT_A in itself matches TOT_ADENS, that is why we need "" around it
TOTHs=os.popen('grep TOT_H %s -A 2'%depfileNew).readlines()[2].strip().split()
depfileOld=depfileNew
else:
depfileOld=depfileNew
####
inv=readInventory(x[-1]) #extract inventory from the outputfile
idx=int(x[-1][x[-1].find('bumat')+5:]) #get an index, since we want to know which value from the list to take
totsf=TOTSFs[idx]
totgsrc=TOTGSRCs[idx]
tota=TOTAs[idx]
toth=TOTHs[idx]
#we make a big string for the entry, storing all the columns
newentry=x[0]+','+x[1]+','+x[2]+','+x[3]+','+x[4]+','+totsf+','+totgsrc+','+tota+','+toth
for nucli in nuclidesDict.keys():
newentry=newentry+',%s'%(inv[nucli])
newentry=newentry+'\n'
#entry is created, so we append
f = open(dataFrame,'a')
f.write(newentry)
f.close()
#and we print just to see where is the process at.
if int(x[0])%1000==0:
print(x[0])
| 35.818182 | 164 | 0.620135 |
c963dca9a730234f66f325086da0df26ded50d93 | 453 | py | Python | todolist_backend/cli.py | RenoirTan/TodoListBackend | 149bdf1d883891c87b27f01996816bff251f11d8 | [
"MIT"
] | null | null | null | todolist_backend/cli.py | RenoirTan/TodoListBackend | 149bdf1d883891c87b27f01996816bff251f11d8 | [
"MIT"
] | null | null | null | todolist_backend/cli.py | RenoirTan/TodoListBackend | 149bdf1d883891c87b27f01996816bff251f11d8 | [
"MIT"
] | null | null | null | from mongoengine import disconnect
from waitress import serve
from todolist_backend.server import app, get_configs
from .database import panic_init
from .info import MONGOENGINE_ALIAS
def run_debug():
panic_init()
app.run(**get_configs())
# disconnect(alias=MONGOENGINE_ALIAS)
def run_production():
panic_init()
configs = get_configs()
configs.pop("debug")
serve(app, **configs)
# disconnect(alias=MONGOENGINE_ALIAS)
| 22.65 | 52 | 0.743929 |
c964301c7d47d614f521b894d1e55685f398fbd2 | 86 | py | Python | sample_code/002_add.py | kaede-san0910/workshop-2022 | 961d3ebfc899565aa6913c90b08881ef857ca945 | [
"Apache-2.0"
] | null | null | null | sample_code/002_add.py | kaede-san0910/workshop-2022 | 961d3ebfc899565aa6913c90b08881ef857ca945 | [
"Apache-2.0"
] | null | null | null | sample_code/002_add.py | kaede-san0910/workshop-2022 | 961d3ebfc899565aa6913c90b08881ef857ca945 | [
"Apache-2.0"
] | null | null | null | a = int(input("a = "))
b = int(input("b = "))
print("{} + {} = {}".format(a, b, a+b)) | 21.5 | 39 | 0.406977 |
c965792691ce7606e38e36d2ae95ee8c42d4351b | 2,953 | py | Python | archer_views.py | splunk-soar-connectors/archer | 65b9a5e9e250b6407e3aad08b86a483499a6210f | [
"Apache-2.0"
] | null | null | null | archer_views.py | splunk-soar-connectors/archer | 65b9a5e9e250b6407e3aad08b86a483499a6210f | [
"Apache-2.0"
] | 1 | 2022-02-08T22:54:54.000Z | 2022-02-08T22:54:54.000Z | archer_views.py | splunk-soar-connectors/archer | 65b9a5e9e250b6407e3aad08b86a483499a6210f | [
"Apache-2.0"
] | null | null | null | # File: archer_views.py
#
# Copyright (c) 2016-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
def get_ticket(provides, all_results, context):
context['results'] = results = []
for summary, action_results in all_results:
for result in action_results:
parameters = result.get_param()
if 'context' in parameters:
del parameters['context']
rec = {'parameters': parameters}
data = result.get_data()
if data:
data = data[0]['Record']['Field']
rec['record'] = sorted(data, key=lambda x: (x['@name'] is not None, x['@name']))
rec['content_id'] = result.get_summary().get(
'content_id', 'Not provided')
results.append(rec)
return 'get_ticket.html'
def list_tickets(provides, all_results, context):
headers = ['application', 'content id']
context['results'] = results = []
headers_set = set()
for summary, action_results in all_results:
for result in action_results:
for record in result.get_data():
headers_set.update([f.get('@name', '').strip()
for f in record.get('Field', [])])
if not headers_set:
headers_set.update(headers)
headers.extend(sorted(headers_set))
final_result = {'headers': headers, 'data': []}
dyn_headers = headers[2:]
for summary, action_results in all_results:
for result in action_results:
data = result.get_data()
param = result.get_param()
for item in data:
row = []
row.append({'value': param.get('application'),
'contains': ['archer application']})
row.append({'value': item.get('@contentId'),
'contains': ['archer content id']})
name_value = {}
for f in item.get('Field', []):
name_value[f['@name']] = f.get('#text')
for h in dyn_headers:
if h == 'IP Address':
row.append({'value': name_value.get(h, ''),
'contains': ['ip']})
else:
row.append({'value': name_value.get(h, '')})
final_result['data'].append(row)
results.append(final_result)
return 'list_tickets.html'
| 38.350649 | 95 | 0.562817 |
c96886f093360dec7c0ce79819456ac3947c46e0 | 12,198 | py | Python | napari/plugins/exceptions.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/exceptions.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/exceptions.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | import re
import sys
from collections import defaultdict
from types import TracebackType
from typing import (
Callable,
DefaultDict,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
Union,
)
# This is a mapping of plugin_name -> PluginError instances
# all PluginErrors get added to this in PluginError.__init__
PLUGIN_ERRORS: DefaultDict[str, List['PluginError']] = defaultdict(list)
# standard tuple type returned from sys.exc_info()
ExcInfoTuple = Tuple[Type[Exception], Exception, Optional[TracebackType]]
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata
Distribution = importlib_metadata.Distribution
class PluginError(Exception):
"""Base class for all plugin-related errors.
Instantiating a PluginError (whether raised or not), adds the exception
instance to the PLUGIN_ERRORS dict for later retrieval.
Parameters
----------
message : str
A message for the exception
plugin_name : str
The name of the plugin that had the error
plugin_module : str
The module of the plugin that had the error
"""
def __init__(self, message: str, plugin_name: str, plugin_module: str):
super().__init__(message)
self.plugin_name = plugin_name
self.plugin_module = plugin_module
PLUGIN_ERRORS[plugin_name].append(self)
def format_with_contact_info(self) -> str:
"""Make formatted string with context and contact info if possible."""
# circular imports
from napari import __version__
msg = f'\n\nPluginError: {self}'
msg += '\n(Use "Plugins > Plugin errors..." to review/report errors.)'
if self.__cause__:
cause = str(self.__cause__).replace("\n", "\n" + " " * 13)
msg += f'\n Cause was: {cause}'
contact = fetch_module_metadata(self.plugin_module)
if contact:
extra = [f'{k: >11}: {v}' for k, v in contact.items()]
extra += [f'{"napari": >11}: v{__version__}']
msg += "\n".join(extra)
msg += '\n'
return msg
def info(self,) -> ExcInfoTuple:
"""Return info as would be returned from sys.exc_info()."""
return (self.__class__, self, self.__traceback__)
class PluginImportError(PluginError, ImportError):
"""Raised when a plugin fails to import."""
def __init__(self, plugin_name: str, plugin_module: str):
msg = f"Failed to import plugin: '{plugin_name}'"
super().__init__(msg, plugin_name, plugin_module)
class PluginRegistrationError(PluginError):
"""Raised when a plugin fails to register with pluggy."""
def __init__(self, plugin_name: str, plugin_module: str):
msg = f"Failed to register plugin: '{plugin_name}'"
super().__init__(msg, plugin_name, plugin_module)
def format_exceptions(plugin_name: str, as_html: bool = False):
"""Return formatted tracebacks for all exceptions raised by plugin.
Parameters
----------
plugin_name : str
The name of a plugin for which to retrieve tracebacks.
as_html : bool
Whether to return the exception string as formatted html,
defaults to False.
Returns
-------
str
A formatted string with traceback information for every exception
raised by ``plugin_name`` during this session.
"""
_plugin_errors: List[PluginError] = PLUGIN_ERRORS.get(plugin_name)
if not _plugin_errors:
return ''
from napari import __version__
format_exc_info = get_tb_formatter()
_linewidth = 80
_pad = (_linewidth - len(plugin_name) - 18) // 2
msg = [
f"{'=' * _pad} Errors for plugin '{plugin_name}' {'=' * _pad}",
'',
f'{"napari version": >16}: {__version__}',
]
err0 = _plugin_errors[0]
package_meta = fetch_module_metadata(err0.plugin_module)
if package_meta:
msg.extend(
[
f'{"plugin package": >16}: {package_meta["package"]}',
f'{"version": >16}: {package_meta["version"]}',
f'{"module": >16}: {err0.plugin_module}',
]
)
msg.append('')
for n, err in enumerate(_plugin_errors):
_pad = _linewidth - len(str(err)) - 10
msg += ['', f'ERROR #{n + 1}: {str(err)} {"-" * _pad}', '']
msg.append(format_exc_info(err.info(), as_html))
msg.append('=' * _linewidth)
return ("<br>" if as_html else "\n").join(msg)
def get_tb_formatter() -> Callable[[ExcInfoTuple, bool], str]:
"""Return a formatter callable that uses IPython VerboseTB if available.
Imports IPython lazily if available to take advantage of ultratb.VerboseTB.
If unavailable, cgitb is used instead, but this function overrides a lot of
the hardcoded citgb styles and adds error chaining (for exceptions that
result from other exceptions).
Returns
-------
callable
A function that accepts a 3-tuple and a boolean ``(exc_info, as_html)``
and returns a formatted traceback string. The ``exc_info`` tuple is of
the ``(type, value, traceback)`` format returned by sys.exc_info().
The ``as_html`` determines whether the traceback is formated in html
or plain text.
"""
try:
import IPython.core.ultratb
def format_exc_info(info: ExcInfoTuple, as_html: bool) -> str:
color = 'Linux' if as_html else 'NoColor'
vbtb = IPython.core.ultratb.VerboseTB(color_scheme=color)
if as_html:
ansi_string = vbtb.text(*info).replace(" ", " ")
html = "".join(ansi2html(ansi_string))
html = html.replace("\n", "<br>")
html = (
"<span style='font-family: monaco,courier,monospace;'>"
+ html
+ "</span>"
)
return html
else:
return vbtb.text(*info)
except ImportError:
import cgitb
import traceback
# cgitb does not support error chaining...
# see https://www.python.org/dev/peps/pep-3134/#enhanced-reporting
# this is a workaround
def cgitb_chain(exc: Exception) -> Generator[str, None, None]:
"""Recurse through exception stack and chain cgitb_html calls."""
if exc.__cause__:
yield from cgitb_chain(exc.__cause__)
yield (
'<br><br><font color="#51B432">The above exception was '
'the direct cause of the following exception:</font><br>'
)
elif exc.__context__:
yield from cgitb_chain(exc.__context__)
yield (
'<br><br><font color="#51B432">During handling of the '
'above exception, another exception occurred:</font><br>'
)
yield cgitb_html(exc)
def cgitb_html(exc: Exception) -> str:
"""Format exception with cgitb.html."""
info = (type(exc), exc, exc.__traceback__)
return cgitb.html(info)
def format_exc_info(info: ExcInfoTuple, as_html: bool) -> str:
if as_html:
html = "\n".join(cgitb_chain(info[1]))
# cgitb has a lot of hardcoded colors that don't work for us
# remove bgcolor, and let theme handle it
html = re.sub('bgcolor="#.*"', '', html)
# remove superfluous whitespace
html = html.replace('<br>\n', '\n')
# but retain it around the <small> bits
html = re.sub(r'(<tr><td><small.*</tr>)', f'<br>\\1<br>', html)
# weird 2-part syntax is a workaround for hard-to-grep text.
html = html.replace(
"<p>A problem occurred in a Python script. "
"Here is the sequence of",
"",
)
html = html.replace(
"function calls leading up to the error, "
"in the order they occurred.</p>",
"<br>",
)
# remove hardcoded fonts
html = html.replace('face="helvetica, arial"', "")
html = (
"<span style='font-family: monaco,courier,monospace;'>"
+ html
+ "</span>"
)
return html
else:
# if we don't need HTML, just use traceback
return ''.join(traceback.format_exception(*info))
return format_exc_info
def fetch_module_metadata(dist: Union[Distribution, str]) -> Dict[str, str]:
"""Attempt to retrieve name, version, contact email & url for a package.
Parameters
----------
distname : str or Distribution
Distribution object or name of a distribution. If a string, it must
match the *name* of the package in the METADATA file... not the name of
the module.
Returns
-------
package_info : dict
A dict with metadata about the package
Returns None of the distname cannot be found.
"""
if isinstance(dist, Distribution):
meta = dist.metadata
else:
try:
meta = importlib_metadata.metadata(dist)
except importlib_metadata.PackageNotFoundError:
return {}
return {
'package': meta.get('Name', ''),
'version': meta.get('Version', ''),
'summary': meta.get('Summary', ''),
'url': meta.get('Home-page') or meta.get('Download-Url', ''),
'author': meta.get('Author', ''),
'email': meta.get('Author-Email') or meta.get('Maintainer-Email', ''),
'license': meta.get('License', ''),
}
ANSI_STYLES = {
1: {"font_weight": "bold"},
2: {"font_weight": "lighter"},
3: {"font_weight": "italic"},
4: {"text_decoration": "underline"},
5: {"text_decoration": "blink"},
6: {"text_decoration": "blink"},
8: {"visibility": "hidden"},
9: {"text_decoration": "line-through"},
30: {"color": "black"},
31: {"color": "red"},
32: {"color": "green"},
33: {"color": "yellow"},
34: {"color": "blue"},
35: {"color": "magenta"},
36: {"color": "cyan"},
37: {"color": "white"},
}
def ansi2html(
ansi_string: str, styles: Dict[int, Dict[str, str]] = ANSI_STYLES
) -> Generator[str, None, None]:
"""Convert ansi string to colored HTML
Parameters
----------
ansi_string : str
text with ANSI color codes.
styles : dict, optional
A mapping from ANSI codes to a dict of css kwargs:values,
by default ANSI_STYLES
Yields
-------
str
HTML strings that can be joined to form the final html
"""
previous_end = 0
in_span = False
ansi_codes = []
ansi_finder = re.compile("\033\\[" "([\\d;]*)" "([a-zA-z])")
for match in ansi_finder.finditer(ansi_string):
yield ansi_string[previous_end : match.start()]
previous_end = match.end()
params, command = match.groups()
if command not in "mM":
continue
try:
params = [int(p) for p in params.split(";")]
except ValueError:
params = [0]
for i, v in enumerate(params):
if v == 0:
params = params[i + 1 :]
if in_span:
in_span = False
yield "</span>"
ansi_codes = []
if not params:
continue
ansi_codes.extend(params)
if in_span:
yield "</span>"
in_span = False
if not ansi_codes:
continue
style = [
"; ".join([f"{k}: {v}" for k, v in styles[k].items()]).strip()
for k in ansi_codes
if k in styles
]
yield '<span style="%s">' % "; ".join(style)
in_span = True
yield ansi_string[previous_end:]
if in_span:
yield "</span>"
in_span = False
| 33.237057 | 79 | 0.565175 |
c9693a49a18c1714e3e73fb34025f16a983d9fca | 572 | py | Python | examples/federation/account.py | syfun/starlette-graphql | 1f57b60a9699bc6a6a2b95d5596ffa93ef13c262 | [
"MIT"
] | 14 | 2020-04-03T08:18:21.000Z | 2021-11-10T04:39:45.000Z | examples/federation/account.py | syfun/starlette-graphql | 1f57b60a9699bc6a6a2b95d5596ffa93ef13c262 | [
"MIT"
] | 2 | 2021-08-31T20:25:23.000Z | 2021-09-21T14:40:56.000Z | examples/federation/account.py | syfun/starlette-graphql | 1f57b60a9699bc6a6a2b95d5596ffa93ef13c262 | [
"MIT"
] | 1 | 2020-08-27T17:04:29.000Z | 2020-08-27T17:04:29.000Z | import uvicorn
from gql import gql, reference_resolver, query
from stargql import GraphQL
from helper import get_user_by_id, users
type_defs = gql("""
type Query {
me: User
}
type User @key(fields: "id") {
id: ID!
name: String
username: String
}
""")
@query('me')
def get_me(_, info):
return users[0]
@reference_resolver('User')
def user_reference(_, info, representation):
return get_user_by_id(representation['id'])
app = GraphQL(type_defs=type_defs, federation=True)
if __name__ == '__main__':
uvicorn.run(app, port=8082)
| 16.342857 | 51 | 0.687063 |
c96971b273caac5ab991341745cb2d8e72b76d77 | 2,519 | py | Python | tests/api_resources/test_application_fee.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 8 | 2021-05-29T08:57:58.000Z | 2022-02-19T07:09:25.000Z | tests/api_resources/test_application_fee.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 5 | 2021-05-31T10:18:36.000Z | 2022-01-25T11:39:03.000Z | tests/api_resources/test_application_fee.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-05-29T13:27:10.000Z | 2021-05-29T13:27:10.000Z | from __future__ import absolute_import, division, print_function
import stripe
import pytest
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "fee_123"
TEST_FEEREFUND_ID = "fr_123"
class TestApplicationFee(object):
async def test_is_listable(self, request_mock):
resources = await stripe.ApplicationFee.list()
request_mock.assert_requested("get", "/v1/application_fees")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.ApplicationFee)
async def test_is_refundable(self, request_mock):
appfee = await stripe.ApplicationFee.retrieve(TEST_RESOURCE_ID)
resource = await appfee.refund()
request_mock.assert_requested(
"post", "/v1/application_fees/%s/refund" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.ApplicationFee)
assert resource is appfee
class TestApplicationFeeRefunds(object):
async def test_is_listable(self, request_mock):
resources = await stripe.ApplicationFee.list_refunds(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/application_fees/%s/refunds" % TEST_RESOURCE_ID
)
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.ApplicationFeeRefund)
async def test_is_retrievable(self, request_mock):
resource = await stripe.ApplicationFee.retrieve_refund(
TEST_RESOURCE_ID, TEST_FEEREFUND_ID
)
request_mock.assert_requested(
"get",
"/v1/application_fees/%s/refunds/%s"
% (TEST_RESOURCE_ID, TEST_FEEREFUND_ID),
)
assert isinstance(resource, stripe.ApplicationFeeRefund)
async def test_is_creatable(self, request_mock):
resource = await stripe.ApplicationFee.create_refund(
TEST_RESOURCE_ID, amount=100
)
request_mock.assert_requested(
"post", "/v1/application_fees/%s/refunds" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.ApplicationFeeRefund)
async def test_is_modifiable(self, request_mock):
resource = await stripe.ApplicationFee.modify_refund(
TEST_RESOURCE_ID, TEST_FEEREFUND_ID, metadata={"foo": "bar"}
)
request_mock.assert_requested(
"post",
"/v1/application_fees/%s/refunds/%s"
% (TEST_RESOURCE_ID, TEST_FEEREFUND_ID),
)
assert isinstance(resource, stripe.ApplicationFeeRefund)
| 35.985714 | 78 | 0.687574 |
c96a052abb332bba00f134e10d854c779b770b2a | 866 | py | Python | code/busyschedule.py | matthewReff/Kattis-Problems | 848628af630c990fb91bde6256a77afad6a3f5f6 | [
"MIT"
] | 8 | 2020-02-21T22:21:01.000Z | 2022-02-16T05:30:54.000Z | code/busyschedule.py | matthewReff/Kattis-Problems | 848628af630c990fb91bde6256a77afad6a3f5f6 | [
"MIT"
] | null | null | null | code/busyschedule.py | matthewReff/Kattis-Problems | 848628af630c990fb91bde6256a77afad6a3f5f6 | [
"MIT"
] | 3 | 2020-08-05T05:42:35.000Z | 2021-08-30T05:39:51.000Z | def busyschedule():
times = input()
while (times != 0):
timeList = []
for i in range(0, times):
timeS = raw_input()
time = timeS.split()
time[0] = time[0].split(":")
if time[1] == "a.m.":
if time[0][0] == "12":
timeList.append([(0,int(time[0][1])), timeS])
else:
timeList.append([(int(time[0][0]),int(time[0][1])), timeS])
else:
if time[0][0] == "12":
timeList.append([(12,int(time[0][1])), timeS])
else:
timeList.append([(int(time[0][0])+12,int(time[0][1])), timeS])
timeList.sort()
for i in timeList:
print i[1]
print ""
times = input()
busyschedule() | 30.928571 | 82 | 0.39261 |
c96af4a490471a665152773f8f3b2a90f985672a | 607 | py | Python | tests/backtracking/test_path_through_grid.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | [
"Apache-2.0"
] | null | null | null | tests/backtracking/test_path_through_grid.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | [
"Apache-2.0"
] | null | null | null | tests/backtracking/test_path_through_grid.py | davjohnst/fundamentals | f8aff4621432c3187305dd04563425f54ea08495 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from unittest import TestCase
from fundamentals.backtracking.path_through_grid import PathThroughGrid
class TestPathThroughGrid(TestCase):
def test_no_path(self):
grid = [
[0, 1, 0],
[1, 0, 1],
[0, 0, 1]
]
ptg = PathThroughGrid(grid)
self.assertIsNone(ptg.get_path())
def test_path(self):
grid = [
[1, 1, 0],
[1, 1, 1],
[0, 0, 1]
]
ptg = PathThroughGrid(grid)
self.assertEquals([(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)],ptg.get_path()) | 22.481481 | 82 | 0.507414 |
c96b923ab99cdd18285399edd12e8dfeb03b5f78 | 343 | py | Python | main.py | yukraven/vitg | 27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72 | [
"MIT"
] | null | null | null | main.py | yukraven/vitg | 27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72 | [
"MIT"
] | 63 | 2019-08-25T07:48:54.000Z | 2019-10-18T01:52:29.000Z | main.py | yukraven/vitg | 27d3d9b73a23e4ff5ff4c769eb1f26b8f57fee72 | [
"MIT"
] | null | null | null | import sqlite3
import Sources.Parser
conn = sqlite3.connect("Database/vitg.db")
cursor = conn.cursor()
cursor.execute("SELECT * FROM Locations")
results = cursor.fetchall()
print(results)
conn.close()
parser = Sources.Parser.Parser()
words = [u"любить", u"бить"]
for word in words:
command = parser.getCommand(word)
print(command)
| 19.055556 | 42 | 0.725948 |
c96d512247f8395a641feee824bc046d0dbdc522 | 7,018 | py | Python | src/gene.score.array.simulator.py | ramachandran-lab/PEGASUS-WINGS | bdd81b58be4c4fb62916e422a854abdcbfbb6fd7 | [
"MIT"
] | 3 | 2019-03-31T12:32:25.000Z | 2020-01-04T20:57:14.000Z | src/gene.score.array.simulator.py | ramachandran-lab/PEGASUS-WINGS | bdd81b58be4c4fb62916e422a854abdcbfbb6fd7 | [
"MIT"
] | null | null | null | src/gene.score.array.simulator.py | ramachandran-lab/PEGASUS-WINGS | bdd81b58be4c4fb62916e422a854abdcbfbb6fd7 | [
"MIT"
] | 1 | 2020-10-24T23:48:15.000Z | 2020-10-24T23:48:15.000Z | import numpy as np
import pandas as pd
import sys
import string
import time
import subprocess
from collections import Counter
import string
import random
def random_pheno_generator(size=6,chars=string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
#First argument is the gene score distribution that you want to draw from, the second is the type of clusters to generate
#If 'large' only clusters with a large number of shared genes will be simulated
#If 'mixed' one cluster with only a few shared genes will be simulated
subprocess.call('mkdir NewSims_nothreshenforced',shell = True)
if len(sys.argv) < 3:
sys.exit("Enter the ICD10 code of interest as the first argument, and either 'mixed' or 'large' as the second argument depending on desired number of significant genes in a cluster.")
class simulator():
def __init__(self,type_of_clusters,num_draws,sim_status,percentage,sim_label):
self.example_dist = pd.read_csv('merged.pegasus.results.'+ sys.argv[1] + '.txt', delimiter = '\t').set_index('Gene')
self.genes = np.array(self.example_dist.index.tolist())
self.num_clusters = int(np.random.uniform(2,int(num_draws) * 0.15))
# self.num_clusters = int(np.random.uniform(2,3))
self.phenos = np.array([random_pheno_generator() for i in range(num_draws)])
self.clusters = {}
self.unique_sig_genes = {}
self.cluster_type = type_of_clusters
self.percentage = float(percentage)/100
self.draw_status = sim_status
self.sim_label = sim_label
if self.draw_status == 'limited':
self.num_draws = num_draws
else:
self.num_draws == 100000000000
def _gen_clusters_(self):
self.possible_genes = list(self.genes)
self.possible_phenos = list(self.phenos)
total_genes = 175
self.ref_count = {}
for i in range(self.num_clusters):
#Set size of clusters, both number of phenos and sig genes
num_sig_shared_genes = int(total_genes*self.percentage)
genes,phenos = self.cluster_sharing(num_sig_shared_genes,np.random.randint(2,8),self.possible_genes,self.possible_phenos)
#Update sets of genes and phenos so that there is not overlap between the clusters (first run)
self.possible_phenos = list(set(self.possible_phenos).difference(phenos))
self.possible_genes = list(set(self.possible_genes).difference(genes))
self.clusters['cluster' + str(i)] = {'Gene':list(genes),'Phenos':list(phenos)}
for j in phenos:
self.ref_count[str(j)] = len(genes)
for i in self.phenos:
if i not in self.ref_count.keys():
self.ref_count[i] = 0
self.unique_genes(self.phenos)
def cluster_sharing(self,num_unique_genes,num_unique_phenos,possible_genes,possible_phenos):
genes = set()
while len(genes) < num_unique_genes:
genes.add(np.random.choice(possible_genes))
phenos = set()
while len(phenos) < num_unique_phenos:
phenos.add(np.random.choice(possible_phenos))
return genes,phenos
def draw_counter(self,gene_dict,selected_genes):
if self.draw_status == 'limited':
for i in selected_genes:
gene_dict[i] +=1
for x,y in gene_dict.items():
if y >= self.num_draws:
del gene_dict[x]
return gene_dict
else:
return gene_dict
#Generates a list of genes that are also significant for each phenotype, whether or not they have been assigned to a cluster
def unique_genes(self,phenos):
self.counter_dict = {}
for i in self.possible_genes:
self.counter_dict[i] = 0
for pheno in phenos:
self.number_siggenes = 175
pheno_only_genes = np.random.choice(self.possible_genes, size = int(self.number_siggenes - self.ref_count[pheno]),replace = False)
self.counter_dict = self.draw_counter(self.counter_dict,pheno_only_genes)
self.unique_sig_genes[pheno] = list(set(pheno_only_genes))
self.possible_genes = list(self.counter_dict.keys())
def generate_matrix(self):
all_scores = np.array(self.example_dist).flatten()
small_scores = all_scores[all_scores <= 0.001]
non_sig_scores = all_scores[all_scores > 0.001]
data = np.zeros((len(self.phenos),len(self.genes)))
for j in range(len(self.phenos)):
data[j] = np.negative(np.log(np.array(np.random.choice(non_sig_scores,len(self.genes)))))
scorematrix = pd.DataFrame(data.T,index = self.genes,columns = self.phenos)
for key,value in self.clusters.items():
for phenotype in value['Phenos']:
for gene in value['Gene']:
self.unique_sig_genes[phenotype].append(gene)
#Fill in significant gene scores that are unique to each phenotype
for key,value in self.unique_sig_genes.items():
for x in value:
scorematrix.loc[x,key] = np.negative(np.log(np.random.choice(small_scores)))
return scorematrix
def write(self,dataframe):
if self.draw_status == 100000000000:
y = str(self.percentage) + '_' + str(self.sim_label)
subprocess.call('mkdir NewSims_nothreshenforced/Simulations' + y+str(self.num_clusters),shell = True)
dataframe = dataframe*-1
dataframe = 10**dataframe.astype(float)
dataframe.index.name = 'Gene'
dataframe.to_csv('NewSims_nothreshenforced/Simulations'+y+str(self.num_clusters)+'/Simulated.scores.using.' + sys.argv[1] + '.gene.dist.' + y + '.csv', header = True, index = True)
for key,value in self.clusters.items():
newfile = open('NewSims_nothreshenforced/Simulations'+y+str(self.num_clusters)+ '/' + str(key) + 'gene.and.pheno.info.txt','w')
newfile.write('Shared Significant Genes:\n')
newfile.write(','.join(value["Gene"]))
newfile.write('\nPhenos:\n')
newfile.write(','.join(value['Phenos']))
else:
y = str(self.percentage) + '_' + str(self.sim_label)
subprocess.call('mkdir NewSims_nothreshenforced/Simulations' + y + '_num_draws_' + str(self.num_draws),shell = True)
dataframe = dataframe*-1
dataframe = 10**dataframe.astype(float)
dataframe.index.name = 'Gene'
dataframe.to_csv('NewSims_nothreshenforced/Simulations'+y+ '_num_draws_' + str(self.num_draws) + '/Simulated.scores.using.' + sys.argv[1] + '.gene.dist.' + str(self.num_clusters) + '.clusters.' + str(self.num_draws)+'.pos.draws.csv', header = True, index = True)
for key,value in self.clusters.items():
newfile = open('NewSims_nothreshenforced/Simulations'+y+ '_num_draws_' + str(self.num_draws)+ '/' + str(key) + 'gene.and.pheno.info.txt','w')
newfile.write('Shared Significant Genes:\n')
newfile.write(','.join(value["Gene"]))
newfile.write('\nPhenos:\n')
newfile.write(','.join(value['Phenos']))
def test(self):
self._gen_clusters_()
self.write(self.generate_matrix())
def main():
    """Run the full simulation sweep over phenotype counts and shared percentages."""
    # each item of z is the number of phenotypes in a simulation
    for z in [25, 50, 75, 100]:
        # The amount of shared significant architecture to be imposed on a cluster
        shared_percentage = [1, 10, 25, 50, 75]
        for g in shared_percentage:
            # How many simulations for each set of parameters should be run
            for j in range(1, 1001):
                print('Generated ' + str(g) + '% with unlimited random draws simulation,' + str(z) + ' phenotypes: ' + str(j))
                limiteddraw = simulator(sys.argv[2], z, 'limited', g, j)
                limiteddraw.test()


if __name__ == '__main__':
    # Guarded so importing this module does not kick off the (long) sweep;
    # running it as a script behaves exactly as before.
    main()
| 43.320988 | 265 | 0.727273 |
c96fe90561c66a9922b3825850ab89dad8c3224a | 7,273 | py | Python | datatools_bdh/dict_utils.py | sfu-bigdata/datatools-bdh | 43303db2e165c10b43f5afe5293d41e655a05040 | [
"MIT"
] | null | null | null | datatools_bdh/dict_utils.py | sfu-bigdata/datatools-bdh | 43303db2e165c10b43f5afe5293d41e655a05040 | [
"MIT"
] | null | null | null | datatools_bdh/dict_utils.py | sfu-bigdata/datatools-bdh | 43303db2e165c10b43f5afe5293d41e655a05040 | [
"MIT"
] | null | null | null | """Convenience functions for dictionary access and YAML"""
from sklearn.utils import Bunch
from collections import OrderedDict
from collections.abc import Mapping
import copy
import yaml
# ----------------------------------------------------------------------------
def deep_convert_list_dict(d, skip_list_level=0):
    """In nested dict `d` convert all lists into dictionaries.

    Args:
        skip_list_level - top-n nested list levels to ignore for
                          dict conversion
    """
    # Strings are iterable but must be treated as leaf values, not containers.
    if isinstance(d, str):
        return d
    try:
        # Mapping case: recurse into every value, replacing in place.
        for k,v in d.items():
            d[k] = deep_convert_list_dict(v, skip_list_level=skip_list_level)
    except AttributeError:
        # `d` has no .items() -- treat it as list-like.
        if skip_list_level:
            # Still within the "keep as list" depth: recurse but keep the list.
            skip_list_level -= 1
            for k,v in enumerate(d):
                d[k] = deep_convert_list_dict(v, skip_list_level=skip_list_level)
        else:
            # Convert the list into a dict keyed by stringified positions.
            dd = {}
            try:
                for k,v in enumerate(d):
                    dd[str(k)] = deep_convert_list_dict(v, skip_list_level=skip_list_level)
                return dd
            except:
                raise
    except TypeError:
        # NOTE(review): appears to absorb a TypeError propagated out of a
        # recursive call on a non-iterable leaf, leaving `d` partially
        # converted -- confirm intended behavior for scalar/None leaves.
        pass
    return d
def xml_dict(xml, skip_list_level=0):
    """Parse `xml` source and return a nested dictionary.

    Since pandas.json_normalize has special treatment for nested lists,
    it is possible to control how many levels of nested lists are ignored
    before recursively converting lists into dicts.
    """
    import xmltodict
    parsed = xmltodict.parse(xml, dict_constructor=dict)
    return deep_convert_list_dict(parsed, skip_list_level=skip_list_level)
# ----------------------------------------------------------------------------
# manipulate class objects
def set_class_dict(cls, clsdict):
    """Return a subclass of `cls` whose class dictionary is extended with `clsdict`."""
    name = cls.__name__
    bases = (cls,)
    return type(name, bases, clsdict)
def set_docstr(cls, docstr, **kwargs):
    """Return a subclass of `cls` with `docstr` as its docstring.

    Extra keyword arguments become class attributes (and may override
    the docstring, matching dict-literal update semantics).
    """
    attrs = {'__doc__': docstr}
    attrs.update(kwargs)
    return set_class_dict(cls, attrs)
# ----------------------------------------------------------------------------
# working with dict and Bunch
def deep_update(d1, d2):
    """
    Recursively fold the contents of `d2` into `d1`, mutating `d1`.

    :param d1: A dictionary (possibly nested) to be updated.
    :type d1: dict
    :param d2: A dictionary (possibly nested) used to update d1.
    :type d2: dict
    :return: `d1` updated in place: keys only in d2 are added; keys present
             in both are overwritten by d2 (recursing into nested mappings).
             If either argument is not a mapping, `d2` is returned unchanged.
    :rtype: dict
    """
    if not (isinstance(d1, Mapping) and isinstance(d2, Mapping)):
        # At least one side is not a mapping: the update value wins outright.
        return d2
    for key, new_value in d2.items():
        d1[key] = deep_update(d1.get(key), new_value)
    return d1
def nested_value(d, keys):
    """Access an element in nested dictionary `d` at the path given by `keys`."""
    current = d
    for key in keys:
        current = current[key]
    return current
def select_keys(d, keys):
    """Return the subset of dict `d` whose keys are listed in `keys`."""
    result = {}
    for key, value in d.items():
        if key in keys:
            result[key] = value
    return result
def merge_dicts(d1, d2):
    """
    Performs a deep_update() of d1 using d2, on a deep copy of d1.

    Recursively updates a deep copy of `d1` with `d2`, leaving the caller's
    `d1` untouched.

    :param d1: A dictionary (possibly nested) to be updated.
    :type d1: dict
    :param d2: A dictionary (possibly nested) which will be used to update d1.
    :type d2: dict
    :return: An updated & deep-copied version of d1, where d2 values were used
             to update the values of d1. Will add d2 keys if not present in d1.
             If a key does exist in d1, that key's value will be overwritten by
             the d2 value. Works recursively to update nested dictionaries.
    :rtype: dict
    """
    # (The original had a second, redundant docstring-as-statement here; it
    # has been merged into the docstring above.)
    # Copy first so deep_update() never mutates the caller's d1.
    md = copy.deepcopy(d1)
    return deep_update(md, d2)
def make_Bunch(docstr, *args, **kwargs):
    '''Construct a Bunch collection with alternative doc string

    All arguments after `docstr` are passed to the Bunch dict constructor.
    The main appeal of a bunch d over a dict, is that keys can be accessed
    via d.key rather than just d['key']

    Example:
    B = make_Bunch("""Container for special custom data""",a=1)
    B.b = 3
    print(B)
    help(B)
    '''
    # TODO: the docstring modification causes issue with pickle serialization
    # If you might want to use pickle, consider to just construct the sklearn.utils.Bunch
    # object directly and don't use this construciton method here.
    # (The returned object is an instance of a dynamically created Bunch
    # subclass, which pickle typically cannot locate by module path.)
    return set_docstr(Bunch, docstr)(*args, **kwargs)
# ----------------------------------------------------------------------------
# YAML functions
class YAMLProcessingError(Exception):
    """Raised when a successfully loaded YAML structure fails downstream processing."""
def _map_from_ordered_pairs(pairs, MapType=Bunch):
    """Construct a custom dict type (e.g. Bunch) from pairs."""
    # Keys are forwarded as keyword arguments, so they must be strings.
    return MapType(**dict(pairs)) # dict in python >= 3.6, preserves insertion order
def _ordered_load(stream, Loader=yaml.Loader, MapType=Bunch, **kwargs):
    # Load YAML so every mapping node becomes a MapType (default Bunch)
    # with insertion order preserved.
    # Subclass so the added constructor does not leak into the global Loader.
    class OrderedLoader(Loader):
        pass
    def construct_mapping(loader, node):
        loader.flatten_mapping(node)
        return _map_from_ordered_pairs(loader.construct_pairs(node), MapType=MapType)
    # Route every standard YAML map through our MapType constructor.
    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        construct_mapping)
    return yaml.load(stream, OrderedLoader, **kwargs)
def _dict_representer(dumper, data):
    # Emit any mapping-like object as a plain YAML map (standard !!map tag).
    return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
def _setup_yaml():
    """Have custom dict types produce standard format YAML output for dicts"""
    # Without these representers, PyYAML would tag OrderedDict/Bunch values
    # as python objects instead of plain maps.
    yaml.add_multi_representer(OrderedDict, _dict_representer)
    yaml.add_multi_representer(Bunch, _dict_representer)
def yload(datastr, Loader=yaml.SafeLoader, MapType=Bunch, **kwargs):
    """
    Load object from YAML input string or stream

    :param datastr: A string or stream containing YAML formatted text
    :type datastr: str or stream
    :param Loader: The yaml loader object to use, defaults to yaml.SafeLoader
    :type Loader: yaml.Loader Object, optional
    :param MapType: type of dictionary to construct, defaults to Bunch
    :type MapType: type, optional
    :param kwargs: Further keyword args are passed on to yaml.load()
    :return: Python object representation of the YAML string/stream
    :rtype: Specified in MapType parameter
    """
    # Delegate to the order-preserving loader so nested maps become MapType.
    return _ordered_load(datastr, Loader=Loader, MapType=MapType, **kwargs)
def ydump(data, *args, sort_keys=False, **kwargs):
    """
    Create YAML output string for data object. If data is an OrderedDict, original key ordering
    is preserved in internal call to yaml.dump().

    :param data:
    :type data: dict or Bunch
    :param args: Additional args passed on to yaml.dump()
    :param sort_keys: defaults to False
    :type sort_keys: bool
    :param kwargs: Further keyword args are passed on to yaml.dump()
    :return: YAML string representation of data
    :rtype: str
    """
    return yaml.dump(data, *args, sort_keys=sort_keys, **kwargs)


# Register the custom representers at import time so OrderedDict/Bunch
# serialize as plain YAML maps everywhere in this process.
_setup_yaml()
| 33.210046 | 99 | 0.641001 |
c9702823a44c14ac03b736bffeea367a229f28da | 6,612 | py | Python | testscripts/RDKB/component/CMAgent/TS_CMAGENT_SetSessionId.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/CMAgent/TS_CMAGENT_SetSessionId.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/CMAgent/TS_CMAGENT_SetSessionId.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>15</version>
<name>TS_CMAGENT_SetSessionId</name>
<primitive_test_id/>
<primitive_test_name>CMAgent_SetSessionId</primitive_test_name>
<primitive_test_version>5</primitive_test_version>
<status>FREE</status>
<synopsis>TC_CMAGENT_1 - Set Session ID API Validation</synopsis>
<groups_id>4</groups_id>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CMAGENT_1</test_case_id>
<test_objective>To Validate "Set Session ID" Function of CM Agent</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component"
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
CMAgent_SetSessionId
Input
1.sessionId as 0
2.pathname (Device.X_CISCO_COM_CableModem.)
3.override as 0 (This parameter will enable the reading of current session id and check set session id api with value read)
4. priority as 0</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(CMAgent_SetSessionId - func name - "If not exists already"
cmagent - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overridden automatically by Test Manager with provided arguments in configure page (TS_CMAGENT_SetSessionId.py)
3.Execute the generated Script(TS_CMAGENT_SetSessionId.py) using excution page of Test Manager GUI
4.cmagentstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named CMAgent_SetSessionId through registered TDK cmagentstub function along with necessary Entry Values as arguments
5.CMAgent_SetSessionId function will call CCSP Base Interface Function named CcspBaseIf_SendcurrentSessionIDSignal, that inturn will call "CcspCcMbi_CurrentSessionIdSignal" along with provided input arguments to assign session id to global value of CM Agent
6.Responses(printf) from TDK Component,Ccsp Library function and cmagentstub would be logged in Agent Console log based on the debug info redirected to agent console
7.cmagentstub will validate the available result (from agent console log and Pointer to instance as non null ) with expected result (Eg:"Session ID assigned Succesfully") and the same is updated in agent console log
8.TestManager will publish the result in GUI as PASS/FAILURE based on the response from cmagentstub</automation_approch>
<except_output>CheckPoint 1:
Session ID assigned log from DUT should be available in Agent Console Log
CheckPoint 2:
TDK agent Test Function will log the test case result as PASS based on API response
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution page</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_CMAGENT_SetSessionId</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmagent","RDKB");

#IP and Port of box, No need to change,
#This will be replaced with corresponding Box Ip and port while executing script
# NOTE(review): the placeholders below are substituted by the Test Manager at
# run time; this file is a template and is not valid Python until then.
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMAGENT_SetSessionId');

#Get the result of connection with test component and STB
loadModuleresult =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadModuleresult;
loadStatusExpected = "SUCCESS"
# Abort early if the CM Agent stub library could not be loaded on the box.
if loadStatusExpected not in loadModuleresult.upper():
    print "[Failed To Load CM Agent Stub from env TDK Path]"
    print "[Exiting the Script]"
    exit();

#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CMAgent_SetSessionId');
#Input Parameters
# override=0 makes the stub read the current session id before setting it.
tdkTestObj.addParameter("pathname","Device.X_CISCO_COM_CableModem.");
tdkTestObj.addParameter("priority",0);
tdkTestObj.addParameter("sessionId",0);
tdkTestObj.addParameter("override",0);
expectedresult = "SUCCESS";

#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();

# Publish PASS/FAIL back to the Test Manager based on the stub's response.
if expectedresult in actualresult:
    #Set the result status of execution as success
    tdkTestObj.setResultStatus("SUCCESS");
    print "TEST STEP 1: Get the component session Id";
    print "EXPECTED RESULT 1: Should get the component session Id";
    print "ACTUAL RESULT 1: %s" %resultDetails;
    #Get the result of execution
    print "[TEST EXECUTION RESULT] : SUCCESS";
else:
    #Set the result status of execution as failure
    tdkTestObj.setResultStatus("FAILURE");
    print "TEST STEP 1: Get the component session Id";
    print "EXPECTED RESULT 1: Should get the component session Id";
    print "ACTUAL RESULT 1: %s" %resultDetails;
    #Get the result of execution
    print "[TEST EXECUTION RESULT] : FAILURE";
    print "[TEST EXECUTION RESULT] : %s" %resultDetails ;

obj.unloadModule("cmagent");
| 44.675676 | 259 | 0.73775 |
c970887827dfacb25a04d949c110b21b2a98595f | 492 | py | Python | blu/config.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | null | null | null | blu/config.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | 1 | 2021-06-01T21:57:23.000Z | 2021-06-01T21:57:23.000Z | blu/config.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | null | null | null | import logging
import yaml
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class Config(object):
    """Singleton holder for the application's YAML configuration.

    Every call to ``Config()`` returns the same shared instance; the
    parsed configuration mapping lives in ``cfg``.
    """

    __instance__ = None  # the single shared instance, created lazily
    cfg = {}  # parsed configuration (empty until load() is called)

    def __new__(cls, *args, **kwargs):
        # Classic singleton: allocate the instance on first construction only.
        if Config.__instance__ is None:
            Config.__instance__ = object.__new__(cls)
        return Config.__instance__

    def load(self, location: str = "blu.yml"):
        """Read the YAML file at `location` and store the result in self.cfg."""
        logger.info('loading configuration, source: %s', location)
        with open(location, 'r') as file:
            # safe_load avoids the arbitrary-object construction that the
            # deprecated no-Loader yaml.load() call allowed.
            self.cfg = yaml.safe_load(file)
| 23.428571 | 66 | 0.632114 |
c97156d460bdc88e5f228d10d1465d45738af933 | 8,536 | py | Python | other_useful_scripts/join.py | sklasfeld/ChIP_Annotation | 9ce9db7a129bfdec91ec23b33d73ff22f37408ad | [
"MIT"
] | 1 | 2020-08-23T23:12:56.000Z | 2020-08-23T23:12:56.000Z | other_useful_scripts/join.py | sklasfeld/ChIP_Annotation | 9ce9db7a129bfdec91ec23b33d73ff22f37408ad | [
"MIT"
] | null | null | null | other_useful_scripts/join.py | sklasfeld/ChIP_Annotation | 9ce9db7a129bfdec91ec23b33d73ff22f37408ad | [
"MIT"
] | 1 | 2020-08-23T23:16:47.000Z | 2020-08-23T23:16:47.000Z | #!/usr/bin/env python3
# -*- coding: iso-8859-15 -*-
# 2017, Samantha Klasfeld, the Wagner Lab
# the Perelman School of Medicine, the University of Pennsylvania
# Samantha Klasfeld, 12-21-2017
import argparse
import sys
import pandas as pd
import numpy as np
# Command-line interface: most options map one-to-one onto keyword
# arguments of pandas.read_csv / pandas.DataFrame.merge.
parser = argparse.ArgumentParser(description="this script takes \
in a 2 tables and performs a \
joins them to create a merged table")
parser.add_argument('left_table', help='left table file name')
parser.add_argument('right_table', help='right table file name')
parser.add_argument('out_table', help='output table file name')
# Join semantics (`antileft` is implemented later as left join + filter).
parser.add_argument('-w','--how', help='Type of merge to be performed: \
`left`,`right`,`outer`,`inner`, `antileft`. Default:`inner`',
    choices=['left', 'right', 'outer', 'inner', 'antileft'], default='inner')
parser.add_argument('-j','--on', help='Column or index level names \
to join on. These must be found in both DataFrames. If on is None \
and not merging on indexes then this defaults to the intersection \
of the columns in both DataFrames.', nargs='+')
parser.add_argument('-lo','--left_on', help='Column or index level names \
to join on in the left DataFrame. Can also be an array or list of arrays \
of the length of the left DataFrame. These arrays are treated as if \
they are columns.', nargs='+')
parser.add_argument('-ro','--right_on', help='Column or index level names \
to join on in the right DataFrame. Can also be an array or list of arrays \
of the length of the left DataFrame. These arrays are treated as if \
they are columns.', nargs='+')
parser.add_argument('-ml','--merge_left_index', help='Use the index from the left \
DataFrame as the join key(s). If it is a MultiIndex, the number of keys \
in the other DataFrame (either the index or a number of columns) must \
match the number of levels.', action='store_true', default=False)
parser.add_argument('-mr','--merge_right_index', help='Use the index from the right \
DataFrame as the join key(s). If it is a MultiIndex, the number of keys \
in the other DataFrame (either the index or a number of columns) must \
match the number of levels.', action='store_true', default=False)
parser.add_argument('-or','--order', help='Order the join keys \
lexicographically in the result DataFrame. If False, the \
order of the join keys depends on the join type (how keyword).', \
    action='store_true', default=False)
parser.add_argument('-su','--suffixes', help='Tuple of (str,str). Each str is a \
Suffix to apply to overlapping column names in the left and right side, \
respectively. To raise an exception on overlapping columns \
use (False, False). Default:(`_x`,`_y`)', nargs=2)
# Header / column-name handling for each input table.
parser.add_argument('-nl', '--noheader_l', action='store_true', default=False, \
    help='Set if `left_table` has no header. If this is set, \
user must also set `colnames_l`')
parser.add_argument('-nr', '--noheader_r', action='store_true', default=False, \
    help='Set if `right_table` has no header. If this is set, \
user must also set `colnames_r`')
parser.add_argument('-cl', '--colnames_l', nargs='+', \
    help='`If `noheader_l` is set, add column names \
to `left_table`. Otherwise, rename the columns.')
parser.add_argument('-cr', '--colnames_r', nargs='+', \
    help='`If `noheader_r` is set, add column names \
to `right_table`. Otherwise, rename the columns.')
# Delimiters and optional index columns.
parser.add_argument('--left_sep', '-sl', default="\t", \
    help='table delimiter of `left_table`. By default, \
the table is expected to be tab-delimited')
parser.add_argument('--right_sep', '-sr', default="\t", \
    help='table delimiter of `right_table`. By default, \
the table is expected to be tab-delimited')
parser.add_argument('--out_sep', '-so', default="\t", \
    help='table delimiter of `out_table`. By default, \
the out table will be tab-delimited')
parser.add_argument('--left_indexCol', '-il', \
    help='Column(s) to use as the row labels of the \
`left_table`, either given as string name or column index.')
parser.add_argument('--right_indexCol', '-ir', \
    help='Column(s) to use as the row labels of the \
`right_table`, either given as string name or column index.')
# Targeted single-column renames ("old,new" pairs).
parser.add_argument('-clc','--change_left_cols', nargs='+',
    help='list of specific column names you want to change in left table. \
For example, if you want to change columns `oldColName1` and \
`oldColName2` to `newColName1` \
and `newColName2`, respectively, then set this to \
`oldColName2,newColName1 oldColName2,newColName2`')
parser.add_argument('-crc','--change_right_cols', nargs='+',
    help='list of specific column names you want to change in right table. \
For example, if you want to change columns `oldColName1` and \
`oldColName2` to `newColName1` \
and `newColName2`, respectively, then set this to \
`oldColName2,newColName1 oldColName2,newColName2`')
#parser.add_argument('--header','-H', action='store_true', default=False, \
#	help='true if header in table')
args = parser.parse_args()
# 0. Validate argument combinations before touching any files.
if args.noheader_l and not args.colnames_l:
    sys.exit("Error: If `noheader_l` is set, user must also set `colnames_l`\n")
if args.noheader_r and not args.colnames_r:
    sys.exit("Error: If `noheader_r` is set, user must also set `colnames_r`\n")
if args.change_left_cols and args.colnames_l:
    sys.exit("Error: Can only set one of these parameters:\n" +
             "\t* change_left_cols\n" +
             "\t* colnames_l\n")
if args.change_right_cols and args.colnames_r:
    sys.exit("Error: Can only set one of these parameters:\n" +
             "\t* change_right_cols\n" +
             "\t* colnames_r\n")
# Some join key must be given, but index-based merges (-ml/-mr) are a
# legitimate alternative to column names; the original rejected them here.
if not (args.on or args.left_on or args.right_on
        or args.merge_left_index or args.merge_right_index):
    sys.exit("Error: must set columns to join on.")
# 1. Read input files
# Build pandas.read_csv keyword dicts separately so the header/index
# options can differ between the two tables.
read_ltable_param={}
read_rtable_param={}
read_ltable_param["sep"]=args.left_sep
read_rtable_param["sep"]=args.right_sep
# When a file has no header row, tell pandas not to consume the first line.
if args.noheader_l:
    read_ltable_param["header"]=None
if args.noheader_r:
    read_rtable_param["header"]=None
# Optional column(s) to use as each table's row index.
if args.left_indexCol:
    read_ltable_param["index_col"]=args.left_indexCol
if args.right_indexCol:
    read_rtable_param["index_col"]=args.right_indexCol
left_df = pd.read_csv(args.left_table, **read_ltable_param)
right_df = pd.read_csv(args.right_table, **read_rtable_param)
# 2. Change/Update column names of the input tables
# Wholesale replacement via --colnames_l / --colnames_r (must match width).
if args.colnames_l:
    if len(left_df.columns) != len(args.colnames_l):
        sys.exit(("ValueError: Length mismatch: Expected axis " +
            "has %i elements, new values have %i elements") %
            (len(left_df.columns), len(args.colnames_l)))
    left_df.columns = args.colnames_l
if args.colnames_r:
    if len(right_df.columns) != len(args.colnames_r):
        sys.exit(("ValueError: Length mismatch: Expected axis " +
            "has %i elements, new values have %i elements") %
            (len(right_df.columns), len(args.colnames_r)))
    right_df.columns = args.colnames_r
# Targeted renames via "old,new" pairs from --change_left_cols / -crc.
if args.change_left_cols:
    for left_changeCol_param in args.change_left_cols:
        if len(left_changeCol_param.split(",")) != 2:
            sys.exit("ERROR: values set to `change_left_cols` must " +
                "be in the format [old_col_name],[new_column_name]")
    rename_left_cols = dict(x.split(",") for x in args.change_left_cols)
    left_df = left_df.rename(columns=rename_left_cols)
if args.change_right_cols:
    for right_changeCol_param in args.change_right_cols:
        if len(right_changeCol_param.split(",")) != 2:
            sys.exit("ERROR: values set to `change_right_cols` must " +
                "be in the format [old_col_name],[new_column_name]")
    rename_right_cols = dict(x.split(",") for x in args.change_right_cols)
    right_df = right_df.rename(columns=rename_right_cols)
# 3. Set merge parameters
merge_param={}
# "antileft" is implemented as a pandas left join plus post-filtering,
# so pandas itself only ever sees how="left".
if args.how == "antileft":
    merge_param['how']="left"
else:
    merge_param['how']=args.how
if args.on:
    merge_param['on']=args.on
if args.left_on:
    merge_param['left_on']=args.left_on
if args.right_on:
    merge_param['right_on']=args.right_on
if args.merge_left_index:
    merge_param['left_index']=args.merge_left_index
if args.merge_right_index:
    merge_param['right_index']=args.merge_right_index
if args.order:
    merge_param['sort']=args.order
if args.suffixes:
    merge_param['suffixes']=args.suffixes
# 4. Perform Merge
if args.how == "antileft":
    # A left anti-join keeps the left rows that found NO match in the right
    # table. The original `left_df.loc[merge_df.index,:]` was broken: a
    # merge result carries a fresh RangeIndex, so it just selected the
    # first rows of left_df. Use pandas' `indicator` column instead.
    # (NOTE: assumes neither input already has a column named '_merge'.)
    merge_param['indicator'] = True
    flagged = left_df.merge(right_df, **merge_param)
    merge_df = flagged[flagged['_merge'] == 'left_only'].drop(columns='_merge')
else:
    merge_df = left_df.merge(right_df, **merge_param)

# 5. Export merged table
out_param = {}
out_param["sep"] = args.out_sep
# Only write the index when the left table was read with an index column.
if not args.left_indexCol:
    out_param["index"] = False
merge_df.to_csv(args.out_table, **out_param)
c971e430652331e744f0b8b0fc1ac07db5704fb9 | 884 | py | Python | 6.py | mattclark-net/aoc21 | d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a | [
"MIT"
] | null | null | null | 6.py | mattclark-net/aoc21 | d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a | [
"MIT"
] | null | null | null | 6.py | mattclark-net/aoc21 | d4dcd78524a8cb27e1445cb6c39e696e64cc4e7a | [
"MIT"
] | null | null | null | # parse the input
# Read the puzzle input: one comma-separated line of integer timers in [0, 8].
with open("6-input.txt") as f:
    fish = [int(n) for n in f.readline().split(",")]
# startcounts[t] = number of fish whose timer currently equals t.
startcounts = dict(zip(range(0, 9), [0 for x in range(9)]))
# NOTE(review): the loop variable `f` shadows the (now closed) file handle.
for f in fish:
    startcounts[f] += 1
def updatedcounts(counts):
    """Advance the timer-bucket counts by one day.

    Every bucket shifts down by one (new[i] = old[i+1] for i in 0..7);
    the fish that were at 0 restart at 6 and each also spawns a new fish
    at 8. Returns a fresh dict; `counts` is not modified.
    """
    spawning = counts[0]
    nextcounts = {timer: counts[timer + 1] for timer in range(8)}
    nextcounts[6] += spawning
    nextcounts[8] = spawning
    return nextcounts
# Part 1: simulate 80 days, printing the per-bucket counts each day.
counts = startcounts
for day in range(80):
    print(day, [counts[v] for v in range(9)])
    counts = updatedcounts(counts)
print("\n\n", sum(counts.values()), "\n\n")
# Part 2: restart from the initial counts and simulate 256 days.
# (startcounts is safe to reuse: updatedcounts returns new dicts.)
counts = startcounts
for day in range(256):
    print(day, [counts[v] for v in range(9)])
    counts = updatedcounts(counts)
print("\n\n", sum(counts.values()), "\n\n")
| 25.257143 | 59 | 0.616516 |
c97337433ecaa8303091ad4ba921fe29802304f0 | 3,287 | py | Python | packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mccomponents/tests/mccomponents/sample/samplecomponent_SQkernel_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
#
standalone = True  # flag marking this as a stand-alone script -- confirm harness usage
import os, numpy as np
# Presumably read by mcvine at import time -- set before any mcvine import.
os.environ['MCVINE_MPI_BINDING'] = 'NONE'
import unittestX as unittest
class TestCase(unittest.TestCase):
    """Integration test: a flat S(Q)=1 kernel must yield a flat simulated S(Q)."""

    def test1(self):
        'mccomponents.sample.samplecomponent: SQkernel'
        # The kernel spec is in sampleassemblies/V-sqkernel/V-scatterer.xml
        # It is a flat kernel S(Q)=1.
        # So the simulation result should have a flat S(Q) too.
        # The following code run a simulation with
        # (1) monochromatic source (2) sample (3) IQE_monitor
        # After the simulation, it test the S(Q) by
        # (1) do a manual "reduction" using the simulated scattered neutrons, and
        # (2) examine the monitor data
        import mcni
        from mcni.utils import conversion
        # instrument
        # 1. source: monochromatic beam at Ei = 60 meV travelling along +z
        from mcni.components.MonochromaticSource import MonochromaticSource
        ei = 60.
        vil = conversion.e2v(ei)
        vi = (0,0,vil)
        neutron = mcni.neutron(r = (0,0,0), v = vi, time = 0, prob = 1 )
        component1 = MonochromaticSource('source', neutron)
        # 2. sample
        from mccomponents.sample import samplecomponent
        component2 = samplecomponent( 'sample', 'sampleassemblies/V-sqkernel/sampleassembly.xml' )
        # 3. monitor
        import mcstas2
        component3 = mcstas2.componentfactory('monitors', 'IQE_monitor')(
            name='monitor', Ei=ei, Qmin=0, Qmax=8., Emin=-10., Emax=10., nQ=20, nE=20)
        # instrument and geometer: sample and monitor registered 1 m downstream
        instrument = mcni.instrument( [component1, component2, component3] )
        geometer = mcni.geometer()
        geometer.register( component1, (0,0,0), (0,0,0) )
        geometer.register( component2, (0,0,1), (0,0,0) )
        geometer.register( component3, (0,0,1), (0,0,0) )
        # neutron buffer
        N0 = 10000
        neutrons = mcni.neutron_buffer(N0)
        #
        # simulate (start from a clean output directory)
        import mcni.SimulationContext
        workdir = "tmp.SQkernel"
        if os.path.exists(workdir):
            import shutil; shutil.rmtree(workdir)
        sim_context = mcni.SimulationContext.SimulationContext(outputdir=workdir)
        mcni.simulate( instrument, geometer, neutrons, context=sim_context )
        #
        # check 1: directly calculate I(Q) from neutron buffer
        from mcni.neutron_storage import neutrons_as_npyarr
        narr = neutrons_as_npyarr(neutrons); narr.shape = N0, 10
        v = narr[:, 3:6]; p = narr[:, 9]
        delta_v_vec = -v + vi; delta_v = np.linalg.norm(delta_v_vec, axis=-1)
        Q = conversion.V2K * delta_v
        I, qbb = np.histogram(Q, 20, weights=p)
        qbc = (qbb[1:] + qbb[:-1])/2
        I=I/qbc; I/=np.mean(I)
        # BUGFIX: count the True entries with .sum(), not .size -- the array
        # returned by np.isclose always has I.size elements, so the original
        # ratio was exactly 1.0 and this assertion could never fail.
        self.assertTrue(1.0*np.isclose(I, 1., atol=0.1).sum()/I.size>0.9)
        #
        # check 2: use data in IQE monitor
        import histogram.hdf as hh
        iqe = hh.load(os.path.join(workdir, 'stepNone', 'iqe_monitor.h5'))
        iq = iqe.sum('energy')
        Q = iq.Q; I = iq.I
        I0 = np.mean(I); I/=I0
        # check that most of the intensity is similar to I0 (same .sum() fix)
        self.assertTrue(1.0*np.isclose(I, 1., atol=0.1).sum()/I.size>0.9)
        return
    pass # end of TestCase
# Run the test suite when the module is executed directly.
if __name__ == "__main__": unittest.main()
# End of file
| 35.728261 | 98 | 0.606632 |
c973d138beb4bdeb8b96079770c98d55a9dad08e | 693 | py | Python | app/ZeroKnowledge/bbs.py | MilkyBoat/AttriChain | ad3a7e5cc58e4add21ffd289d925f73e3367210b | [
"MIT"
] | 5 | 2020-07-10T21:00:28.000Z | 2022-02-23T01:41:01.000Z | app/ZeroKnowledge/bbs.py | MilkyBoat/AttriChain | ad3a7e5cc58e4add21ffd289d925f73e3367210b | [
"MIT"
] | null | null | null | app/ZeroKnowledge/bbs.py | MilkyBoat/AttriChain | ad3a7e5cc58e4add21ffd289d925f73e3367210b | [
"MIT"
] | 4 | 2020-09-13T14:31:45.000Z | 2022-03-23T04:06:38.000Z | from ZeroKnowledge import primality
import random
def goodPrime(p):
    """Return True when p is congruent to 3 mod 4 and probably prime."""
    if p % 4 != 3:
        return False
    return primality.probablyPrime(p, accuracy=100)
def findGoodPrime(numBits=512):
    """Draw random `numBits`-bit integers until one passes goodPrime()."""
    while True:
        candidate = random.getrandbits(numBits)
        if goodPrime(candidate):
            return candidate
def makeModulus(numBits=512):
    """Return the product of two good primes (a Blum integer)."""
    p = findGoodPrime(numBits)
    q = findGoodPrime(numBits)
    return p * q
def parity(n):
    """Return the parity (0 or 1) of the count of set bits in n's binary form."""
    digits = bin(n)[2:]
    return sum(map(int, digits)) % 2
def bbs(modulusLength=512):
    """Return a Blum-Blum-Shub squaring function modulo a fresh Blum integer."""
    modulus = makeModulus(numBits=modulusLength)
    def squareMod(inputInt):
        # x -> x^2 mod N, the BBS one-way permutation step.
        return pow(inputInt, 2, modulus)
    return squareMod
# Demo: apply the one-way permutation to two sample inputs.
if __name__ == "__main__":
    owp = bbs()
    print(owp(70203203))
    print(owp(12389))
| 21 | 66 | 0.685426 |
c9743d63b6769b341831d17f36b94f9161097eb4 | 5,811 | py | Python | differannotate/datastructures.py | zyndagj/differannotate | c73d9df5f82f1cf97340235265a368b16da9c89b | [
"BSD-3-Clause"
] | null | null | null | differannotate/datastructures.py | zyndagj/differannotate | c73d9df5f82f1cf97340235265a368b16da9c89b | [
"BSD-3-Clause"
] | null | null | null | differannotate/datastructures.py | zyndagj/differannotate | c73d9df5f82f1cf97340235265a368b16da9c89b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
###############################################################################
# Author: Greg Zynda
# Last Modified: 12/11/2019
###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2019, Greg Zynda
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from quicksect import IntervalTree
import logging
from differannotate.constants import FORMAT
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN, format=FORMAT)
class dict_index(dict):
    '''
    A modified dictionary class meant to store and increment unique value
    IDs as values are retrieved from keys.  The first lookup of a key
    assigns it the next free integer ID; later lookups return the same ID,
    and direct assignment is deliberately ignored.

    # Usage
    >>> DI = dict_index()
    >>> DI['cat']
    0
    >>> DI['bear']
    1
    >>> DI['cat']
    0
    >>> DI['cat'] = 10
    >>> DI['cat']
    0
    >>> DI.getkey(0)
    'cat'
    >>> DI.getkey(1)
    'bear'
    >>> DI.getkey(2)
    Traceback (most recent call last):
    ...
    KeyError: 2
    >>> DI.getkey('dog')
    Traceback (most recent call last):
    ...
    TypeError: dog
    '''
    def __init__(self):
        super(dict_index, self).__init__()
        # Next ID to hand out; incremented every time a new key is seen.
        self.cur = 0
    def __getitem__(self, key):
        try:
            return super(dict_index, self).__getitem__(key)
        except KeyError:
            # BUG FIX: was a bare ``except:`` which also masked unrelated
            # errors; only a missing key should trigger ID allocation.
            super(dict_index, self).__setitem__(key, self.cur)
            self.cur += 1
            return super(dict_index, self).__getitem__(key)
    def __setitem__(self, key, value):
        # Intentionally a no-op so externally supplied values can never
        # overwrite the auto-assigned IDs (see the doctest above).
        pass
    def getkey(self, val):
        '''
        Reverse lookup: return the key that was assigned ID ``val``.

        # Parameters
        val (int): Should be < len(dict_index)

        # Raises
        TypeError: if val is not an integer
        KeyError: if val does not exist as a value in the dict_index
        '''
        if not isinstance(val, int):
            raise TypeError(val)
        if val >= self.cur:
            raise KeyError(val)
        # BUG FIX: the old code did ``keys[vals.index(val)]`` on the raw
        # dict views, which only works on Python 2 (Python 3 views are not
        # indexable and have no .index).  Iterating items works on both.
        for key, assigned in super(dict_index, self).items():
            if assigned == val:
                return key
        raise KeyError(val)
class iterit(IntervalTree):
    '''
    IntervalTree subclass that remembers the overall [min, max] extent of
    everything added, so the whole tree can be iterated without callers
    tracking the bounds, and that memoises to_set() results per argument
    combination.
    '''
    def __init__(self):
        super(iterit, self).__init__()
        # Overall extent of all added intervals (None until first add()).
        self.min = None
        self.max = None
        # Memo for to_set(), keyed on its (eid, col, strand) arguments.
        self.set_cache = {}
    def add(self, start, end, other=None):
        if self.min is None:
            # First interval establishes the extent.
            self.min = start
            self.max = end
        else:
            self.min = min(self.min, start)
            self.max = max(self.max, end)
        super(iterit, self).add(start, end, other)
    def iterintervals(self):
        # Assumes a quicksect search over the full extent yields every
        # stored interval.
        return super(iterit, self).search(self.min, self.max)
    def iifilter(self, eid, col, strand=False):
        '''
        Return the intervals whose ``data[col]`` equals ``eid``; when a
        strand is supplied, additionally require ``data[0]`` to match it.
        '''
        assert(col >= 1)
        candidates = self.iterintervals()
        if _strand(strand):
            sid = _get_strand(strand)
            return [iv for iv in candidates
                    if len(iv.data) > col and iv.data[col] == eid and iv.data[0] == sid]
        return [iv for iv in candidates
                if len(iv.data) > col and iv.data[col] == eid]
    def searchfilter(self, start, end, eid, col, strand=False):
        '''
        Like iifilter(), but restricted to intervals overlapping
        [start, end] rather than the whole tree.
        '''
        assert(col >= 1)
        candidates = super(iterit, self).search(start, end)
        if _strand(strand):
            sid = _get_strand(strand)
            return [iv for iv in candidates
                    if len(iv.data) > col and iv.data[col] == eid and iv.data[0] == sid]
        return [iv for iv in candidates
                if len(iv.data) > col and iv.data[col] == eid]
    def to_set(self, eid=False, col=False, strand=False):
        '''
        Return a copy of the (cached) set of flattened interval tuples,
        optionally filtered through iifilter().
        '''
        cache_name = (eid, col, strand)
        if cache_name not in self.set_cache:
            if eid or col or strand:
                hits = self.iifilter(eid, col, strand)
            else:
                hits = self.iterintervals()
            self.set_cache[cache_name] = set(map(interval2tuple, hits))
        # Hand back a copy so callers cannot mutate the cached set.
        return self.set_cache[cache_name].copy()
def _strand(strand):
return not isinstance(strand, bool)
strand_dict = {'+':0, '-':1, 0:'+', 1:'-'}
def _get_strand(strand):
if isinstance(strand, int):
return strand
elif isinstance(strand, str):
return strand_dict[strand]
else:
raise ValueError(strand)
def interval2tuple(interval):
    '''
    Flatten an interval into a plain tuple: (start, end) followed by the
    contents of ``interval.data`` when a truthy payload is present.
    '''
    base = (interval.start, interval.end)
    if interval.data:
        return base + tuple(interval.data)
    return base
if __name__ == "__main__":
    # Running the module directly executes the embedded doctests.
    import doctest
    doctest.testmod()
| 30.584211 | 134 | 0.66202 |
c9743e069ad8fe0a795c53358dc5e0951de0d7c7 | 2,113 | py | Python | examples/regional_constant_preservation/plotCurve.py | schoonovernumerics/FEOTs | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | null | null | null | examples/regional_constant_preservation/plotCurve.py | schoonovernumerics/FEOTs | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | 13 | 2017-08-03T22:30:25.000Z | 2019-01-23T16:32:28.000Z | examples/regional_constant_preservation/plotCurve.py | schoonovernumerics/FEOTS | d8bf24d0e0c23a9ee65e2be6a75f5dbc83d3e5ad | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
DOC="""plotCurve
plotCurve is used to create vertical profiles of different lateral ylabel statistics of FEOTS output.
Usage:
plotCurve plot <file> [--out=<out>] [--opts=<opts>] [--scalex=<scalex>] [--xlabel=<xlabel>] [--ylabel=<ylabel>]
Commands:
plot Create a vertical profile plot of the chosen statistics for the given FEOTS output ylabel.
Options:
-h --help Display this help screen
--out=<out> The path to place the output files [default: ./]
--opts=<opts> Comma separated list of plot options. [default: none]
--scalex=<scalex> Amount to scale the x dimension by for the plot (multiplicative). [default: 1.0]
--xlabel=<xlabel> Label for the x-dimension in the plot. [default: x]
--ylabel=<ylabel> Label for the y-dimension in the plot. [default: y]
"""
import numpy as np
from matplotlib import pyplot as plt
from docopt import docopt
import feotsPostProcess as feots
def parse_cli():
    """Parse sys.argv against the docopt usage specification in DOC."""
    args = docopt(DOC,version='plotCurve 0.0.0')
    return args
#END parse_cli
def loadCurve(filename):
    """Read a comma-separated curve file, skipping the single header row."""
    return np.loadtxt(filename, delimiter=",", skiprows=1)
#END loadCurve
def plotCurve(curveData, opts, scalex, xlabel, ylabel, plotfile):
    """
    Plot a curve (column 0 = x, column 1 = y) with the area under it
    shaded, and save the figure to ``plotfile``.

    Parameters
    ----------
    curveData : ndarray   two-column array as returned by loadCurve()
    opts      : list      may contain 'logx' and/or 'logy'
    scalex    : float     multiplicative scale applied to the x column
    xlabel, ylabel : str  axis labels
    plotfile  : str       output image path handed to savefig()
    """
    f, ax = plt.subplots()
    # BUG FIX: matplotlib Axes has no ``fillbetween`` method; the correct
    # API is ``fill_between`` -- the old call raised AttributeError.
    ax.fill_between(curveData[:,0]*scalex, curveData[:,1], color=(0.8,0.8,0.8,0.8))
    ax.plot(curveData[:,0]*scalex, curveData[:,1], marker='', color='black', linewidth=2)
    if 'logx' in opts:
        ax.set(xscale='log')
    if 'logy' in opts:
        ax.set(yscale='log')
    ax.grid(color='gray', linestyle='-', linewidth=1)
    ax.set(xlabel=xlabel, ylabel=ylabel)
    f.savefig(plotfile)
    plt.close('all')
#END plotCurve
def main():
    """Entry point: parse CLI arguments and render the requested plot."""
    args = parse_cli()
    if args['plot']:
        xlabel = args['--xlabel']
        # BUG FIX: docopt yields option values as strings; the scale is
        # used multiplicatively on the x data, so convert it to a float
        # (the old code multiplied a numpy array by a str and failed).
        scalex = float(args['--scalex'])
        ylabel = args['--ylabel']
        opts = args['--opts'].split(',')
        curveData = loadCurve(args['<file>'])
        outFile = args['--out']
        plotCurve(curveData, opts, scalex, xlabel, ylabel, outFile)
#END main
if __name__ == '__main__':
    main()
| 26.08642 | 114 | 0.644108 |
c974860e7717afdaa174abddb3959a9916ac8f90 | 6,535 | py | Python | statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py | authuir/flink-statefun | ca16055de31737a8a0073b8f9083268fc24b9828 | [
"Apache-2.0"
] | 1 | 2020-05-27T03:38:36.000Z | 2020-05-27T03:38:36.000Z | statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py | authuir/flink-statefun | ca16055de31737a8a0073b8f9083268fc24b9828 | [
"Apache-2.0"
] | null | null | null | statefun-examples/statefun-python-walkthrough-example/walkthrough_pb2.py | authuir/flink-statefun | ca16055de31737a8a0073b8f9083268fc24b9828 | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: walkthrough.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='walkthrough.proto',
package='walkthrough',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11walkthrough.proto\x12\x0bwalkthrough\"\x16\n\x05Hello\x12\r\n\x05world\x18\x01 \x01(\t\"\x0e\n\x0c\x41notherHello\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x07\n\x05\x45ventb\x06proto3')
)
_HELLO = _descriptor.Descriptor(
name='Hello',
full_name='walkthrough.Hello',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='world', full_name='walkthrough.Hello.world', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=56,
)
_ANOTHERHELLO = _descriptor.Descriptor(
name='AnotherHello',
full_name='walkthrough.AnotherHello',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=72,
)
_COUNTER = _descriptor.Descriptor(
name='Counter',
full_name='walkthrough.Counter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='walkthrough.Counter.value', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=98,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='walkthrough.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='walkthrough.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=129,
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='walkthrough.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=138,
)
DESCRIPTOR.message_types_by_name['Hello'] = _HELLO
DESCRIPTOR.message_types_by_name['AnotherHello'] = _ANOTHERHELLO
DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Hello = _reflection.GeneratedProtocolMessageType('Hello', (_message.Message,), dict(
DESCRIPTOR = _HELLO,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Hello)
))
_sym_db.RegisterMessage(Hello)
AnotherHello = _reflection.GeneratedProtocolMessageType('AnotherHello', (_message.Message,), dict(
DESCRIPTOR = _ANOTHERHELLO,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.AnotherHello)
))
_sym_db.RegisterMessage(AnotherHello)
Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict(
DESCRIPTOR = _COUNTER,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Counter)
))
_sym_db.RegisterMessage(Counter)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'walkthrough_pb2'
# @@protoc_insertion_point(class_scope:walkthrough.Event)
))
_sym_db.RegisterMessage(Event)
# @@protoc_insertion_point(module_scope)
| 28.413043 | 286 | 0.72303 |
c9778ad426ae5b59849224563d916aed7af67c6a | 2,438 | py | Python | dnsdetect.py | bhaveshgoyal/DNSpoPy | 978beae9028b6122f1d9c1e7316e630ed466a628 | [
"MIT"
] | 3 | 2022-01-03T12:10:41.000Z | 2022-03-21T22:14:51.000Z | dnsdetect.py | bhaveshgoyal/DNSpoPy | 978beae9028b6122f1d9c1e7316e630ed466a628 | [
"MIT"
] | null | null | null | dnsdetect.py | bhaveshgoyal/DNSpoPy | 978beae9028b6122f1d9c1e7316e630ed466a628 | [
"MIT"
] | null | null | null | #import pcap
#import dpkt
#import dnet
from collections import defaultdict
from scapy.all import *
from scapy.all import send as ssend
import netifaces
import getopt
import datetime
conf.sniff_promisc=True
pcap_specified = False
detection_map = defaultdict(list)
def detect_poison(pkt):
global pcap_specified
if IP in pkt:
ip_src = pkt[IP].src
ip_dst = pkt[IP].dst
if pkt.haslayer(DNSRR) and len(pkt[Ether]) > 60 and len(pkt[UDP]) > 8:
key = str(pkt[DNS].id) + str(pkt[DNS].qd.qname) + str(pkt[IP].sport) + ">" + str(pkt[IP].dst) + ":" + str(pkt[IP].dport)
if key in detection_map.keys() and str(pkt[IP].payload is not detection_map[key][0]):
date = datetime.datetime.fromtimestamp(pkt.time)
print str(date) + " DNS Poisoning attempt"
print "TXID 0x" + str(pkt[DNS].id) + " Request " + str(pkt[DNS].qd.qname)
print "Answer 1 ",
list_a1 = []
for i in range(pkt[DNS].ancount):
dnsrr = pkt[DNS].an[i]
list_a1.append(dnsrr.rdata)
print list_a1
print "Answer 2",
if len(detection_map[key]) > 2:
print detection_map[key][2:]
else:
print detection_map[key][1]
print "\n"
else:
detection_map[key] = [str(pkt[IP].payload), "Non A type Response"]
for i in range(pkt[DNS].ancount):
dnsrr = pkt[DNS].an[i]
detection_map[key].append(str(dnsrr.rdata))
def main():
global pcap_specified
interface = netifaces.gateways()['default'][netifaces.AF_INET][1]
try:
opt, exp = getopt.getopt(sys.argv[1:], "i:r:", ["interface", "tracefile"])
except getopt.GetoptError as err:
print "DNSpoPy: Usage Error:",
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
for o, a in opt:
if o in ("-i", "--interface"):
interface = a
print "interface: " + a
elif o in ("-r", "--tracefile"):
pcap_specified = True
print "Reading from Tracefile: " + a + "\n"
trace_file = a
else:
assert False, "Option not recognized"
fexp = 'port 53'
if len(exp) > 0:
fexp += ' and ' + ' '.join(exp)
print "Detecting poisoning attempts on interface: " + str(interface)
try:
if pcap_specified:
sniff(offline= trace_file, filter = fexp, prn = detect_poison, store = 0)
else:
sniff(iface = str(interface), filter = fexp, prn = detect_poison, store = 0)
except:
print "DNSpoPy: Something went wrong while sniffing packets"
return
if __name__ == "__main__":
main()
| 29.731707 | 123 | 0.651354 |
c977bbeabde9764661a77f5cb005a889127439bd | 534 | py | Python | yeti/core/entities/malware.py | Darkheir/TibetanBrownBear | c3843daa4f84730e733c2dde1cda7739e6cdad8e | [
"Apache-2.0"
] | 9 | 2018-01-15T22:44:24.000Z | 2021-05-28T11:13:03.000Z | yeti/core/entities/malware.py | Darkheir/TibetanBrownBear | c3843daa4f84730e733c2dde1cda7739e6cdad8e | [
"Apache-2.0"
] | 140 | 2018-01-12T10:07:47.000Z | 2021-08-02T23:03:49.000Z | yeti/core/entities/malware.py | Darkheir/TibetanBrownBear | c3843daa4f84730e733c2dde1cda7739e6cdad8e | [
"Apache-2.0"
] | 11 | 2018-01-16T19:49:35.000Z | 2022-01-18T16:30:34.000Z | """Detail Yeti's Malware object structure."""
from .entity import Entity
class Malware(Entity):
    """Malware Yeti object.
    Extends the Malware STIX2 definition.
    """
    # NOTE(review): presumably the datastore collection backing these
    # objects -- confirm against the Entity base class.
    _collection_name = 'entities'
    # Type discriminator; used below to register this class in
    # Entity.datatypes.
    type = 'malware'
    @property
    def name(self):
        """Name attribute of the wrapped STIX2 object."""
        return self._stix_object.name
    @property
    def description(self):
        """Description attribute of the wrapped STIX2 object."""
        return self._stix_object.description
    @property
    def kill_chain_phases(self):
        """Kill-chain phases of the wrapped STIX2 object."""
        return self._stix_object.kill_chain_phases
# Register Malware so generic Entity machinery can resolve the 'malware'
# type discriminator to this class.
Entity.datatypes[Malware.type] = Malware
| 19.777778 | 50 | 0.683521 |
c978b614564b15ad98ff9be9b231eda20bb8f13d | 6,405 | py | Python | python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-05-10T22:19:44.000Z | 2020-07-21T07:28:39.000Z | python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 187 | 2018-04-13T17:19:24.000Z | 2020-04-21T00:41:15.000Z | python/dsbox/template/template_files/loaded/SRIClassificationTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-07-10T00:14:07.000Z | 2019-07-25T17:59:44.000Z | from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class SRIClassificationTemplate(DSBoxTemplate):
def __init__(self):
DSBoxTemplate.__init__(self)
self.template = {
"weight": 30,
"name": "SRI_classification_template",
"taskSubtype": {TaskKeyword.VERTEX_CLASSIFICATION.name},
"taskType": {TaskKeyword.VERTEX_CLASSIFICATION.name},
# "taskType": {TaskKeyword.VERTEX_CLASSIFICATION.name, TaskKeyword.COMMUNITY_DETECTION.name, TaskKeyword.LINK_PREDICTION.name, TaskKeyword.TIME_SERIES.name},
# "taskSubtype": {"NONE", TaskKeyword.NONOVERLAPPING.name, TaskKeyword.OVERLAPPING.name, TaskKeyword.MULTICLASS.name, TaskKeyword.BINARY.name, TaskKeyword.MULTILABEL.name, TaskKeyword.MULTIVARIATE.name, TaskKeyword.UNIVARIATE.name, TaskKeyword.TIME_SERIES.name},
#"inputType": "table",
"inputType": {"edgeList", "graph", "table"},
"output": "prediction_step",
"steps": [
{
"name": "text_reader_step",
"primitives": ["d3m.primitives.data_preprocessing.dataset_text_reader.DatasetTextReader"],
"inputs": ["template_input"]
},
{
"name": "denormalize_step",
"primitives": ["d3m.primitives.data_transformation.denormalize.Common"],
"inputs": ["text_reader_step"]
},
{
"name": "to_dataframe_step",
"primitives": ["d3m.primitives.data_transformation.dataset_to_dataframe.Common"],
"inputs": ["denormalize_step"]
},
{
"name": "common_profiler_step",
"primitives": ["d3m.primitives.schema_discovery.profiler.Common"],
"inputs": ["to_dataframe_step"]
},
{
"name": "parser_step",
"primitives": ["d3m.primitives.data_transformation.column_parser.Common"],
"inputs": ["common_profiler_step"]
},
{
"name": "pre_extract_target_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TrueTarget',),
'use_columns': (),
'exclude_columns': ()
}
}],
"inputs": ["parser_step"]
},
{
"name": "extract_target_step",
"primitives": ["d3m.primitives.data_transformation.simple_column_parser.DataFrameCommon"],
"inputs": ["pre_extract_target_step"]
},
{
"name": "extract_attribute_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': ('https://metadata.datadrivendiscovery.org/types/Attribute',),
}
}],
"inputs": ["parser_step"]
},
{
"name": "data_conditioner_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.conditioner.Conditioner",
"hyperparameters":
{
"ensure_numeric":[True],
"maximum_expansion": [30]
}
}],
"inputs": ["extract_attribute_step"]
},
{
"name": "model_step",
"primitives": [
{
"primitive": "d3m.primitives.classification.bernoulli_naive_bayes.SKlearn",
"hyperparameters": {
'alpha': [0.1, 1.0],
'binarize': [0.0],
'fit_prior': [False],
'return_result': ["new"],
'use_semantic_types': [False],
'add_index_columns': [False],
'error_on_no_input':[True],
}
},
{
"primitive": "d3m.primitives.regression.gradient_boosting.SKlearn",
"hyperparameters": {
'max_depth': [5, 8],
'learning_rate': [0.3, 0.5],
'min_samples_split': [2, 3, 6],
'min_samples_leaf': [1, 2],
'criterion': ["mse"],
'n_estimators': [100, 150],
'fit_prior': [False],
'return_result': ["new"],
'use_semantic_types': [False],
'add_index_columns': [False],
'error_on_no_input':[True],
}
},
{"primitive": "d3m.primitives.classification.random_forest.SKlearn"
}
],
"inputs": ["extract_attribute_step2", "extract_target_step"]
},
{
"name": "prediction_step",
"primitives": ["d3m.primitives.data_transformation.construct_predictions.Common"],
"inputs": ["model_step", "to_dataframe_step"]
}
]
}
| 48.157895 | 274 | 0.444653 |
c978cd7b9db932291bd60fddc562ff295cb80fc4 | 192 | py | Python | beecrowd exercises/beecrowd-1019.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | beecrowd exercises/beecrowd-1019.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | beecrowd exercises/beecrowd-1019.py | pachecosamuel/Python-Exercises | de542536dd1a2bc0ad27e81824713cda8ad34054 | [
"MIT"
] | null | null | null | time = eval(input())
qtdtime = [3600, 60, 1]
result = []
for i in qtdtime:
qtd = time // i
result.append(str(qtd))
time -= qtd * i
print(f'{result[0]}:{result[1]}:{result[2]}')
| 16 | 45 | 0.557292 |
c979df9649b375b708736b82938ddd72a6f161b7 | 161 | py | Python | Retired/How many times mentioned.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | Retired/How many times mentioned.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | Retired/How many times mentioned.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | null | null | null | from collections import Counter
def count_mentioned(con, names):
    """Case-insensitively count occurrences of each name within *con*.

    Returns one count per entry of *names*, or None when *names* is empty.
    """
    haystack = con.lower()
    counts = [haystack.count(name.lower()) for name in names]
    return counts or None
c97a5d77ecd44aba596f1a6d89d78783ed1f6a39 | 5,458 | py | Python | bigorm/database.py | AnthonyPerez/bigorm | 67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb | [
"MIT"
] | null | null | null | bigorm/database.py | AnthonyPerez/bigorm | 67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb | [
"MIT"
] | 3 | 2020-04-06T19:13:58.000Z | 2020-05-22T22:21:31.000Z | bigorm/database.py | AnthonyPerez/bigorm | 67ecdbb1f99cd5c8ec2ca24c7ba5f5dbed7493bb | [
"MIT"
] | null | null | null | import threading
import functools
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class DatabaseContextError(RuntimeError):
    """Raised when session/engine access happens outside any DatabaseContext."""
    pass
"""
Once an engine is created is is not destroyed until the program itself exits.
Engines are used to produce a new session when a context is entered.
When a context is exited, the session for that context is destroyed.
"""
# Per-thread state: an engine cache and a stack of open sessions (see
# DatabaseContext below).
global_database_context = threading.local()
class DatabaseContext(object):
    """
    Context manager that maintains a per-thread stack of SQLAlchemy
    sessions, reusing one cached engine per distinct set of
    create_engine arguments.

    This is fairly complicated. Follow these rules:
    1) Do not create threads inside a DatabaseContext.  If you do, the
       new thread will not see the context (all state is thread-local).
    2) With async/await asynchronous programming, enter contexts in
       atomic blocks (do not await while inside a context).

    Usage:
        with DatabaseContext(...):
            ...
    """
    @classmethod
    def __get_engines(_):
        # Lazily create this thread's engine cache, keyed by the
        # create_engine arguments.
        if not hasattr(global_database_context, 'engines'):
            global_database_context.engines = {}
        return global_database_context.engines
    @classmethod
    def __get_sessions(_):
        # Lazily create this thread's stack of (engine, session) pairs.
        if not hasattr(global_database_context, 'sessions'):
            global_database_context.sessions = []
        return global_database_context.sessions
    @classmethod
    def get_session(_):
        # Session of the innermost active context on this thread.
        sessions = DatabaseContext.__get_sessions()
        if len(sessions) == 0:
            raise DatabaseContextError('Session not established, did you create a DatabaseContext?')
        _, session = sessions[-1]
        return session
    @classmethod
    def get_engine(_):
        # Engine of the innermost active context on this thread.
        sessions = DatabaseContext.__get_sessions()
        if len(sessions) == 0:
            raise DatabaseContextError('Session not established, did you create a DatabaseContext?')
        engine, _ = sessions[-1]
        return engine
    @classmethod
    def is_in_context(_):
        # True when at least one context is active on this thread.
        sessions = DatabaseContext.__get_sessions()
        return len(sessions) > 0
    def __init__(self, *args, **kwargs):
        """
        All arguments are forwarded to create_engine
        """
        self.args = args
        self.kwargs = kwargs
    def __enter__(self):
        # Engines are cached per unique (args, kwargs) combination; the
        # kwargs are sorted so equivalent dicts produce the same key.
        key = (tuple(self.args), tuple(sorted(list(self.kwargs.items()))))
        engine, Session = DatabaseContext.__get_engines().get(key, (None, None))
        if engine is None:
            engine = sqlalchemy.create_engine(
                *self.args,
                **self.kwargs
            )
            Session = sqlalchemy.orm.sessionmaker(bind=engine)
            DatabaseContext.__get_engines()[key] = (engine, Session)
        # Every entry gets a brand-new session pushed onto the stack.
        new_session = Session()
        DatabaseContext.__get_sessions().append(
            (engine, new_session)
        )
    def __exit__(self, exception_type, exception_value, traceback):
        _, session = DatabaseContext.__get_sessions().pop()
        try:
            if exception_type is not None:
                # There was an exception, roll back.
                session.rollback()
        finally:
            # Always close, even if the rollback itself raised.
            session.close()
class BigQueryDatabaseContext(DatabaseContext):
    def __init__(self, project='', default_dataset='', **kwargs):
        """
        Args:
            project (Optional[str]): The project name; defaults to the
                credentials' default project.
            default_dataset (Optional[str]): The default dataset, used
                when a table's __tablename__ does not reference one.
            **kwargs (kwargs): Appended verbatim to the connection string
                as '?key=value&...' query parameters (e.g.
                credentials_path, location, arraysize, clustering_fields,
                create_disposition, destination, dry_run, labels,
                maximum_bytes_billed, priority, schema_update_options,
                use_query_cache, write_disposition).  These mirror the
                BigQuery job-configuration options:
                https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html#google.cloud.bigquery.job.QueryJobConfig
        """
        connection_str = 'bigquery://{}/{}'.format(project, default_dataset)
        if kwargs:
            params = ['{}={}'.format(key, value) for key, value in kwargs.items()]
            connection_str += '?' + '&'.join(params)
        super(BigQueryDatabaseContext, self).__init__(connection_str)
def requires_database_context(f):
    """
    Decorator that makes the wrapped function raise DatabaseContextError
    whenever it is invoked outside an active DatabaseContext.
    """
    @functools.wraps(f)
    def guarded(*args, **kwargs):
        if not DatabaseContext.is_in_context():
            raise DatabaseContextError('Session not established, did you create a DatabaseContext?')
        return f(*args, **kwargs)
    return guarded
| 34.327044 | 177 | 0.622206 |
c97adf2252f256379d95e93fa38af238ed647da0 | 114 | py | Python | Leetcode/1323. Maximum 69 Number/solution1.py | asanoviskhak/Outtalent | c500e8ad498f76d57eb87a9776a04af7bdda913d | [
"MIT"
] | 51 | 2020-07-12T21:27:47.000Z | 2022-02-11T19:25:36.000Z | Leetcode/1323. Maximum 69 Number/solution1.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | null | null | null | Leetcode/1323. Maximum 69 Number/solution1.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | 32 | 2020-07-27T13:54:24.000Z | 2021-12-25T18:12:50.000Z | class Solution:
def maximum69Number(self, num: int) -> int:
return int(str(num).replace('6', '9', 1))
| 28.5 | 49 | 0.605263 |
c97aeafdeaa32ce81d91fe53e55f4082c9dd290e | 444 | py | Python | src/rover/project/code/decision.py | juancruzgassoloncan/Udacity-Robo-nanodegree | 7621360ce05faf90660989e9d28f56da083246c9 | [
"MIT"
] | 1 | 2020-12-28T13:58:34.000Z | 2020-12-28T13:58:34.000Z | src/rover/project/code/decision.py | juancruzgassoloncan/Udacity-Robo-nanodegree | 7621360ce05faf90660989e9d28f56da083246c9 | [
"MIT"
] | null | null | null | src/rover/project/code/decision.py | juancruzgassoloncan/Udacity-Robo-nanodegree | 7621360ce05faf90660989e9d28f56da083246c9 | [
"MIT"
] | null | null | null | import numpy as np
from rover_sates import *
from state_machine import *
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover, machine):
    """
    Choose this frame's throttle/brake/steer commands.

    When perception supplied nav angles, defer to the state machine;
    otherwise fall back to driving straight ahead at the default
    throttle with no brake or steering.
    """
    if Rover.nav_angles is None:
        # No navigation data yet: creep forward blindly.
        Rover.throttle = Rover.throttle_set
        Rover.steer = 0
        Rover.brake = 0
    else:
        machine.run()
    return Rover
| 23.368421 | 87 | 0.702703 |
c97ce1f34312b0218b91e4e2faa6b094d0a6ab72 | 188 | py | Python | iotbot/logger.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | 1 | 2020-10-05T01:09:15.000Z | 2020-10-05T01:09:15.000Z | iotbot/logger.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | null | null | null | iotbot/logger.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | null | null | null | import sys
from loguru import logger
# Drop loguru's default sink and replace it with a single colorized stdout
# sink using a compact "icon timestamp LEVEL<tab>message" line format.
logger.remove()
logger.add(
    sys.stdout,
    format='{level.icon} {time:YYYY-MM-DD HH:mm:ss} <lvl>{level}\t{message}</lvl>',
    colorize=True,
)
| 17.090909 | 83 | 0.664894 |
c97d3cc7b903e622320da5991308503b0ba6a84c | 1,770 | py | Python | cloudcafe/networking/lbaas/common/types.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/networking/lbaas/common/types.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/networking/lbaas/common/types.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | 1 | 2020-04-13T17:44:28.000Z | 2020-04-13T17:44:28.000Z | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class LBaaSStatusTypes(object):
    """Enumerated provisioning/operational statuses for LBaaS entities.

    DEFERRED        -- created but not yet linked to a load balancer
                       (not a functioning state).
    PENDING_CREATE  -- creation in progress; not functioning yet.
    PENDING_UPDATE  -- update applied; remains in a functioning state.
    PENDING_DELETE  -- deletion in progress.
    ACTIVE          -- normal functioning state.
    INACTIVE        -- applies to members that fail health checks.
    ERROR           -- something went wrong; may or may not be functioning.

    Note: this class is essentially an enumerated type.
    """
    DEFERRED = "DEFERRED"
    PENDING_CREATE = "PENDING_CREATE"
    PENDING_UPDATE = "PENDING_UPDATE"
    PENDING_DELETE = "PENDING_DELETE"
    ACTIVE = "ACTIVE"
    INACTIVE = "INACTIVE"
    ERROR = "ERROR"
| 36.875 | 76 | 0.716949 |
c97d6ba493e05a165ce59471439dfde7e1eb3a10 | 2,953 | py | Python | utils.py | sthagen/example-app-report | dedb70755debfbe959d00515b101314dfeed6ec1 | [
"MIT"
] | 1 | 2021-09-05T18:12:27.000Z | 2021-09-05T18:12:27.000Z | utils.py | sthagen/example-app-report | dedb70755debfbe959d00515b101314dfeed6ec1 | [
"MIT"
] | null | null | null | utils.py | sthagen/example-app-report | dedb70755debfbe959d00515b101314dfeed6ec1 | [
"MIT"
] | null | null | null | import os
from dash import dcc, html
URL_PATH_SEP = '/'
# Base path under which the report app is served; override via env var.
URL_BASE_PATHNAME = os.getenv('REPORT_URL_BASE', URL_PATH_SEP)
# Guarantee a trailing slash. str.endswith also handles an empty override
# (the original ``URL_BASE_PATHNAME[-1]`` raised IndexError on '').
if not URL_BASE_PATHNAME.endswith(URL_PATH_SEP):
    URL_BASE_PATHNAME += URL_PATH_SEP
def Header(app):
    """Top-of-page container: logo/header row, a spacer, then the tab menu."""
    header_row = get_header(app)
    menu_row = get_menu()
    return html.Div([header_row, html.Br([]), menu_row])
def get_header(app):
    """Build the report page header: a logo/button row plus a title row.

    :param app: the Dash application (used to resolve the logo asset URL).
    :return: an ``html.Div`` containing the assembled header.
    """
    header = html.Div(
        [
            # Row 1: clickable logo plus two external-link buttons.
            html.Div(
                [
                    html.A(
                        html.Img(
                            src=app.get_asset_url("example-logo.png"),
                            className="logo",
                        ),
                        href="https://example.com/home/",
                    ),
                    html.A(
                        html.Button(
                            "Something Else",
                            id="learn-more-button",
                            style={"margin-left": "-10px"},
                        ),
                        href="https://example.com/learn/",
                    ),
                    # NOTE(review): this button reuses id "learn-more-button";
                    # Dash component ids are expected to be unique per layout —
                    # confirm no callback targets this id before relying on it.
                    html.A(
                        html.Button("Still Different", id="learn-more-button"),
                        href="https://github.com/sthagen/example-app-report/",
                    ),
                ],
                className="row",
            ),
            # Row 2: main title on the left, "Full View" link on the right.
            html.Div(
                [
                    html.Div(
                        [html.H5("Report Main Title")],
                        className="seven columns main-title",
                    ),
                    html.Div(
                        [
                            dcc.Link(
                                "Full View",
                                href=f"{URL_BASE_PATHNAME}full-view",
                                className="full-view-link",
                            )
                        ],
                        className="five columns",
                    ),
                ],
                className="twelve columns",
                style={"padding-left": "0"},
            ),
        ],
        className="row",
    )
    return header
def get_menu():
    """Row of navigation tabs linking to the report sub-pages."""
    # (label, page slug, css class) for each tab, in display order.
    tab_specs = (
        ("Overview", "overview", "tab first"),
        ("Details Wun", "wun", "tab"),
        ("Details Two", "two", "tab"),
    )
    tabs = [
        dcc.Link(label, href=f"{URL_BASE_PATHNAME}{page}", className=css_class)
        for label, page, css_class in tab_specs
    ]
    return html.Div(tabs, className="row all-tabs")
def make_dash_table(df):
    """Return a dash definition of an HTML table for a Pandas dataframe.

    Each dataframe row becomes an ``html.Tr`` whose cells wrap the row's
    values in ``html.Td``. The index is not rendered.

    :param df: pandas DataFrame to render.
    :return: list of ``html.Tr`` components.
    """
    # Iterate the row values directly instead of indexing with range(len(row)).
    return [
        html.Tr([html.Td([cell]) for cell in row])
        for _, row in df.iterrows()
    ]
c97e6b1f40a5bb81ae2c559b1a1285a802b08835 | 53 | py | Python | social/backends/ubuntu.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 1,987 | 2015-01-01T16:12:45.000Z | 2022-03-29T14:24:25.000Z | social/backends/ubuntu.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 731 | 2015-01-01T22:55:25.000Z | 2022-03-10T15:07:51.000Z | virtual/lib/python3.6/site-packages/social/backends/ubuntu.py | dennismwaniki67/awards | 80ed10541f5f751aee5f8285ab1ad54cfecba95f | [
"MIT"
] | 1,082 | 2015-01-01T16:27:26.000Z | 2022-03-22T21:18:33.000Z | from social_core.backends.ubuntu import UbuntuOpenId
| 26.5 | 52 | 0.886792 |
c97f4aad4afc2d34135bd0a531bcabb3725f19f6 | 10,715 | py | Python | tests/unit/states/test_libvirt.py | cvedel/salt | 8731f42829ca1f0a38d2434057c485abeff222a7 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/states/test_libvirt.py | cvedel/salt | 8731f42829ca1f0a38d2434057c485abeff222a7 | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/unit/states/test_libvirt.py | cvedel/salt | 8731f42829ca1f0a38d2434057c485abeff222a7 | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# pylint: disable=3rd-party-module-not-gated
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import tempfile
import shutil
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
mock_open,
patch)
# Import Salt Libs
import salt.states.virt as virt
import salt.utils.files
class LibvirtMock(MagicMock):  # pylint: disable=too-many-ancestors
    '''
    Stand-in for the ``libvirt`` python bindings, injected into the
    loader globals of ``salt.states.virt`` for the tests below.
    '''

    class libvirtError(Exception):  # pylint: disable=invalid-name
        '''
        Mirrors ``libvirt.libvirtError`` so code under test can raise
        and catch it without the real library installed.
        '''
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LibvirtTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.states.libvirt
    '''
    def setup_loader_modules(self):
        # Inject the libvirt mock into the virt state's loader globals.
        self.mock_libvirt = LibvirtMock()  # pylint: disable=attribute-defined-outside-init
        self.addCleanup(delattr, self, 'mock_libvirt')
        loader_globals = {
            'libvirt': self.mock_libvirt
        }
        return {virt: loader_globals}

    @classmethod
    def setUpClass(cls):
        # Scratch PKI directory shared by all 'keys' tests.
        cls.pki_dir = tempfile.mkdtemp(dir=TMP)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.pki_dir)
        del cls.pki_dir

    def _check_keys(self, name, **kwargs):
        '''
        Shared scenario for ``virt.keys``: run the state three times with the
        given extra keyword arguments and assert the expected return dict for
        each phase:

        1. all keys already correct (no changes),
        2. ``test=True`` -> keys reported as "to be updated" (result None),
        3. ``test=False`` -> keys actually written (servercert changed).

        The pillar.ext mock feeds one value per ``virt.keys`` call, in order.
        '''
        with patch('os.path.isfile', MagicMock(return_value=False)):
            ret = {'name': name,
                   'result': True,
                   'comment': '',
                   'changes': {}}
            mock = MagicMock(side_effect=[[], ['libvirt.servercert.pem'],
                                          {'libvirt.servercert.pem': 'A'}])
            with patch.dict(virt.__salt__, {'pillar.ext': mock}):  # pylint: disable=no-member
                ret.update({'comment': 'All keys are correct'})
                self.assertDictEqual(
                    virt.keys(name, basepath=self.pki_dir, **kwargs), ret)

                with patch.dict(virt.__opts__, {'test': True}):  # pylint: disable=no-member
                    ret.update({'comment': 'Libvirt keys are set to be updated',
                                'result': None})
                    self.assertDictEqual(
                        virt.keys(name, basepath=self.pki_dir, **kwargs), ret)

                with patch.dict(virt.__opts__, {'test': False}):  # pylint: disable=no-member
                    with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())):
                        ret.update({'comment': 'Updated libvirt certs and keys',
                                    'result': True,
                                    'changes': {'servercert': 'new'}})
                        self.assertDictEqual(
                            virt.keys(name, basepath=self.pki_dir, **kwargs), ret)

    # 'keys' function tests: 4 (all delegate to _check_keys)

    def test_keys(self):
        '''
        Test to manage libvirt keys with default options.
        '''
        self._check_keys('sunrise')

    def test_keys_with_expiration_days(self):
        '''
        Test to manage libvirt keys with an explicit expiration.
        '''
        self._check_keys('sunrise', expiration_days=700)

    def test_keys_with_state(self):
        '''
        Test to manage libvirt keys with a certificate state (st) set.
        '''
        self._check_keys('sunrise', st='California')

    def test_keys_with_all_options(self):
        '''
        Test to manage libvirt keys with every certificate option set.
        '''
        self._check_keys('sunrise',
                         country='USA',
                         st='California',
                         locality='Los_Angeles',
                         organization='SaltStack',
                         expiration_days=700)

    def test_running(self):
        '''
        running state test cases.
        '''
        ret = {'name': 'myvm',
               'changes': {},
               'result': True,
               'comment': 'myvm is running'}

        # Stopped domain starts successfully.
        with patch.dict(virt.__salt__, {  # pylint: disable=no-member
                    'virt.vm_state': MagicMock(return_value='stopped'),
                    'virt.start': MagicMock(return_value=0)
                }):
            ret.update({'changes': {'myvm': 'Domain started'},
                        'comment': 'Domain myvm started'})
            self.assertDictEqual(virt.running('myvm'), ret)

        # libvirt error while starting is surfaced in the comment.
        with patch.dict(virt.__salt__, {  # pylint: disable=no-member
                    'virt.vm_state': MagicMock(return_value='stopped'),
                    'virt.start': MagicMock(side_effect=[self.mock_libvirt.libvirtError('libvirt error msg')])
                }):
            ret.update({'changes': {}, 'result': False, 'comment': 'libvirt error msg'})
            self.assertDictEqual(virt.running('myvm'), ret)
| 44.832636 | 110 | 0.466729 |
c97fb65ad57fdb00bbfc2ec6a2a6804e6bf866f3 | 50 | py | Python | src/__init__.py | ProfessorManhattan/mod-ansible-autodoc | 3cc6fd25f817efc63ec0fc158088d637dd689ad3 | [
"MIT"
] | null | null | null | src/__init__.py | ProfessorManhattan/mod-ansible-autodoc | 3cc6fd25f817efc63ec0fc158088d637dd689ad3 | [
"MIT"
] | null | null | null | src/__init__.py | ProfessorManhattan/mod-ansible-autodoc | 3cc6fd25f817efc63ec0fc158088d637dd689ad3 | [
"MIT"
] | null | null | null | __name__ = "{{Name}}"
__version__ = "{{Version}}"
| 16.666667 | 27 | 0.6 |
c9801e27d75fc448c57278f4f2febd70cf000239 | 3,203 | py | Python | alfred/views/main_widget.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 5 | 2016-09-06T10:29:24.000Z | 2017-02-22T14:07:48.000Z | alfred/views/main_widget.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 66 | 2016-09-06T06:40:24.000Z | 2022-03-11T23:18:05.000Z | alfred/views/main_widget.py | Sefrwahed/Alfred | 0b77ec547fb665ef29fe1a3b7e1c4ad30c31170d | [
"MIT"
] | 3 | 2016-10-06T15:17:38.000Z | 2016-12-04T13:25:53.000Z | import json
# PyQt imports
from PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWebChannel import QWebChannel
# Local includes
from .ui.widget_ui import Ui_Dialog
from alfred import data_rc
import alfred.alfred_globals as ag
from alfred.modules.api.view_components import ARow, AColumn, ACard, AHeading
class MainWidget(QDialog, Ui_Dialog):
    """Frameless main dialog: hosts the web view and bridges Qt <-> JS.

    Renders Jinja templates into the embedded QWebEngine view and exposes
    ``bridge_obj`` to page JavaScript through a QWebChannel.
    """

    # Emitted with the line-edit contents when the user presses Return.
    text_changed = pyqtSignal('QString')

    def __init__(self, bridge_obj):
        QDialog.__init__(self)
        self.setupUi(self)
        # Borderless, transparent top-level window.
        self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_TranslucentBackground, True)
        self.lineEdit.returnPressed.connect(self.send_text)
        # Web channel lets page JS call methods on bridge_obj ("web_bridge").
        self.channel = QWebChannel(self.webView.page())
        self.webView.page().setWebChannel(self.channel)
        self.bridge_obj = bridge_obj
        self.channel.registerObject("web_bridge", bridge_obj)

    def clear_view(self):
        """Blank the main web view."""
        self.webView.page().setHtml("")

    def set_status_icon_busy(self, busy):
        """Shrink/restore the status icon's inner element to signal busy state."""
        if busy:
            self.bot_status_icon.page().runJavaScript("document.getElementById('inner').style.width = '0px';")
        else:
            self.bot_status_icon.page().runJavaScript("document.getElementById('inner').style.width = '20px';")

    def show_busy_state_widget(self):
        """Show a placeholder card while the bot is busy."""
        self.show_special_widget("Please wait...", "Alfred is busy learning at the moment :D")

    def show_module_running_widget(self, module_name):
        """Show a placeholder card naming the predicted/running module."""
        self.show_special_widget("Module is running, Please wait...", "{} module is predicted".format(module_name.capitalize()))

    def show_no_modules_view(self):
        """Show a placeholder card when no modules are installed."""
        self.show_special_widget("Please install some modules", "No modules found :(")

    def show_special_widget(self, title, content, color=''):
        """Render a single card (title + heading) into the web view.

        NOTE(review): the template keyword is spelled 'componenets' — it must
        match the variable name used inside the Jinja templates, so do not
        "fix" the spelling here without updating the templates too.
        """
        temp = ag.main_components_env.get_template("widgets.html")
        components = [ARow(AColumn(12, ACard(title, AHeading(3, content,color=color))))]
        html = temp.render(componenets=components)
        self.webView.page().setHtml(html)

    @pyqtSlot()
    def send_text(self):
        """Emit the current line-edit text (if non-empty) and remember it."""
        msg = self.lineEdit.text()
        if msg != '':
            self.text_changed.emit(msg)
            # NOTE(review): last_text is first created here, not in __init__ —
            # readers before the first send will hit AttributeError; confirm.
            self.last_text = msg

    @pyqtSlot(list)
    def set_widget_view(self, components):
        """Render the given components with the widgets.html template."""
        temp = ag.main_components_env.get_template("widgets.html")
        html = temp.render(componenets=components)
        # print(html)
        self.webView.page().setHtml(html)

    @pyqtSlot(list)
    def set_view(self, components):
        """Render the given components with the base.html template."""
        temp = ag.main_components_env.get_template("base.html")
        html = temp.render(componenets=components)
        # print(html)
        self.webView.page().setHtml(html)

    @pyqtSlot(str)
    def remove_component(self, dom_id):
        """Fade out and remove the DOM element with the given id via jQuery."""
        js = "jQuery('#{}').fadeOut(function(){{ jQuery(this).remove() }});".format(dom_id)
        # print(js)
        self.webView.page().runJavaScript(js)

    @pyqtSlot(str, str)
    def append_content(self, parent_dom_id, element_html):
        """Prepend HTML (flattened to one line, quotes stripped) to a parent node."""
        js = "jQuery('{}').prependTo('#{}').hide().fadeIn();".format(("".join(element_html.splitlines())).replace("'", ""), parent_dom_id)
        # print(js)
        self.webView.page().runJavaScript(js)
c98046d6e476b2db7f4e9b5014b73851b0a58d74 | 5,573 | py | Python | projects/11/jackTokenizer.py | nadavWeisler/Nand2Tetris | 59c2e616c45044c15b99aeb8459d39b59e5e07ba | [
"MIT"
] | null | null | null | projects/11/jackTokenizer.py | nadavWeisler/Nand2Tetris | 59c2e616c45044c15b99aeb8459d39b59e5e07ba | [
"MIT"
] | null | null | null | projects/11/jackTokenizer.py | nadavWeisler/Nand2Tetris | 59c2e616c45044c15b99aeb8459d39b59e5e07ba | [
"MIT"
] | null | null | null | import re
from utils import *
class JackTokenizer:
def __init__(self, file_name):
self._file = open(file_name, 'r')
self._data = []
self._types = []
self._tokens = []
self._xml = ['<tokens>']
self._tokens_iterator = iter(self._tokens)
self._token_types_iterator = iter(self._types)
self._current_token = ""
self._current_token_type = ""
def got_more_tokens(self):
try:
self._current_token = next(self._tokens_iterator)
self._current_token_type = next(self._token_types_iterator)
return True
except:
return False
def get_token(self):
return self._current_token_type, self._current_token
@staticmethod
def is_keyword(token):
return token in KEYWORDS
@staticmethod
def is_symbol(token):
return token in SYMBOLS
def is_identifier(self, token):
return len(token) >= 1 and not token[0].isdigit() and \
re.match(r'^[A-Za-z0-9_]+', token) is not None and \
not self.is_keyword(token)
@staticmethod
def is_int(token):
return token.isdigit() and 0 <= int(token) <= MAX_INT
@staticmethod
def is_string(token):
return len(token) >= 2 and \
(token[0] == '\"' and
token[-1] == '\"' and
'\"' not in token[1:-1] and
NEW_LINE not in token[1:-1])
def get_token_type(self, token):
if self.is_keyword(token):
return 'keyword'
elif self.is_symbol(token):
return 'symbol'
elif self.is_identifier(token):
return 'identifier'
elif self.is_int(token):
return 'integerConstant'
elif self.is_string(token):
return 'stringConstant'
def filter(self):
start = False
for line in self._file:
segment1 = ""
segment2 = ""
temp = line.strip()
matcher1 = re.match('.*\"[^\"]*//[^\"]*\".*', temp)
matcher2 = re.match('.*\"[^\"]*/\*{1,2}[^\"]*\".*', temp)
matcher3 = re.match('.*\"[^\"]*\*/[^\"]*\".*', temp)
if matcher1 is not None or matcher2 is not None or matcher3 is not None:
self._data.append(temp[:])
continue
arr = temp.split('/*')
if len(arr) > 1:
start = True
segment1 = arr[0]
if start:
arr = temp.split('*/')
if len(arr) > 1:
segment2 = arr[1]
start = False
result = segment1[:] + segment2[:]
if len(result):
self._data.append(segment1[:] + segment2[:])
else:
temp = ' '.join(temp.split('//')[0].split())
if len(temp):
self._data.append(temp[:])
@staticmethod
def convert_lt_gt_quot_amp(char):
if char == '<':
return '<'
elif char == '>':
return '>'
elif char == '\"':
return '"'
elif char == '&':
return '&'
@staticmethod
def split_line_by_symbols(line):
result = list()
idx = 0
temp = ""
while idx < len(line):
if line[idx] == ' ':
result.append(temp)
temp = ""
elif line[idx] in SYMBOLS and line[idx] != '\"':
if len(temp):
result.append(temp)
result.append(line[idx])
temp = ""
else:
result.append(line[idx])
elif line[idx] == '\"':
next_idx = line.find('\"', idx + 1)
while line[next_idx - 1] == '\\':
next_idx = line.find('\"', next_idx)
segment = line[idx:next_idx + 1]
result.append(segment)
temp = ""
idx = next_idx + 1
continue
else:
temp += line[idx]
idx += 1
return result
def tokenize(self):
self.filter()
for line in self._data:
segments = self.split_line_by_symbols(line)
for segment in segments:
current_type = self.get_token_type(segment)
if current_type is not None:
self._types.append(current_type)
self._tokens.append(segment)
if current_type not in {'stringConstant', 'integerConstant'}:
current_type = current_type.lower()
else:
if current_type == 'stringConstant':
current_type = 'stringConstant'
self._tokens[-1] = self._tokens[-1].strip('\"')
segment = segment.strip('\"')
else:
current_type = 'integerConstant'
if segment in {'<', '>', '\"', '&'}:
self._tokens[-1] = self.convert_lt_gt_quot_amp(segment)
segment = self.convert_lt_gt_quot_amp(segment)
self._xml.append('<' + current_type + '> ' + segment + ' </' + current_type + '>')
elif len(segment.strip()):
print(segment)
raise Exception("Invalid Token")
self._xml.append('</tokens>')
| 33.981707 | 102 | 0.466535 |
c98092ff02eaf3078402f8fe2053638da3880d53 | 1,115 | py | Python | main.py | TimKozak/NearestFilms | 991f8b7b1cb9f7f47c6bff818aaae3b91eb80375 | [
"MIT"
] | 2 | 2021-02-15T20:38:03.000Z | 2021-12-15T12:42:54.000Z | main.py | TimKozak/NearestFilms | 991f8b7b1cb9f7f47c6bff818aaae3b91eb80375 | [
"MIT"
] | null | null | null | main.py | TimKozak/NearestFilms | 991f8b7b1cb9f7f47c6bff818aaae3b91eb80375 | [
"MIT"
] | null | null | null | """
Main module of a program.
"""
import folium
from tools import find_coords, user_input
def creating_map():
    """
    Creates HTML page for a given data.

    Builds a folium map centered on the user's coordinates, adds a home
    marker, one marker per nearby film location (each connected back to the
    user with a polyline), and saves the result to ``nearest_films.html``.
    """
    year, coords = user_input()
    locations = find_coords(year, coords)
    mp = folium.Map(location=coords, zoom_start=10)
    # Marker for the user's own position.
    mp.add_child(folium.Marker(
        location=coords,
        popup="You are here",
        icon=folium.Icon(color='red',
                         icon_color='lightgray',
                         icon='home')))
    for location in locations:
        # location is (film_name, (lat, lon)) — see find_coords.
        mp.add_child(folium.Marker(
            location=[location[1][0], location[1][1]],
            popup=location[0],
            icon=folium.Icon(color='green',
                             icon_color='white',
                             icon='cloud')))
        folium.PolyLine(locations=[(coords[0], coords[1]),
                                   location[1]], color='orange').add_to(mp)
    mp.save('nearest_films.html')
    # Fixed typo in the user-facing message ("succesfully").
    print("Map successfully generated")


if __name__ == "__main__":
    creating_map()
| 27.195122 | 75 | 0.552466 |
c98373f93bfe070f74725f6b7462934da5ef570c | 1,366 | py | Python | ptCrypt/Symmetric/Modes/ECB.py | 0awawa0/aCrypt | 7c5d07271d524b9e5b03035d63587b69bff5abc7 | [
"MIT"
] | null | null | null | ptCrypt/Symmetric/Modes/ECB.py | 0awawa0/aCrypt | 7c5d07271d524b9e5b03035d63587b69bff5abc7 | [
"MIT"
] | 25 | 2021-12-08T07:20:11.000Z | 2021-12-10T12:07:05.000Z | ptCrypt/Symmetric/Modes/ECB.py | 0awawa0/aCrypt | 7c5d07271d524b9e5b03035d63587b69bff5abc7 | [
"MIT"
] | null | null | null | from ptCrypt.Symmetric.Modes.Mode import Mode
from ptCrypt.Symmetric.BlockCipher import BlockCipher
from ptCrypt.Symmetric.Paddings.Padding import Padding
class ECB(Mode):
"""Electronic codebook mode of encryption. The simplest encryption mode.
Encrypts every block independently from other blocks.
More: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB)
"""
def __init__(self, cipher: BlockCipher, padding: Padding = None):
super().__init__(cipher, padding)
def encrypt(self, data: bytes):
if self.padding:
data = self.padding.pad(data)
blocks = self.splitBlocks(data)
for i in range(len(blocks)):
blocks[i] = self.cipher.encrypt(blocks[i])
return self.joinBlocks(blocks)
def decrypt(self, data: bytes):
if len(data) % self.cipher.blockSize:
raise BlockCipher.WrongBlockSizeException(f"Cannot process data. Data size ({len(data)}) is not multiple of the cipher block size ({self.cipher.blockSize}).")
blocks = self.splitBlocks(data)
for i in range(len(blocks)):
blocks[i] = self.cipher.decrypt(blocks[i])
decrypted = self.joinBlocks(blocks)
if self.padding:
decrypted = self.padding.unpad(decrypted)
return decrypted
| 35.947368 | 170 | 0.666179 |
c983d81c361719032d41d5bf9ca26fcce754a0f2 | 1,335 | py | Python | src/static-vxlan-agent/test/arp_tracer.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | 8 | 2021-08-25T01:08:09.000Z | 2022-01-18T12:44:41.000Z | src/static-vxlan-agent/test/arp_tracer.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | null | null | null | src/static-vxlan-agent/test/arp_tracer.py | jbemmel/srl-evpn-proxy | 240b8180ab03ee06a5043e646781860ba32a3530 | [
"Apache-2.0"
] | 1 | 2022-03-13T22:36:18.000Z | 2022-03-13T22:36:18.000Z | #!/usr/bin/env python3 # Originally python2
# Sample from https://www.collabora.com/news-and-blog/blog/2019/05/14/an-ebpf-overview-part-5-tracing-user-processes/
# Python program with embedded C eBPF program
from bcc import BPF, USDT
import sys
# Embedded eBPF program (C): attached to a USDT probe, it fills one
# file_transf record per traced call and pushes it to user space through
# the "events" perf buffer (read by print_event below).
bpf = """
#include <uapi/linux/ptrace.h>
BPF_PERF_OUTPUT(events);
struct file_transf {
char client_ip_str[20];
char file_path[300];
u32 file_size;
u64 timestamp;
};
int trace_file_transfers(struct pt_regs *ctx, char *ipstrptr, char *pathptr, u32 file_size) {
struct file_transf ft = {0};
ft.file_size = file_size;
ft.timestamp = bpf_ktime_get_ns();
bpf_probe_read(&ft.client_ip_str, sizeof(ft.client_ip_str), (void *)ipstrptr);
bpf_probe_read(&ft.file_path, sizeof(ft.file_path), (void *)pathptr);
events.perf_submit(ctx, &ft, sizeof(ft));
return 0;
};
"""
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one file_transf event and print it."""
    event = b["events"].event(data)
    # Fixed typo in the output message ("downloding" -> "downloading").
    print("{0}: {1} is downloading file {2} ({3} bytes)".format(
        event.timestamp, event.client_ip_str, event.file_path, event.file_size))
# Attach to the target process (PID from argv[1]) via its USDT probe and
# load the eBPF program; print_event consumes each submitted record.
u = USDT(pid=int(sys.argv[1]))
u.enable_probe(probe="file_transfer", fn_name="trace_file_transfers")
b = BPF(text=bpf, usdt_contexts=[u])
b["events"].open_perf_buffer(print_event)
# Poll the perf buffer forever; Ctrl-C exits cleanly.
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
| 31.046512 | 117 | 0.702622 |
c984c4501d6e403db82fdd8d7b4131f8e313f048 | 1,004 | py | Python | test/test_reference.py | ognibit/sudoku-solver | 1c47b80b36b4bd57a11a4084e04defd849531782 | [
"Apache-2.0"
] | null | null | null | test/test_reference.py | ognibit/sudoku-solver | 1c47b80b36b4bd57a11a4084e04defd849531782 | [
"Apache-2.0"
] | null | null | null | test/test_reference.py | ognibit/sudoku-solver | 1c47b80b36b4bd57a11a4084e04defd849531782 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pytest
from sudoku import SudokuLine, build_rows, build_columns
from sudoku import SudokuSquare, build_squares
def test_reference():
    """Writes to the shared board must be visible through every view."""
    dim = 3
    side = dim ** 2
    board = np.zeros((side, side), dtype='int')
    symbols = np.arange(1, side + 1)

    squares = build_squares(board, symbols, dim)
    rows = build_rows(board, symbols, dim)
    cols = build_columns(board, symbols, dim)

    # Freshly built views read back the empty board.
    assert squares[0][0].board[0, 0] == 0
    assert rows[0].board[0] == 0
    assert cols[0].board[0] == 0

    # Every write to the underlying board shows up in square, row and column.
    for r, c, value in [(0, 0, 1), (0, 1, 2), (1, 0, 3), (4, 3, 4)]:
        board[r][c] = value
        assert squares[r // dim][c // dim].board[r % dim, c % dim] == value
        assert rows[r].board[c] == value
        assert cols[c].board[r] == value
| 23.348837 | 56 | 0.657371 |
c985d647edcaf8c1a409b8e34f91d4add29cf574 | 1,424 | py | Python | ReaderBoard/Score.py | JongbinRyu/Ajou_Challenge | 26c1e6e8464f2b50941dccbbca017ab99b6d4489 | [
"MIT"
] | null | null | null | ReaderBoard/Score.py | JongbinRyu/Ajou_Challenge | 26c1e6e8464f2b50941dccbbca017ab99b6d4489 | [
"MIT"
] | null | null | null | ReaderBoard/Score.py | JongbinRyu/Ajou_Challenge | 26c1e6e8464f2b50941dccbbca017ab99b6d4489 | [
"MIT"
] | null | null | null | import os
import json
import datetime
from pytz import timezone, utc
def update_total_score(name_list_dict, score_rules, now_kst_aware, penalty_const=.1):
    """
    Update Total Score when scheduled day written in "ScoreRule.json"
    :param name_list_dict: This contains contestants score info loaded from "namelist.json"
    :param score_rules: Score rules loaded from "ScoreRule.json"
    :param now_kst_aware: Current Aware Time(UTC difference info stored) for Korea/Seoul(+9:00)
    :param penalty_const: Weight multiplied with each contestant's "penalty"
        count and subtracted from the total score (default 0.1).
    :return: None (mutates name_list_dict in place)
    """
    current_time = str(now_kst_aware)
    name_list = name_list_dict['namelist']
    # Snapshot each contestant's score into the rule's variable on the
    # scheduled date.
    # NOTE(review): only month and day are compared, so a rule fires every
    # year on the same date — confirm this is intended.
    # Read Score Rules and Calculate total score
    for rule in score_rules:
        date_rule = datetime.datetime.strptime(rule['date'], '%Y-%m-%d %H:%M:%S')
        if now_kst_aware.month == date_rule.month and now_kst_aware.day == date_rule.day:
            name_list_dict['total_score_update_time'] = current_time
            print("Today is {} Update scheduled as {}".format(rule["var_name"], rule['date']))
            # Todo: change 'avg_accuracy' to 'last_accuracy'
            for info in name_list:
                info[rule["var_name"]] = info['avg_accuracy']
    # Total score = weighted sum of per-rule snapshots minus the penalty term.
    for info in name_list:
        total_score = 0
        for rule in score_rules:
            total_score += info[rule['var_name']] * rule['weight']
        total_score -= info["penalty"] * penalty_const
        info['total_score'] = round(total_score, 5)
c98644a1740c0b9a2213d68e9dafb7bed9e7032f | 3,500 | py | Python | src/utils/loaders.py | OE-Heart/span-based-srl | a03b46a5ea4c59e14bea80ea724b0de276df4bc1 | [
"MIT"
] | 41 | 2018-10-05T21:48:33.000Z | 2022-02-16T10:24:39.000Z | src/utils/loaders.py | OE-Heart/span-based-srl | a03b46a5ea4c59e14bea80ea724b0de276df4bc1 | [
"MIT"
] | 9 | 2018-10-21T14:45:01.000Z | 2022-02-25T14:25:29.000Z | src/utils/loaders.py | OE-Heart/span-based-srl | a03b46a5ea4c59e14bea80ea724b0de276df4bc1 | [
"MIT"
] | 9 | 2018-10-16T07:00:51.000Z | 2022-02-17T13:10:47.000Z | import os
import gzip
import pickle
import h5py
import numpy as np
import theano
from utils.misc import get_file_names_in_dir
from utils.vocab import UNK
class Loader(object):
    """Base class for corpus loaders.

    Subclasses implement ``load``; the helpers here cover common on-disk
    formats (gzip pickle, key/value text, HDF5) and directory scans.
    """

    def __init__(self, argv):
        # Parsed command-line options; kept for subclasses.
        self.argv = argv

    def load(self, **kwargs):
        raise NotImplementedError

    @staticmethod
    def load_data(fn):
        """Deserialize a gzip-compressed pickle file."""
        with gzip.open(fn, 'rb') as gf:
            return pickle.load(gf)

    @staticmethod
    def load_key_value_format(fn):
        """Parse whitespace-separated ``key value`` lines into (str, int) tuples."""
        with open(fn, 'r') as f:
            # Comprehension replaces the original append loop; unpacking
            # still raises on malformed lines, as before.
            return [(key, int(value))
                    for key, value in (line.rstrip().split() for line in f)]

    @staticmethod
    def load_hdf5(path):
        """Open an HDF5 file read-only (caller is responsible for closing)."""
        return h5py.File(path, 'r')

    def load_txt_from_dir(self, dir_path, file_prefix):
        """Load every ``<prefix>*.txt`` file under ``dir_path`` via self.load."""
        file_names = get_file_names_in_dir(dir_path + '/*')
        file_names = [fn for fn in file_names
                      if os.path.basename(fn).startswith(file_prefix)
                      and fn.endswith('txt')]
        return [self.load(path=fn) for fn in file_names]

    def load_hdf5_from_dir(self, dir_path, file_prefix):
        """Open every ``<prefix>*.hdf5`` file under ``dir_path``."""
        file_names = get_file_names_in_dir(dir_path + '/*')
        file_names = [fn for fn in file_names
                      if os.path.basename(fn).startswith(file_prefix)
                      and fn.endswith('hdf5')]
        return [self.load_hdf5(fn) for fn in file_names]
class Conll05Loader(Loader):
    """Loader for CoNLL-2005 style column files (blank-line sentence breaks)."""

    def load(self, path, data_size=1000000, is_test=False):
        """Read up to ``data_size`` sentences; each sentence is a list of
        token rows (lists of column strings).

        :param path: file path, or None for an empty corpus.
        :param data_size: maximum number of sentences to return.
        :param is_test: keep only the first 6 columns (drop gold annotations).
        """
        if path is None:
            return []

        corpus = []
        sent = []
        with open(path) as f:
            for line in f:
                elem = line.rstrip().split()  # was a pointless copy-comprehension
                if len(elem) > 0:
                    sent.append(elem[:6] if is_test else elem)
                else:
                    corpus.append(sent)
                    sent = []
                    if len(corpus) >= data_size:
                        break
        # Keep a trailing sentence when the file does not end with a blank
        # line (previously it was silently dropped).
        if sent and len(corpus) < data_size:
            corpus.append(sent)
        return corpus
class Conll12Loader(Loader):
    """Loader for CoNLL-2012 style column files (rows have > 10 columns)."""

    def load(self, path, data_size=1000000, is_test=False):
        """Read up to ``data_size`` sentences; each sentence is a list of
        token rows (lists of column strings).

        Rows with 1-10 columns are ignored (as before); blank lines end a
        sentence.

        :param path: file path, or None for an empty corpus.
        :param data_size: maximum number of sentences to return.
        :param is_test: keep only the first 11 columns.
        """
        if path is None:
            return []

        corpus = []
        sent = []
        with open(path) as f:
            for line in f:
                elem = line.rstrip().split()  # was a pointless copy-comprehension
                if len(elem) > 10:
                    sent.append(elem[:11] if is_test else elem)
                elif len(elem) == 0:
                    corpus.append(sent)
                    sent = []
                    if len(corpus) >= data_size:
                        break
        # Keep a trailing sentence when the file does not end with a blank
        # line (previously it was silently dropped).
        if sent and len(corpus) < data_size:
            corpus.append(sent)
        return corpus
def load_emb(path):
    """Read a text embedding file (``word v1 v2 ...`` per line).

    Returns (word_list, matrix); when UNK is absent it is prepended with the
    mean vector of all embeddings.
    """
    words = []
    vectors = []
    with open(path) as f:
        for line in f:
            fields = line.rstrip().split()
            words.append(fields[0])
            vectors.append(fields[1:])

    matrix = np.asarray(vectors, dtype=theano.config.floatX)

    if UNK not in words:
        words = [UNK] + words
        matrix = np.vstack((np.mean(matrix, axis=0), matrix))

    return words, matrix
def load_pickle(fn):
    """Deserialize a gzip-compressed pickle file."""
    with gzip.open(fn, 'rb') as gz_file:
        return pickle.load(gz_file)
def load_key_value_format(fn):
    """Parse whitespace-separated ``key value`` lines into (str, int) tuples.

    Module-level duplicate of ``Loader.load_key_value_format`` kept for
    callers that do not hold a Loader instance.
    """
    with open(fn, 'r') as f:
        # Comprehension replaces the append loop; unpacking still raises
        # on malformed lines, as before.
        return [(key, int(value))
                for key, value in (line.rstrip().split() for line in f)]
| 26.315789 | 69 | 0.513714 |
a309e90ac2f88ea56edc2aaeacb9b7f74fba3681 | 591 | py | Python | system_test_progress_tracking/progress_tracking/urls.py | TobKed/system_test_progress_tracking | 633792e7057289b6a23db30c6353241123eaa2e4 | [
"MIT"
] | null | null | null | system_test_progress_tracking/progress_tracking/urls.py | TobKed/system_test_progress_tracking | 633792e7057289b6a23db30c6353241123eaa2e4 | [
"MIT"
] | 3 | 2020-02-11T23:29:05.000Z | 2021-06-10T21:03:42.000Z | system_test_progress_tracking/progress_tracking/urls.py | TobKed/system_test_progress_tracking | 633792e7057289b6a23db30c6353241123eaa2e4 | [
"MIT"
] | 2 | 2019-01-24T20:39:31.000Z | 2019-01-29T07:42:27.000Z | from django.urls import path
from .views import (
home,
MachineDetailView,
MachineListView,
DryRunDataDetailView,
MachineLastDataView,
)
urlpatterns = [
path('', MachineListView.as_view(), name='home-view'),
path('', MachineListView.as_view(), name='machine-list-view'),
path('machine/<int:pk>', MachineDetailView.as_view(), name='machine-detail-view'),
path('machine/<int:pk>/last', MachineLastDataView.as_view(), name='machine-last-data-view'),
path('machine/run_data/<int:pk>', DryRunDataDetailView.as_view(), name='dry-run-data-detail-view'),
]
| 32.833333 | 103 | 0.698816 |
a30a5b9c466fd79c98aae5b462aff3ba4ea72d40 | 480 | py | Python | main.py | mrroot5/wall-builder | 2f0414359080fecdba5312463dd05cd9c11da6c1 | [
"MIT"
] | null | null | null | main.py | mrroot5/wall-builder | 2f0414359080fecdba5312463dd05cd9c11da6c1 | [
"MIT"
] | null | null | null | main.py | mrroot5/wall-builder | 2f0414359080fecdba5312463dd05cd9c11da6c1 | [
"MIT"
] | null | null | null | """
Python version 3.6.7
OS Linux Ubuntu 18.04.1 LTS
Created: 30/11/2018 17:12
Finished: 30/11/2018 19:
Author: Adrian Garrido Garcia
"""
import sys
from wall.builder import build_a_wall
if __name__ == '__main__':
    # Prefer command-line arguments; fall back to interactive prompts.
    # The explicit length check replaces ``except IndexError``, which could
    # also have swallowed an IndexError raised inside build_a_wall itself.
    if len(sys.argv) >= 3:
        build_a_wall(sys.argv[1], sys.argv[2])
    else:
        rows = input("Please, give me the number of wall rows: ")
        bricks = input("Please, give me the number of bricks for every wall row: ")
        build_a_wall(rows, bricks)
| 25.263158 | 83 | 0.672917 |
a30c417b3a747422a1fa92c8a3a68fa2a0ddf883 | 2,770 | py | Python | dataset.py | njoel-ethz/saliency-rl | 61cf7acf10569b04c3a59528a4fc511c6e794895 | [
"MIT"
] | null | null | null | dataset.py | njoel-ethz/saliency-rl | 61cf7acf10569b04c3a59528a4fc511c6e794895 | [
"MIT"
] | null | null | null | dataset.py | njoel-ethz/saliency-rl | 61cf7acf10569b04c3a59528a4fc511c6e794895 | [
"MIT"
] | null | null | null | import os
import csv
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
def transform(snippet):
''' stack & noralization '''
snippet = np.concatenate(snippet, axis=-1)
snippet = torch.from_numpy(snippet).permute(2, 0, 1).contiguous().float()
snippet = snippet.mul_(2.).sub_(255).div(255)
snippet = snippet.view(-1,3,snippet.size(1),snippet.size(2)).permute(1,0,2,3)
return snippet
class DHF1KDataset(Dataset):
def __init__(self, path_data, len_snippet):
self.path_data = path_data
self.len_snippet = len_snippet
if (path_data == 'DHF1K_dataset'):
path_to_file = 'DHF1K_num_frame_train.csv'#'Atari_num_frame_train.csv', 'r'))]
else:
path_to_file = 'Atari_num_frame_train.csv'
csv_reader = csv.reader(open(path_to_file, 'r'))
list_of_tuples = list(map(tuple, csv_reader)) #list of (#samples, file_name)
num_frame = []
for (n_samples, name) in list_of_tuples:
num_frame.append((int(n_samples), name))
self.list_num_frame = num_frame
def __len__(self):
return len(self.list_num_frame)
def __getitem__(self, idx):
file_name = self.list_num_frame[idx][1]
#file_name = '%04d'%(idx+1)
path_clip = os.path.join(self.path_data, 'video', file_name)
path_annt = os.path.join(self.path_data, 'annotation', file_name, 'maps')
start_idx = np.random.randint(1, self.list_num_frame[idx][0]-self.len_snippet+1) #(0, ..) to keep 1st frame
v = np.random.random()
clip = []
for i in range(self.len_snippet):
img = cv2.imread(os.path.join(path_clip, '%06d.png'%(start_idx+i+1)))
img = cv2.resize(img, (384, 224))
img = img[...,::-1]
if v < 0.5:
img = img[:, ::-1, ...]
clip.append(img)
annt = cv2.imread(os.path.join(path_annt, '%06d.png'%(start_idx+self.len_snippet)), 0)
annt = cv2.resize(annt, (384, 224))
if v < 0.5:
annt = annt[:, ::-1]
return transform(clip), torch.from_numpy(annt.copy()).contiguous().float(), (file_name, '%06d.png'%(start_idx+self.len_snippet))
# from gist.github.com/MFreidank/821cc87b012c53fade03b0c7aba13958
class InfiniteDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iterator = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.dataset_iterator)
except StopIteration:
self.dataset_iterator = super().__iter__()
batch = next(self.dataset_iterator)
return batch | 37.432432 | 136 | 0.615884 |
a30c4f34a3721276b10ca7d81878d13ffef5c2e3 | 1,342 | py | Python | back-end/api/migrations/0003_address_event.py | tuftsjumbocode/bostonathleticsassociation | 4a01607bd530e9f4973d3b345a442b4eceafa8e1 | [
"MIT"
] | 2 | 2017-01-30T01:33:07.000Z | 2017-02-12T22:00:19.000Z | back-end/api/migrations/0003_address_event.py | tuftsjumbocode/bostonathleticsassociation | 4a01607bd530e9f4973d3b345a442b4eceafa8e1 | [
"MIT"
] | 90 | 2017-02-02T01:56:30.000Z | 2017-05-07T02:58:46.000Z | back-end/api/migrations/0003_address_event.py | tuftsjumbocode/bostonathleticsassociation | 4a01607bd530e9f4973d3b345a442b4eceafa8e1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-19 08:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20161006_0128'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street', models.TextField()),
('city', models.TextField()),
('state', models.TextField()),
('z_code', models.TextField()),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('date', models.DateTimeField()),
('duration', models.DurationField()),
('location', models.CharField(max_length=40)),
('notes', models.TextField()),
('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Address')),
],
),
]
| 34.410256 | 114 | 0.551416 |
a30c6a24fddc04808da5db4f7c0d305641d9674e | 8,505 | py | Python | Pre-Processing/generate_tfrecords.py | beric7/COCO-Bridge | c3ae8e5eb72cbc2fce982215c55bac09844764d8 | [
"Apache-2.0"
] | 2 | 2020-07-06T23:25:12.000Z | 2021-09-21T23:11:55.000Z | Pre-Processing/generate_tfrecords.py | beric7/COCO-Bridge | c3ae8e5eb72cbc2fce982215c55bac09844764d8 | [
"Apache-2.0"
] | null | null | null | Pre-Processing/generate_tfrecords.py | beric7/COCO-Bridge | c3ae8e5eb72cbc2fce982215c55bac09844764d8 | [
"Apache-2.0"
] | null | null | null | # generate_tfrecords.py
# Note: substantial portions of this code, expecially the create_tf_example() function, are credit to Dat Tran
# see his website here: https://towardsdatascience.com/how-to-train-your-own-object-detector-with-tensorflows-object-detector-api-bec72ecfe1d9
# and his GitHub here: https://github.com/CDahmsTemp/TensorFlow_Tut_3_Object_Detection_Walk-through/blob/master/1_xml_to_csv.py
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple
# module-level variables ##############################################################################################
# input training CSV file and training images directory
TRAIN_CSV_FILE_LOC = os.getcwd() + "/training_data/" + "Bbox_info_CSV_output_Train.csv"
TRAIN_IMAGES_DIR = os.getcwd() + "/Train_Output"
# input test CSV file and test images directory
EVAL_CSV_FILE_LOC = os.getcwd() + "/training_data/" + "Bbox_info_CSV_output_Evaluation.csv"
TEST_IMAGES_DIR = os.getcwd() + "/Evaluation_Output"
# training and testing output .tfrecord files
TRAIN_TFRECORD_FILE_LOC = os.getcwd() + "/training_data/" + "train.tfrecord"
EVAL_TFRECORD_FILE_LOC = os.getcwd() + "/training_data/" + "eval.tfrecord"
#######################################################################################################################
def main():
if not checkIfNecessaryPathsAndFilesExist():
return
# end if
# write the train data .tfrecord file
trainTfRecordFileWriteSuccessful = writeTfRecordFile(TRAIN_CSV_FILE_LOC, TRAIN_TFRECORD_FILE_LOC, TRAIN_IMAGES_DIR)
if trainTfRecordFileWriteSuccessful:
print("successfully created the training TFRectrds, saved to: " + TRAIN_TFRECORD_FILE_LOC)
# end if
# write the eval data .tfrecord file
evalTfRecordFileWriteSuccessful = writeTfRecordFile(EVAL_CSV_FILE_LOC, EVAL_TFRECORD_FILE_LOC, TEST_IMAGES_DIR)
if evalTfRecordFileWriteSuccessful:
print("successfully created the eval TFRecords, saved to: " + EVAL_TFRECORD_FILE_LOC)
# end if
# end main
#######################################################################################################################
def writeTfRecordFile(csvFileName, tfRecordFileName, imagesDir):
# use pandas to read in the .csv file data, pandas.read_csv() returns a type DataFrame with the given param
csvFileDataFrame = pd.read_csv(csvFileName)
# reformat the CSV data into a format TensorFlow can work with
csvFileDataList = reformatCsvFileData(csvFileDataFrame)
# instantiate a TFRecordWriter for the file data
tfRecordWriter = tf.python_io.TFRecordWriter(tfRecordFileName)
# for each file (not each line) in the CSV file data . . .
# (each image/.xml file pair can have more than one box, and therefore more than one line for that file in the CSV file)
for singleFileData in csvFileDataList:
tfExample = createTfExample(singleFileData, imagesDir)
tfRecordWriter.write(tfExample.SerializeToString())
# end for
tfRecordWriter.close()
return True # return True to indicate success
# end function
#######################################################################################################################
def checkIfNecessaryPathsAndFilesExist():
if not os.path.exists(TRAIN_CSV_FILE_LOC):
print('ERROR: TRAIN_CSV_FILE "' + TRAIN_CSV_FILE_LOC + '" does not seem to exist')
return False
# end if
if not os.path.exists(TRAIN_IMAGES_DIR):
print('ERROR: TRAIN_IMAGES_DIR "' + TRAIN_IMAGES_DIR + '" does not seem to exist')
return False
# end if
if not os.path.exists(EVAL_CSV_FILE_LOC):
print('ERROR: TEST_CSV_FILE "' + EVAL_CSV_FILE_LOC + '" does not seem to exist')
return False
# end if
if not os.path.exists(TEST_IMAGES_DIR):
print('ERROR: TEST_IMAGES_DIR "' + TEST_IMAGES_DIR + '" does not seem to exist')
return False
# end if
return True
# end function
#######################################################################################################################
def reformatCsvFileData(csvFileDataFrame):
# the purpose of this function is to translate the data from one CSV file in pandas.DataFrame format
# into a list of the named tuple below, which then can be fed into TensorFlow
# establish the named tuple data format
dataFormat = namedtuple('data', ['filename', 'object'])
# pandas.DataFrame.groupby() returns type pandas.core.groupby.DataFrameGroupBy
csvFileDataFrameGroupBy = csvFileDataFrame.groupby('filename')
# declare, populate, and return the list of named tuples of CSV data
csvFileDataList = []
for filename, x in zip(csvFileDataFrameGroupBy.groups.keys(), csvFileDataFrameGroupBy.groups):
csvFileDataList.append(dataFormat(filename, csvFileDataFrameGroupBy.get_group(x)))
# end for
return csvFileDataList
# end function
#######################################################################################################################
def createTfExample(singleFileData, path):
# use TensorFlow's GFile function to open the .jpg image matching the current box data
with tf.gfile.GFile(os.path.join(path, '{}'.format(singleFileData.filename)), 'rb') as tensorFlowImageFile:
tensorFlowImage = tensorFlowImageFile.read()
# end with
# get the image width and height via converting from a TensorFlow image to an io library BytesIO image,
# then to a PIL Image, then breaking out the width and height
bytesIoImage = io.BytesIO(tensorFlowImage)
pilImage = Image.open(bytesIoImage)
width, height = pilImage.size
# get the file name from the file data passed in, and set the image format to .jpg
fileName = singleFileData.filename.encode('utf8')
imageFormat = b'jpg'
# declare empty lists for the box x, y, mins and maxes, and the class as text and as an integer
xMins = []
xMaxs = []
yMins = []
yMaxs = []
classesAsText = []
classesAsInts = []
# for each row in the current .xml file's data . . . (each row in the .xml file corresponds to one box)
for index, row in singleFileData.object.iterrows():
xMins.append(row['xmin'] / width)
xMaxs.append(row['xmax'] / width)
yMins.append(row['ymin'] / height)
yMaxs.append(row['ymax'] / height)
classesAsText.append(row['class'].encode('utf8'))
classesAsInts.append(classAsTextToClassAsInt(row['class']))
# end for
# finally we can calculate and return the TensorFlow Example
tfExample = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(fileName),
'image/source_id': dataset_util.bytes_feature(fileName),
'image/encoded': dataset_util.bytes_feature(tensorFlowImage),
'image/format': dataset_util.bytes_feature(imageFormat),
'image/object/bbox/xmin': dataset_util.float_list_feature(xMins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xMaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(yMins),
'image/object/bbox/ymax': dataset_util.float_list_feature(yMaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classesAsText),
'image/object/class/label': dataset_util.int64_list_feature(classesAsInts)}))
return tfExample
# end function
#######################################################################################################################
def classAsTextToClassAsInt(classAsText):
# ToDo: If you have more than one classification, add an if statement for each
# ToDo: i.e. if you have 3 classes, you would have 3 if statements and then the else
if classAsText == 'Gusset Plate Connection':
return 1
elif classAsText == 'Out of Plane Stiffener':
return 2
elif classAsText == 'Cover Plate Termination':
return 3
elif classAsText == 'Bearing':
return 4
else:
print("error in class_text_to_int(), row_label could not be identified")
return -1
# end if
# end function
#######################################################################################################################
if __name__ == '__main__':
main() | 45 | 142 | 0.642563 |
a30d6af902c1a8c64022ae0458cac17dd1fa6032 | 6,398 | py | Python | openprocurement/chronograph/__init__.py | yshalenyk/openprocurement.chronograph | c15a6da519cea8a09b5d9a943752a49dd6f5131f | [
"Apache-2.0"
] | null | null | null | openprocurement/chronograph/__init__.py | yshalenyk/openprocurement.chronograph | c15a6da519cea8a09b5d9a943752a49dd6f5131f | [
"Apache-2.0"
] | null | null | null | openprocurement/chronograph/__init__.py | yshalenyk/openprocurement.chronograph | c15a6da519cea8a09b5d9a943752a49dd6f5131f | [
"Apache-2.0"
] | null | null | null | import gevent.monkey
gevent.monkey.patch_all()
import os
from logging import getLogger
#from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.gevent import GeventScheduler as Scheduler
from couchdb import Server, Session
from couchdb.http import Unauthorized, extract_credentials
from datetime import datetime, timedelta
#from openprocurement.chronograph.jobstores import CouchDBJobStore
from openprocurement.chronograph.design import sync_design
from openprocurement.chronograph.scheduler import push
from openprocurement.chronograph.utils import add_logging_context
from pyramid.config import Configurator
from pytz import timezone
from pyramid.events import ApplicationCreated, ContextFound
from pbkdf2 import PBKDF2
LOGGER = getLogger(__name__)
TZ = timezone(os.environ['TZ'] if 'TZ' in os.environ else 'Europe/Kiev')
SECURITY = {u'admins': {u'names': [], u'roles': ['_admin']}, u'members': {u'names': [], u'roles': ['_admin']}}
VALIDATE_DOC_ID = '_design/_auth'
VALIDATE_DOC_UPDATE = """function(newDoc, oldDoc, userCtx){
if(newDoc._deleted) {
throw({forbidden: 'Not authorized to delete this document'});
}
if(userCtx.roles.indexOf('_admin') !== -1 && newDoc.indexOf('_design/') === 0) {
return;
}
if(userCtx.name === '%s') {
return;
} else {
throw({forbidden: 'Only authorized user may edit the database'});
}
}"""
def start_scheduler(event):
app = event.app
app.registry.scheduler.start()
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.add_subscriber(add_logging_context, ContextFound)
config.include('pyramid_exclog')
config.add_route('home', '/')
config.add_route('resync_all', '/resync_all')
config.add_route('resync_back', '/resync_back')
config.add_route('resync', '/resync/{tender_id}')
config.add_route('recheck', '/recheck/{tender_id}')
config.add_route('calendar', '/calendar')
config.add_route('calendar_entry', '/calendar/{date}')
config.add_route('streams', '/streams')
config.scan(ignore='openprocurement.chronograph.tests')
config.add_subscriber(start_scheduler, ApplicationCreated)
config.registry.api_token = os.environ.get('API_TOKEN', settings.get('api.token'))
db_name = os.environ.get('DB_NAME', settings['couchdb.db_name'])
server = Server(settings.get('couchdb.url'), session=Session(retry_delays=range(60)))
if 'couchdb.admin_url' not in settings and server.resource.credentials:
try:
server.version()
except Unauthorized:
server = Server(extract_credentials(settings.get('couchdb.url'))[0], session=Session(retry_delays=range(60)))
config.registry.couchdb_server = server
if 'couchdb.admin_url' in settings and server.resource.credentials:
aserver = Server(settings.get('couchdb.admin_url'), session=Session(retry_delays=range(10)))
users_db = aserver['_users']
if SECURITY != users_db.security:
LOGGER.info("Updating users db security", extra={'MESSAGE_ID': 'update_users_security'})
users_db.security = SECURITY
username, password = server.resource.credentials
user_doc = users_db.get('org.couchdb.user:{}'.format(username), {'_id': 'org.couchdb.user:{}'.format(username)})
if not user_doc.get('derived_key', '') or PBKDF2(password, user_doc.get('salt', ''), user_doc.get('iterations', 10)).hexread(int(len(user_doc.get('derived_key', '')) / 2)) != user_doc.get('derived_key', ''):
user_doc.update({
"name": username,
"roles": [],
"type": "user",
"password": password
})
LOGGER.info("Updating chronograph db main user", extra={'MESSAGE_ID': 'update_chronograph_main_user'})
users_db.save(user_doc)
security_users = [username, ]
if db_name not in aserver:
aserver.create(db_name)
db = aserver[db_name]
SECURITY[u'members'][u'names'] = security_users
if SECURITY != db.security:
LOGGER.info("Updating chronograph db security", extra={'MESSAGE_ID': 'update_chronograph_security'})
db.security = SECURITY
auth_doc = db.get(VALIDATE_DOC_ID, {'_id': VALIDATE_DOC_ID})
if auth_doc.get('validate_doc_update') != VALIDATE_DOC_UPDATE % username:
auth_doc['validate_doc_update'] = VALIDATE_DOC_UPDATE % username
LOGGER.info("Updating chronograph db validate doc", extra={'MESSAGE_ID': 'update_chronograph_validate_doc'})
db.save(auth_doc)
# sync couchdb views
sync_design(db)
db = server[db_name]
else:
if db_name not in server:
server.create(db_name)
db = server[db_name]
# sync couchdb views
sync_design(db)
config.registry.db = db
jobstores = {
#'default': CouchDBJobStore(database=db_name, client=server)
}
#executors = {
#'default': ThreadPoolExecutor(5),
#'processpool': ProcessPoolExecutor(5)
#}
job_defaults = {
'coalesce': False,
'max_instances': 3
}
config.registry.api_url = settings.get('api.url')
config.registry.callback_url = settings.get('callback.url')
scheduler = Scheduler(jobstores=jobstores,
#executors=executors,
job_defaults=job_defaults,
timezone=TZ)
if 'jobstore_db' in settings:
scheduler.add_jobstore('sqlalchemy', url=settings['jobstore_db'])
config.registry.scheduler = scheduler
# scheduler.remove_all_jobs()
# scheduler.start()
resync_all_job = scheduler.get_job('resync_all')
now = datetime.now(TZ)
if not resync_all_job or resync_all_job.next_run_time < now - timedelta(hours=1):
if resync_all_job:
args = resync_all_job.args
else:
args = [settings.get('callback.url') + 'resync_all', None]
run_date = now + timedelta(seconds=60)
scheduler.add_job(push, 'date', run_date=run_date, timezone=TZ,
id='resync_all', args=args,
replace_existing=True, misfire_grace_time=60 * 60)
return config.make_wsgi_app()
| 44.124138 | 215 | 0.664739 |
a30f4fc2ab1f50558de3a730d24cdd2bc794f650 | 1,078 | py | Python | poc/setmanyblocks.py | astro-pi/SpaceCRAFT | b577681b31c0554db9e77ed816cd63900fe195ca | [
"BSD-3-Clause"
] | 12 | 2016-03-05T16:40:16.000Z | 2019-10-27T07:48:12.000Z | poc/setmanyblocks.py | astro-pi/SpaceCRAFT | b577681b31c0554db9e77ed816cd63900fe195ca | [
"BSD-3-Clause"
] | 1 | 2016-03-03T16:54:59.000Z | 2016-03-09T12:14:33.000Z | poc/setmanyblocks.py | astro-pi/SpaceCRAFT | b577681b31c0554db9e77ed816cd63900fe195ca | [
"BSD-3-Clause"
] | 2 | 2015-12-01T08:01:07.000Z | 2019-10-27T07:48:19.000Z | #code which sends many setBlock commands all in one go, to see if there was
# a performance improvement.. It sent them a lot quicker, but you still had to wait
# for minecraft to catch up
import mcpi.minecraft as minecraft
import mcpi.block as block
import mcpi.util as util
from time import time, sleep
def setManyBlocks(mc, blocks):
mc.conn.drain()
s = ""
for block in blocks:
args = minecraft.intFloor(block)
s += "world.setBlock(%s)\n"%(util.flatten_parameters_to_string(args))
mc.conn.lastSent = s
mc.conn.socket.sendall(s.encode())
mc = minecraft.Minecraft.create()
starttime = time()
blocksToSet = []
for x in range(0,25):
for y in range(25,50):
for z in range(0,25):
blocksToSet.append((x,y,z,block.DIAMOND_BLOCK.id))
endtime = time()
print(endtime - starttime)
setManyBlocks(mc, blocksToSet)
sleep(5)
starttime = time()
for x in range(0,25):
for y in range(25,50):
for z in range(0,25):
mc.setBlock(x,y,z,block.DIRT.id)
endtime = time()
print(endtime - starttime)
| 25.069767 | 83 | 0.666976 |
a31137aa372b035d450bbbaac3b873065c92d845 | 2,059 | py | Python | modules/yatse/templatetags/strings.py | Hinnack/yatse | 3386630d974cfe2d832c0297defd1822e463855d | [
"MIT"
] | null | null | null | modules/yatse/templatetags/strings.py | Hinnack/yatse | 3386630d974cfe2d832c0297defd1822e463855d | [
"MIT"
] | 1 | 2021-02-22T18:06:52.000Z | 2021-02-22T18:06:52.000Z | modules/yatse/templatetags/strings.py | Hinnack/yatse | 3386630d974cfe2d832c0297defd1822e463855d | [
"MIT"
] | null | null | null | from django import template
from django.conf import settings
from django.utils.translation import ugettext as _
from django.forms.forms import pretty_name
#from yats.diff import generate_patch_html
import re
try:
import json
except ImportError:
from django.utils import simplejson as json
register = template.Library()
def prettify(value):
return pretty_name(value)
register.filter('prettify', prettify)
def contains(value, search):
if not value or not search:
return False
return search in value
register.filter('contains', contains)
def numberToTicketURL(value):
return re.sub('#([0-9]+)', r'<a href="/tickets/view/\1/">#\1</a>', value)
register.filter('numberToTicketURL', numberToTicketURL)
class Diffs(template.Node):
def __init__(self, line):
self.line = line
def render(self, context):
line = context.get(self.line)
user = context.get('request').user
result = {}
old = json.loads(line.old)
new = json.loads(line.new)
for ele in old:
if not user.is_staff and ele in settings.TICKET_NON_PUBLIC_FIELDS:
continue
if new[ele] == 'None':
new[ele] = _('unknown')
if old[ele] == 'None':
old[ele] = _('unknown')
if new[ele] == 'True':
new[ele] = _('yes')
if old[ele] == 'True':
old[ele] = _('yes')
if new[ele] == 'False':
new[ele] = _('no')
if old[ele] == 'False':
old[ele] = _('no')
result[ele] = generate_patch_html(old[ele], new[ele], ele, 'semantic')
context['elements'] = result
return ''
def do_diff(parser, token):
try:
# split_contents() knows not to split quoted strings.
tag_name, line = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires a single argument" % token.contents.split()[0])
return Diffs(line)
register.tag('diff', do_diff)
| 27.824324 | 107 | 0.599806 |
a3140847cb4f8ae37f600751b8e796fb5fea58ee | 129 | py | Python | Lib/test/test_import/data/circular_imports/rebinding.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | 1 | 2018-06-21T18:21:24.000Z | 2018-06-21T18:21:24.000Z | Lib/test/test_import/data/circular_imports/rebinding.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_import/data/circular_imports/rebinding.py | sireliah/polish-python | 605df4944c2d3bc25f8bf6964b274c0a0d297cc3 | [
"PSF-2.0"
] | null | null | null | """Test the binding of names when a circular zaimportuj shares the same name jako an
attribute."""
z .rebinding2 zaimportuj util
| 32.25 | 84 | 0.782946 |
a3156184194412b6c58e7f98504a56f1d8eea1bf | 1,132 | py | Python | Chapter08/8_2_save_packets_in_pcap_format.py | shamir456/Python-Network-Programming-Cookbook-Second-Edition | 7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b | [
"MIT"
] | 125 | 2017-08-10T18:09:55.000Z | 2022-03-29T10:14:31.000Z | Chapter08/8_2_save_packets_in_pcap_format.py | shamir456/Python-Network-Programming-Cookbook-Second-Edition | 7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b | [
"MIT"
] | 4 | 2018-01-19T05:42:58.000Z | 2019-03-07T06:18:52.000Z | Chapter08/8_2_save_packets_in_pcap_format.py | shamir456/Python-Network-Programming-Cookbook-Second-Edition | 7f5ebcbb4ef79c41da677afdf0d8e0fb575dcf0b | [
"MIT"
] | 79 | 2017-08-15T00:40:36.000Z | 2022-02-26T10:20:24.000Z | #!/usr/bin/env python
# Python Network Programming Cookbook, Second Edition -- Chapter - 8
# This program is optimized for Python 2.7.12 and Python 3.5.2.
# It may run on any other version with/without modifications.
import os
from scapy.all import *
pkts = []
count = 0
pcapnum = 0
def write_cap(x):
global pkts
global count
global pcapnum
pkts.append(x)
count += 1
if count == 3:
pcapnum += 1
pname = "pcap%d.pcap" % pcapnum
wrpcap(pname, pkts)
pkts = []
count = 0
def test_dump_file():
print ("Testing the dump file...")
dump_file = "./pcap1.pcap"
if os.path.exists(dump_file):
print ("dump fie %s found." %dump_file)
pkts = sniff(offline=dump_file)
count = 0
while (count <=2):
print ("----Dumping pkt:%s----" %count)
print (hexdump(pkts[count]))
count += 1
else:
print ("dump fie %s not found." %dump_file)
if __name__ == '__main__':
print ("Started packet capturing and dumping... Press CTRL+C to exit")
sniff(prn=write_cap)
test_dump_file()
| 24.085106 | 74 | 0.590106 |
a316647d2535cb3b325343092f5d4ee583cf738e | 2,862 | py | Python | tests/test_init.py | Javex/pyramid_crud | 873ca8a84c9e5030257a327259f7f1972ba42c9d | [
"MIT"
] | 4 | 2015-03-04T11:35:15.000Z | 2017-05-05T04:00:26.000Z | tests/test_init.py | Javex/pyramid_crud | 873ca8a84c9e5030257a327259f7f1972ba42c9d | [
"MIT"
] | 16 | 2015-02-14T00:56:03.000Z | 2015-02-19T22:23:13.000Z | tests/test_init.py | Javex/pyramid_crud | 873ca8a84c9e5030257a327259f7f1972ba42c9d | [
"MIT"
] | 3 | 2015-08-25T13:20:17.000Z | 2020-05-10T19:02:18.000Z | import pyramid_crud
import pytest
from pyramid.exceptions import ConfigurationError
from pyramid.interfaces import ISessionFactory
@pytest.fixture
def static_prefix(config):
"Add a static URL prefix with the name '/testprefix'"
config.add_settings({'crud.static_url_prefix': '/testprefix'})
@pytest.fixture
def custom_settings( static_prefix):
"A fixture that uses custom settings."
@pytest.fixture
def session_factory(config):
f = lambda: None
config.registry.registerUtility(f, ISessionFactory)
def test_check_session_no_factory(config):
with pytest.raises(ConfigurationError):
pyramid_crud.check_session(config)
@pytest.mark.usefixtures("session_factory")
def test_check_session_factory(config):
pyramid_crud.check_session(config)
@pytest.mark.usefixtures("custom_settings")
def test_parse_options_from_settings(config):
settings = config.get_settings()
ref_settings = {'static_url_prefix': '/testprefix'}
settings = pyramid_crud.parse_options_from_settings(settings, 'crud.')
assert settings == ref_settings
def test_parse_options_from_settings_defaults():
settings = pyramid_crud.parse_options_from_settings({}, 'crud.')
ref_settings = {'static_url_prefix': '/static/crud'}
assert settings == ref_settings
def test_includeme_no_session(config):
pyramid_crud.includeme(config)
with pytest.raises(ConfigurationError):
config.commit()
def test_includeme_session_correct_order(config):
def register():
f = lambda: None
config.registry.registerUtility(f, ISessionFactory)
config.action(('pyramid_crud', 'session_test'), register)
pyramid_crud.includeme(config)
config.commit()
def test_includeme_session_wrong_order(config):
def register():
f = lambda: None
config.registry.registerUtility(f, ISessionFactory)
config.action(('pyramid_crud', 'session_test'), register, order=2)
pyramid_crud.includeme(config)
with pytest.raises(ConfigurationError):
config.commit()
@pytest.mark.usefixtures("custom_settings", "session_factory")
def test_includeme_static_view(config, pyramid_request):
pyramid_crud.includeme(config)
config.commit()
url = pyramid_request.static_url('pyramid_crud:static/test.png')
assert url == 'http://example.com/testprefix/test.png'
@pytest.mark.usefixtures("session_factory")
def test_includeme_static_view_default(config, pyramid_request):
pyramid_crud.includeme(config)
config.commit()
url = pyramid_request.static_url('pyramid_crud:static/test.png')
assert url == 'http://example.com/static/crud/test.png'
@pytest.mark.usefixtures("session_factory")
def test_includeme_static_view_none(config, pyramid_request):
pyramid_crud.includeme(config)
config.commit()
pyramid_request.static_url('pyramid_crud:static/test.png')
| 30.446809 | 74 | 0.759609 |
a3168c69f4eb9f2ba122306fee2a6890c6f1230e | 1,621 | py | Python | Assignments/Sprint2/FinValuePivot.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | 1 | 2021-02-28T07:43:59.000Z | 2021-02-28T07:43:59.000Z | Assignments/Sprint2/FinValuePivot.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | null | null | null | Assignments/Sprint2/FinValuePivot.py | mark-morelos/CS_Notes | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | [
"MIT"
] | 1 | 2021-03-03T03:52:21.000Z | 2021-03-03T03:52:21.000Z | """
You are given a sorted array in ascending order that is rotated at some unknown pivot
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]) and a target value.
Write a function that returns the target value's index. If the target value is not present
in the array, return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
"""
def findValueSortedShiftedArray(nums, target):
n = len(nums)
pivot = findPivot(nums, 0, n-1)
if pivot == -1:
return binarySearch(nums, 0, n-1, target)
if nums[pivot] == target:
return pivot
if nums[0] <= target:
return binarySearch(nums, 0, pivot-1, target)
return binarySearch(nums, pivot + 1, n-1, target)
def findPivot(nums, min, max):
min, max = 0, len(nums)
if max < min:
return -1
if max == min:
return min
mid = int((min + max) / 2)
if mid < max and nums[mid] > nums[mid + 1]:
return mid
if mid > min and nums[mid] < nums[mid - 1]:
return (mid - 1)
if nums[min] >= nums[mid]:
return findPivot(nums, mid + 1, max)
def binarySearch(nums, min, max, target):
if max < min:
return -1
mid = int((min + max) / 2)
if target == nums[mid]:
return mid
if target > nums[mid]:
return binarySearch(nums, (mid + 1), max, target)
return binarySearch(nums, min, (mid - 1), target) | 25.730159 | 91 | 0.58359 |
a31d402b111d9ee652386e79f628f7e0ddffa959 | 987 | py | Python | utility.py | Forthyse/Forsythe-Bot | c8871b1fde456403d951a9dde13dddaca2d3f67b | [
"MIT"
] | 3 | 2021-01-18T22:10:05.000Z | 2022-01-07T21:46:34.000Z | utility.py | Forthyse/Forsythe-Bot | c8871b1fde456403d951a9dde13dddaca2d3f67b | [
"MIT"
] | null | null | null | utility.py | Forthyse/Forsythe-Bot | c8871b1fde456403d951a9dde13dddaca2d3f67b | [
"MIT"
] | 2 | 2020-10-21T01:27:34.000Z | 2021-01-02T23:51:02.000Z | import discord
from discord.ext import commands
class utility(commands.Cog):
def __init__(self, client):
self.client = client
@commands.guild_only()
@commands.command(name = "avatar", aliases = ["av", "pic"])
async def avatar(self, ctx, user: discord.User=None):
if user is None:
user = ctx.author
embed = discord.Embed(color=000000, title=f'{user.name}#{user.discriminator}')
embed.set_image(url=user.avatar_url)
await ctx.send(embed=embed)
@commands.guild_only()
@commands.command(name="ping")
@commands.cooldown(2, 3, commands.BucketType.user)
async def ping(self, ctx):
pinging = await ctx.send('Pinging...')
diff = pinging.created_at - ctx.message.created_at
await pinging.edit(content=f'Pong! Latency: {round(diff.total_seconds()*1000)}ms | Websocket: {round(self.client.latency*1000)}ms')
def setup(client):
client.add_cog(utility(client)) | 37.961538 | 140 | 0.64843 |
a31f5b674099dd26d6054dab2dbff6ca679ee640 | 8,215 | py | Python | torch/ao/quantization/fx/fusion_patterns.py | li-ang/pytorch | 17f3179d607b9a2eac5efdfc36673e89f70e6628 | [
"Intel"
] | 1 | 2022-02-15T07:07:31.000Z | 2022-02-15T07:07:31.000Z | torch/ao/quantization/fx/fusion_patterns.py | xiaozhoushi/pytorch | 7dba88dfdb414def252531027658afe60409291d | [
"Intel"
] | null | null | null | torch/ao/quantization/fx/fusion_patterns.py | xiaozhoushi/pytorch | 7dba88dfdb414def252531027658afe60409291d | [
"Intel"
] | null | null | null | import torch
from torch.fx.graph import Node
from .pattern_utils import (
register_fusion_pattern,
)
from .utils import _parent_name
from .quantization_types import QuantizerCls, NodePattern, Pattern
from ..fuser_method_mappings import get_fuser_method
from ..fuser_method_mappings import get_fuser_method_new
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Union
from .match_utils import MatchAllNode
# ----------------------------
# Fusion Pattern Registrations
# ----------------------------
# Base Pattern Handler
class FuseHandler(ABC):
    """Base handler class for the fusion patterns.

    Subclasses locate the nodes of a matched fusion pattern in ``__init__``
    and perform the actual module fusion in ``fuse``.
    """
    def __init__(self, quantizer: QuantizerCls, node: Node):
        pass
    @abstractmethod
    def fuse(self,
             quantizer: QuantizerCls,
             load_arg: Callable,
             root_node: Node,
             matched_node_pattern: NodePattern,
             fuse_custom_config_dict: Dict[str, Any],
             fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]]) -> Node:
        # Fuse the matched pattern rooted at `root_node` and return the node
        # copied into the fused graph.
        pass
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.BatchNorm1d, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.BatchNorm2d, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.BatchNorm3d, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm1d, torch.nn.Conv1d)))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm3d, torch.nn.Conv3d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm1d, torch.nn.Conv1d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm3d, torch.nn.Conv3d)))
@register_fusion_pattern((torch.nn.BatchNorm1d, torch.nn.Linear))
class ConvOrLinearBNReLUFusion(FuseHandler):
    """Fuses Conv/Linear(+BN)(+ReLU) patterns into a single fused module.

    ``__init__`` walks the matched pattern from the outermost node (an
    optional ReLU, functional or module form) inward, recording the
    optional BatchNorm node and the Conv/Linear node.  ``fuse`` replaces
    the Conv/Linear module with the fused module and swaps BN for Identity.
    """
    def __init__(self, quantizer: QuantizerCls, node: Node):
        super().__init__(quantizer, node)
        self.relu_node = None
        self.bn_node = None
        # Outermost node may be a ReLU, either functional or a module.
        if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \
           (node.op == 'call_module' and type(quantizer.modules[node.target]) == torch.nn.ReLU):
            self.relu_node = node
            assert isinstance(node.args[0], Node)
            node = node.args[0]
        assert node.op == 'call_module'
        # Next (optional) layer inward: a BatchNorm module.
        if type(quantizer.modules[node.target]) in [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d]:
            self.bn_node = node
            self.bn = quantizer.modules[self.bn_node.target]
            assert isinstance(node.args[0], Node)
            node = node.args[0]
        assert node.op == 'call_module'
        # Innermost node: the Conv or Linear that anchors the fusion.
        self.conv_or_linear_node = node
        self.conv_or_linear = quantizer.modules[self.conv_or_linear_node.target]
    def fuse(self,
             quantizer: QuantizerCls,
             load_arg: Callable,
             root_node: Node,
             matched_node_pattern: NodePattern,
             fuse_custom_config_dict: Dict[str, Any],
             fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]]) -> Node:
        additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
        op_list = []
        if self.relu_node is not None:
            # since relu can be used multiple times, we'll need to create a relu module for each match
            if self.relu_node.op == 'call_module':
                relu = torch.nn.ReLU(quantizer.modules[self.relu_node.target].inplace)
            else:
                # TODO: get inplace argument from functional
                relu = torch.nn.ReLU()
            op_list.append(relu)
            relu.training = self.conv_or_linear.training
            if self.bn_node is not None:
                op_list.append(self.bn)
            op_list.append(self.conv_or_linear)
        else:
            # No ReLU in the pattern, so BN must be present (pure Conv/Linear is not a fusion).
            assert self.bn_node is not None
            op_list.append(self.bn)
            op_list.append(self.conv_or_linear)
        # the modules are added in order of relu - bn - conv_or_linear
        # so we need to correct it to conv_or_linear - bn - relu
        op_list.reverse()
        op_type_list = tuple(type(m) for m in op_list)
        conv_or_linear_parent_name, conv_or_linear_name = _parent_name(self.conv_or_linear_node.target)
        fuser_method = get_fuser_method(op_type_list, additional_fuser_method_mapping)
        if fuser_method is None:
            raise NotImplementedError("Cannot fuse modules: {}".format(op_type_list))
        fused = fuser_method(*op_list)
        # Replace the Conv/Linear module with the fused module in-place.
        setattr(quantizer.modules[conv_or_linear_parent_name], conv_or_linear_name, fused)
        # TODO: do we need to make sure bn is only used once?
        if self.bn_node is not None:
            # BN folded into the fused module; neutralize the original slot.
            parent_name, name = _parent_name(self.bn_node.target)
            setattr(quantizer.modules[parent_name], name, torch.nn.Identity())
        # relu may be used multiple times, so we don't set relu to identity
        return quantizer.fused_graph.node_copy(self.conv_or_linear_node, load_arg)
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Linear))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Linear))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.BatchNorm2d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.BatchNorm2d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.BatchNorm3d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.BatchNorm3d))
class ModuleReLUFusion(FuseHandler):
    """Fuses (module, ReLU) patterns such as Linear-ReLU and BatchNorm-ReLU.

    The matched root module is replaced in-place by the fused module
    produced by the fuser method registered for the matched module types.
    """
    def __init__(
            self,
            quantizer: QuantizerCls,
            node: Node):
        super().__init__(quantizer, node)
        # `node` is the outer ReLU; its first argument is the module being fused.
        self.relu_node = node
        assert isinstance(node.args[0], Node)
        node = node.args[0]
        assert node.op == 'call_module'
        self.module_node = node
        self.module = quantizer.modules[self.module_node.target]
    def fuse(self, quantizer: QuantizerCls,
             load_arg: Callable,
             root_node: Node,
             matched_node_pattern: NodePattern,
             fuse_custom_config_dict: Dict[str, Any],
             fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]]) -> Node:
        additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
        assert root_node.op == "call_module", "Expecting module node to be a call_module Node"
        root_module = quantizer.modules[root_node.target]
        # Bug fix: the message used to be split into two separate string
        # statements, so the second half was a dead expression and the assert
        # message was truncated (and contained a typo).
        assert len(additional_fuser_method_mapping) == 0, (
            "Fusion implementation is undergoing changes, "
            "additional_fuser_method_mapping is not supported currently.")
        def get_module(n):
            # Map each matched node to the module (or a fresh ReLU) it refers to.
            if n.op == "call_module":
                return quantizer.modules[n.target]
            elif n.op == "call_function" and n.target == torch.nn.functional.relu:
                # relu can be shared across matches, so create a new module per match
                relu = torch.nn.ReLU()
                relu.training = root_module.training
                return relu
            return MatchAllNode
        matched_modules = tuple(map(get_module, matched_node_pattern))
        matched_module_types = tuple(type(m) for m in matched_modules)
        module_parent_name, module_name = _parent_name(root_node.target)
        fuser_method = get_fuser_method_new(matched_module_types, fuser_method_mapping)
        # TODO: change the signature for fuser_method to take matched module patterns
        # as input
        fused_module = fuser_method(*matched_modules)
        setattr(quantizer.modules[module_parent_name], module_name, fused_module)
        return quantizer.fused_graph.node_copy(root_node, load_arg)
| 48.89881 | 118 | 0.684967 |
a31fadf9b33e9208ee29c713435331b8514e5684 | 9,786 | py | Python | derender/networks.py | tonyman1008/RADAR | b2fc944230c2fd445528a9827eea42e1a94957b8 | [
"CC0-1.0"
] | 38 | 2021-08-19T18:07:49.000Z | 2022-02-28T10:41:29.000Z | derender/networks.py | tonyman1008/RADAR | b2fc944230c2fd445528a9827eea42e1a94957b8 | [
"CC0-1.0"
] | 1 | 2021-10-30T14:43:18.000Z | 2021-11-13T01:18:53.000Z | derender/networks.py | tonyman1008/RADAR | b2fc944230c2fd445528a9827eea42e1a94957b8 | [
"CC0-1.0"
] | 5 | 2021-08-20T05:12:42.000Z | 2022-01-13T06:14:27.000Z | import numpy as np
import torch
import torch.nn as nn
import torchvision
EPS = 1e-7
class Encoder(nn.Module):
    """Convolutional encoder mapping an image to a flat `cout`-dim code.

    Stride-2 convolutions reduce `in_size` down to 4x4, a 4x4 conv
    collapses to 1x1, and a 1x1 conv projects to `cout` channels.
    """

    def __init__(self, cin, cout, in_size=64, nf=64, activation=nn.Tanh):
        super(Encoder, self).__init__()
        layers = [
            nn.Conv2d(cin, nf, kernel_size=4, stride=2, padding=1, bias=False),  # halves spatial size
            nn.ReLU(inplace=True),
            nn.Conv2d(nf, nf*2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf*2, nf*4, kernel_size=4, stride=2, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf*4, nf*8, kernel_size=4, stride=2, padding=1, bias=False),
            nn.ReLU(inplace=True),
        ]
        # Extra stride-2 stages for inputs larger than 64x64.
        for _ in range(int(np.log2(in_size//64))):
            layers += [
                nn.Conv2d(nf*8, nf*8, kernel_size=4, stride=2, padding=1, bias=False),
                nn.ReLU(inplace=True),
            ]
        layers += [
            nn.Conv2d(nf*8, nf*8, kernel_size=4, stride=1, padding=0, bias=False),  # 4x4 -> 1x1
            nn.ReLU(inplace=True),
            nn.Conv2d(nf*8, cout, kernel_size=1, stride=1, padding=0, bias=False),
        ]
        if activation is not None:
            layers.append(activation())
        self.network = nn.Sequential(*layers)

    def forward(self, input):
        # Flatten the 1x1 spatial map to a (batch, cout) code.
        return self.network(input).reshape(input.size(0), -1)
class SoRNet(nn.Module):
    """Predicts an `out_size`-long 1-D profile and `cout2` scalar parameters
    from an image through a shared convolutional encoder.
    """

    def __init__(self, cin, cout2=5, in_size=64, out_size=32, zdim=128, nf=64, activation=nn.Tanh):
        super(SoRNet, self).__init__()
        # Shared encoder: stride-2 convs down to 4x4, then collapse to a
        # zdim-channel 1x1 code.
        backbone = [
            nn.Conv2d(cin, nf, kernel_size=4, stride=2, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf, nf*2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf*2, nf*4, kernel_size=4, stride=2, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf*4, nf*8, kernel_size=4, stride=2, padding=1, bias=False),
            nn.ReLU(inplace=True),
        ]
        # Extra stride-2 stages for inputs larger than 64x64.
        for _ in range(int(np.log2(in_size//64))):
            backbone += [
                nn.Conv2d(nf*8, nf*8, kernel_size=4, stride=2, padding=1, bias=False),
                nn.ReLU(inplace=True),
            ]
        backbone += [
            nn.Conv2d(nf*8, nf*8, kernel_size=4, stride=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf*8, zdim, kernel_size=1, stride=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
        ]
        self.encoder = nn.Sequential(*backbone)

        # Head 1: grow the 1x1 code along height only (scale_factor=(2,1))
        # into an out_size-long column, then squash to one channel.
        profile_head = []
        for _ in range(int(np.log2(out_size//2))):
            profile_head += [
                nn.Upsample(scale_factor=(2,1), mode='nearest'),
                nn.Conv2d(zdim, zdim, kernel_size=(3,1), stride=(1,1), padding=(1,0), bias=False, padding_mode='replicate'),
                nn.ReLU(inplace=True),
            ]
        profile_head += [
            nn.Upsample(scale_factor=(2,1), mode='nearest'),
            nn.Conv2d(zdim, 1, kernel_size=(3,1), stride=(1,1), padding=(1,0), bias=False, padding_mode='replicate'),
        ]
        if activation is not None:
            profile_head.append(activation())
        self.out_net1 = nn.Sequential(*profile_head)

        # Head 2: small MLP producing cout2 values in (0, 1).
        self.out_net2 = nn.Sequential(
            nn.Linear(zdim, zdim),
            nn.ReLU(inplace=True),
            nn.Linear(zdim, cout2),
            nn.Sigmoid(),
        )

    def forward(self, input):
        batch = input.size(0)
        code = self.encoder(input)
        profile = self.out_net1(code).view(batch, -1)
        params = self.out_net2(code.view(batch, -1))
        return profile, params
class EnvMapNet(nn.Module):
    """Predicts an environment map (and optionally extra light parameters)
    from an image.

    The encoder compresses the image to a 1x1 zdim code; the decoder grows
    it into a 2:6-aspect panorama of `cout` channels and spatial size
    (out_size, 3*out_size).  When `cout2` is given, a second MLP head
    predicts `cout2` scalars in (0, 1).

    NOTE(review): with in_size < 128, `np.log2(in_size//128)` evaluates
    log2(0) and construction fails; callers appear to pass in_size >= 128
    — confirm against call sites.
    """
    def __init__(self, cin, cout, cout2=None, in_size=64, out_size=16, zdim=128, nf=64, activation=nn.Tanh):
        super(EnvMapNet, self).__init__()
        ## downsampling: each stride-2 conv halves the spatial size
        encoder = [
            nn.Conv2d(cin, nf, kernel_size=4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16, nf),
            # nn.BatchNorm2d(nf),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf, nf*2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16*2, nf*2),
            # nn.BatchNorm2d(nf*2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf*2, nf*4, kernel_size=4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16*4, nf*4),
            # nn.BatchNorm2d(nf*4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf*4, nf*8, kernel_size=4, stride=2, padding=1, bias=False),
            nn.GroupNorm(16*8, nf*8),
            # nn.BatchNorm2d(nf*4),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Extra stride-2 stages for inputs larger than 128x128.
        add_downsample = int(np.log2(in_size//128))
        if add_downsample > 0:
            for _ in range(add_downsample):
                encoder += [
                    nn.Conv2d(nf*8, nf*8, kernel_size=4, stride=2, padding=1, bias=False),
                    nn.GroupNorm(16*8, nf*8),
                    # nn.BatchNorm2d(nf*8),
                    nn.LeakyReLU(0.2, inplace=True),
                ]
        encoder += [
            nn.Conv2d(nf*8, nf*8, kernel_size=4, stride=2, padding=1, bias=False), # 8x8 -> 4x4
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf*8, zdim, kernel_size=4, stride=1, padding=0, bias=False), # 4x4 -> 1x1
            nn.ReLU(inplace=True)
        ]
        self.encoder = nn.Sequential(*encoder)
        ## upsampling: seed a 2x6 panorama, then double spatial size per stage
        decoder_envmap = [
            nn.ConvTranspose2d(zdim, nf*8, kernel_size=(2,6), stride=1, padding=0, bias=False), # 1x1 -> 2x6
            nn.ReLU(inplace=True),
        ]
        # Extra x2 stages for output maps taller than 16.
        add_upsample = int(np.log2(out_size//16))
        if add_upsample > 0:
            for _ in range(add_upsample):
                decoder_envmap += [
                    nn.Upsample(scale_factor=2, mode='nearest'),
                    nn.Conv2d(nf*8, nf*8, kernel_size=3, stride=1, padding=1, bias=False, padding_mode='replicate'),
                    nn.GroupNorm(16*8, nf*8),
                    nn.ReLU(inplace=True),
                ]
        decoder_envmap += [
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv2d(nf*8, nf*4, kernel_size=3, stride=1, padding=1, bias=False, padding_mode='replicate'),
            nn.GroupNorm(16*4, nf*4),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv2d(nf*4, nf*2, kernel_size=3, stride=1, padding=1, bias=False, padding_mode='replicate'),
            nn.GroupNorm(16*2, nf*2),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='nearest'),
            nn.Conv2d(nf*2, nf, kernel_size=3, stride=1, padding=1, bias=False, padding_mode='replicate'),
            nn.GroupNorm(16, nf),
            nn.ReLU(inplace=True),
            nn.Conv2d(nf, cout, kernel_size=5, stride=1, padding=2, bias=False, padding_mode='replicate')
        ]
        self.decoder_envmap = nn.Sequential(*decoder_envmap)
        if activation is not None:
            self.act = activation()
        else:
            self.act = None
        # Optional second head predicting cout2 scalar light parameters.
        if cout2 is not None:
            decoder_light_param = [
                nn.Linear(zdim, zdim),
                nn.ReLU(inplace=True),
                nn.Linear(zdim, cout2),
                nn.Sigmoid()
            ]
            self.decoder_light_param = nn.Sequential(*decoder_light_param)
        else:
            self.decoder_light_param = None
    def forward(self, input):
        z = self.encoder(input)
        env_map = self.decoder_envmap(z)
        env_map = env_map - 2 # initial sigmoid(-2): biases the map dark at init
        # env_map = env_map - 3 # initial sigmoid(-3), for 32x96 env_map
        if self.act is not None:
            env_map = self.act(env_map)
        if self.decoder_light_param is not None:
            # Flatten the 1x1 code to (batch, zdim) for the MLP head.
            light_param = self.decoder_light_param(z.view(*z.shape[:2]))
            return env_map, light_param
        else:
            return env_map
class DiscNet(nn.Module):
    """Discriminator: maps a 64x64 image to `cout` logits per sample."""

    def __init__(self, cin, cout, nf=64, norm=nn.InstanceNorm2d, activation=None):
        super(DiscNet, self).__init__()
        stages = [
            nn.Conv2d(cin, nf, kernel_size=4, stride=2, padding=1, bias=False),    # 64 -> 32
            norm(nf),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf, nf*2, kernel_size=4, stride=2, padding=1, bias=False),   # 32 -> 16
            norm(nf*2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf*2, nf*4, kernel_size=4, stride=2, padding=1, bias=False), # 16 -> 8
            norm(nf*4),
            nn.LeakyReLU(0.2, inplace=True),
            # No norm on the deepest conv stage.
            nn.Conv2d(nf*4, nf*8, kernel_size=4, stride=2, padding=1, bias=False), # 8 -> 4
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(nf*8, cout, kernel_size=4, stride=1, padding=0, bias=False), # 4 -> 1
        ]
        if activation is not None:
            stages.append(activation())
        self.network = nn.Sequential(*stages)

    def forward(self, input):
        # Flatten the 1x1 spatial map to (batch, cout).
        return self.network(input).reshape(input.size(0), -1)
| 41.466102 | 128 | 0.54251 |
a31fd12f7a47de59f2c6b4b5f49ad1fb2f1eaf44 | 244 | py | Python | app/generator-input.py | consoles/dsa4js | 83a064c3485e33bf3f0c70a10167b8a5bc08e10e | [
"MIT"
] | 2 | 2018-01-19T08:16:13.000Z | 2019-08-15T12:26:08.000Z | app/generator-input.py | consoles/dsa4js | 83a064c3485e33bf3f0c70a10167b8a5bc08e10e | [
"MIT"
] | null | null | null | app/generator-input.py | consoles/dsa4js | 83a064c3485e33bf3f0c70a10167b8a5bc08e10e | [
"MIT"
] | 1 | 2019-08-15T12:26:11.000Z | 2019-08-15T12:26:11.000Z | #!/usr/bin python
#coding:utf-8
#
# Generate 10^7 integers in random order
import random

RANGE = 10000000

# Emit every integer in [0, RANGE), shuffled, one per line.
with open('../test/input/bitSort.input', 'w') as out:
    for value in random.sample(range(RANGE), RANGE):
        out.write(str(value) + '\n')
print 'generator input file success!' | 15.25 | 43 | 0.672131 |
a323da1e6144f951fab0d4c366a9e8d27bf93ca5 | 46,478 | py | Python | sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/models/_models.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AssetJobInput(msrest.serialization.Model):
    """Asset input type.

    All required parameters must be populated in order to send to Azure.

    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    """

    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
    }

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        """
        super(AssetJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode')
        # Required field: a missing key fails fast with KeyError.
        self.uri = kwargs['uri']
class AssetJobOutput(msrest.serialization.Model):
    """Asset output type.

    :ivar mode: Output Asset Delivery Mode. Possible values include:
     "ReadWriteMount", "Upload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
    :ivar uri: Output Asset URI. This will have a default value of
     "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
    :vartype uri: str
    """

    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """
        :keyword mode: Output Asset Delivery Mode. Possible values include:
         "ReadWriteMount", "Upload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
        :keyword uri: Output Asset URI; defaults server-side when omitted.
        :paramtype uri: str
        """
        super(AssetJobOutput, self).__init__(**kwargs)
        # Both fields are optional; absent keys become None.
        self.mode = kwargs.get('mode')
        self.uri = kwargs.get('uri')
class BatchJob(msrest.serialization.Model):
    """Batch endpoint job.

    Variables are only populated by the server, and will be ignored when
    sending a request (``interaction_endpoints``, ``output``,
    ``provisioning_state``, ``status``).

    :ivar compute: Compute configuration used to set instance count.
    :vartype compute: ~azure.mgmt.machinelearningservices.models.ComputeConfiguration
    :ivar dataset: Input dataset. This will be deprecated. Use InputData instead.
    :vartype dataset: ~azure.mgmt.machinelearningservices.models.InferenceDataInputBase
    :ivar description: The asset description text.
    :vartype description: str
    :ivar error_threshold: Error threshold; if the error count for the entire
     input goes above this value, the batch inference is aborted. Range is
     [-1, int.MaxValue]; -1 means ignore all failures during batch inference.
    :vartype error_threshold: int
    :ivar input_data: Input data for the job.
    :vartype input_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
    :ivar interaction_endpoints: Dictionary of endpoint URIs, keyed by enumerated job endpoints.
    :vartype interaction_endpoints: dict[str,
     ~azure.mgmt.machinelearningservices.models.JobEndpoint]
    :ivar logging_level: Logging level for batch inference operation. Possible
     values include: "Info", "Warning", "Debug".
    :vartype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
    :ivar max_concurrency_per_instance: Indicates maximum number of parallelism per instance.
    :vartype max_concurrency_per_instance: int
    :ivar mini_batch_size: Size of the mini-batch passed to each batch invocation.
     For FileDataset, this is the number of files per mini-batch.
     For TabularDataset, this is the size of the records in bytes, per mini-batch.
    :vartype mini_batch_size: long
    :ivar name:
    :vartype name: str
    :ivar output: Location of the job output logs and artifacts.
    :vartype output: ~azure.mgmt.machinelearningservices.models.JobOutputArtifacts
    :ivar output_data: Job output data location; defaults to the
     workspaceblobstore location when not specified.
    :vartype output_data: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutputV2]
    :ivar output_dataset: Output dataset location; defaults to the
     workspaceblobstore location when not specified.
     This will be deprecated. Use OutputData instead.
    :vartype output_dataset: ~azure.mgmt.machinelearningservices.models.DataVersion
    :ivar output_file_name: Output file name.
    :vartype output_file_name: str
    :ivar partition_keys: Partition keys list used for Named partitioning.
    :vartype partition_keys: list[str]
    :ivar properties: The asset property dictionary.
    :vartype properties: dict[str, str]
    :ivar provisioning_state: Possible values include: "Succeeded", "Failed",
     "Canceled", "InProgress".
    :vartype provisioning_state: str or
     ~azure.mgmt.machinelearningservices.models.JobProvisioningState
    :ivar retry_settings: Retry Settings for the batch inference operation.
    :vartype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
    :ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
     "Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested",
     "Completed", "Failed", "Canceled", "NotResponding", "Paused", "Unknown".
    :vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
    :ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
    :vartype tags: dict[str, str]
    """

    _validation = {
        'interaction_endpoints': {'readonly': True},
        'output': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'compute': {'key': 'compute', 'type': 'ComputeConfiguration'},
        'dataset': {'key': 'dataset', 'type': 'InferenceDataInputBase'},
        'description': {'key': 'description', 'type': 'str'},
        'error_threshold': {'key': 'errorThreshold', 'type': 'int'},
        'input_data': {'key': 'inputData', 'type': '{JobInput}'},
        'interaction_endpoints': {'key': 'interactionEndpoints', 'type': '{JobEndpoint}'},
        'logging_level': {'key': 'loggingLevel', 'type': 'str'},
        'max_concurrency_per_instance': {'key': 'maxConcurrencyPerInstance', 'type': 'int'},
        'mini_batch_size': {'key': 'miniBatchSize', 'type': 'long'},
        'name': {'key': 'name', 'type': 'str'},
        'output': {'key': 'output', 'type': 'JobOutputArtifacts'},
        'output_data': {'key': 'outputData', 'type': '{JobOutputV2}'},
        'output_dataset': {'key': 'outputDataset', 'type': 'DataVersion'},
        'output_file_name': {'key': 'outputFileName', 'type': 'str'},
        'partition_keys': {'key': 'partitionKeys', 'type': '[str]'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'retry_settings': {'key': 'retrySettings', 'type': 'BatchRetrySettings'},
        'status': {'key': 'status', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        """All attributes are optional keyword arguments; see the class
        docstring for their meanings. Server-populated fields
        (``interaction_endpoints``, ``output``, ``provisioning_state``,
        ``status``) are always initialized to None and never read from
        kwargs.
        """
        super(BatchJob, self).__init__(**kwargs)
        _server_populated = {
            'interaction_endpoints', 'output', 'provisioning_state', 'status',
        }
        # Assign in the same order as the generated code for parity.
        for field in (
                'compute', 'dataset', 'description', 'error_threshold',
                'input_data', 'interaction_endpoints', 'logging_level',
                'max_concurrency_per_instance', 'mini_batch_size', 'name',
                'output', 'output_data', 'output_dataset', 'output_file_name',
                'partition_keys', 'properties', 'provisioning_state',
                'retry_settings', 'status', 'tags'):
            setattr(self, field,
                    None if field in _server_populated else kwargs.get(field))
class Resource(msrest.serialization.Model):
    """Common fields that are returned in the response for all Azure Resource
    Manager resources.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # All three fields are server-populated; clients never set them.
        for server_field in ('id', 'name', 'type'):
            setattr(self, server_field, None)
class BatchJobResource(Resource):
    """Azure Resource Manager resource envelope for a BatchJob.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar properties: Required. [Required] Additional attributes of the entity.
    :vartype properties: ~azure.mgmt.machinelearningservices.models.BatchJob
    :ivar system_data: System data associated with resource provider.
    :vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'properties': {'required': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'BatchJob'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(self, **kwargs):
        """
        :keyword properties: Required. [Required] Additional attributes of the entity.
        :paramtype properties: ~azure.mgmt.machinelearningservices.models.BatchJob
        """
        super(BatchJobResource, self).__init__(**kwargs)
        # Required field: a missing key fails fast with KeyError.
        self.properties = kwargs['properties']
        # Server-populated; never taken from kwargs.
        self.system_data = None
class BatchJobResourceArmPaginatedResult(msrest.serialization.Model):
    """A paginated list of BatchJob entities.

    :ivar next_link: The link to the next page of BatchJob objects. If null,
     there are no additional pages.
    :vartype next_link: str
    :ivar value: An array of objects of type BatchJob.
    :vartype value: list[~azure.mgmt.machinelearningservices.models.BatchJobResource]
    """

    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'value': {'key': 'value', 'type': '[BatchJobResource]'},
    }

    def __init__(self, **kwargs):
        """
        :keyword next_link: Link to the next page; None when there are no more pages.
        :paramtype next_link: str
        :keyword value: An array of objects of type BatchJob.
        :paramtype value: list[~azure.mgmt.machinelearningservices.models.BatchJobResource]
        """
        super(BatchJobResourceArmPaginatedResult, self).__init__(**kwargs)
        # Both fields optional; absent keys become None.
        self.next_link = kwargs.get('next_link')
        self.value = kwargs.get('value')
class BatchRetrySettings(msrest.serialization.Model):
    """Retry settings for a batch inference operation.

    :ivar max_retries: Maximum retry count for a mini-batch.
    :vartype max_retries: int
    :ivar timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
    :vartype timeout: ~datetime.timedelta
    """

    _attribute_map = {
        'max_retries': {'key': 'maxRetries', 'type': 'int'},
        'timeout': {'key': 'timeout', 'type': 'duration'},
    }

    def __init__(self, **kwargs):
        """
        :keyword max_retries: Maximum retry count for a mini-batch.
        :paramtype max_retries: int
        :keyword timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
        :paramtype timeout: ~datetime.timedelta
        """
        super(BatchRetrySettings, self).__init__(**kwargs)
        # Both settings are optional; absent keys become None.
        self.max_retries = kwargs.get('max_retries')
        self.timeout = kwargs.get('timeout')
class ComputeConfiguration(msrest.serialization.Model):
    """Configuration for compute binding.
    :ivar instance_count: Number of instances or nodes.
    :vartype instance_count: int
    :ivar instance_type: SKU type to run on.
    :vartype instance_type: str
    :ivar is_local: Set to true for jobs running on local compute.
    :vartype is_local: bool
    :ivar location: Location for virtual cluster run.
    :vartype location: str
    :ivar properties: Additional properties.
    :vartype properties: dict[str, str]
    :ivar target: ARM resource ID of the Compute you are targeting. If not provided the resource
     will be deployed as Managed.
    :vartype target: str
    """
    # Serialization metadata read reflectively by msrest; '{str}' denotes a
    # dict with string values.
    _attribute_map = {
        'instance_count': {'key': 'instanceCount', 'type': 'int'},
        'instance_type': {'key': 'instanceType', 'type': 'str'},
        'is_local': {'key': 'isLocal', 'type': 'bool'},
        'location': {'key': 'location', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'target': {'key': 'target', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword instance_count: Number of instances or nodes.
        :paramtype instance_count: int
        :keyword instance_type: SKU type to run on.
        :paramtype instance_type: str
        :keyword is_local: Set to true for jobs running on local compute.
        :paramtype is_local: bool
        :keyword location: Location for virtual cluster run.
        :paramtype location: str
        :keyword properties: Additional properties.
        :paramtype properties: dict[str, str]
        :keyword target: ARM resource ID of the Compute you are targeting. If not provided the resource
         will be deployed as Managed.
        :paramtype target: str
        """
        super(ComputeConfiguration, self).__init__(**kwargs)
        # All fields optional; absent keywords default to None.
        self.instance_count = kwargs.get('instance_count', None)
        self.instance_type = kwargs.get('instance_type', None)
        self.is_local = kwargs.get('is_local', None)
        self.location = kwargs.get('location', None)
        self.properties = kwargs.get('properties', None)
        self.target = kwargs.get('target', None)
class DataVersion(msrest.serialization.Model):
    """Data asset version details.
    All required parameters must be populated in order to send to Azure.
    :ivar dataset_type: The Format of dataset. Possible values include: "Simple", "Dataflow".
    :vartype dataset_type: str or ~azure.mgmt.machinelearningservices.models.DatasetType
    :ivar datastore_id: ARM resource ID of the datastore where the asset is located.
    :vartype datastore_id: str
    :ivar description: The asset description text.
    :vartype description: str
    :ivar is_anonymous: If the name version are system generated (anonymous registration).
    :vartype is_anonymous: bool
    :ivar path: Required. [Required] The path of the file/directory in the datastore.
    :vartype path: str
    :ivar properties: The asset property dictionary.
    :vartype properties: dict[str, str]
    :ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
    :vartype tags: dict[str, str]
    """
    # Client-side constraints msrest enforces before serializing a request;
    # the pattern requires at least one word character in 'path'.
    _validation = {
        'path': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
    }
    # Serialization metadata read reflectively by msrest.
    _attribute_map = {
        'dataset_type': {'key': 'datasetType', 'type': 'str'},
        'datastore_id': {'key': 'datastoreId', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
        'path': {'key': 'path', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword dataset_type: The Format of dataset. Possible values include: "Simple", "Dataflow".
        :paramtype dataset_type: str or ~azure.mgmt.machinelearningservices.models.DatasetType
        :keyword datastore_id: ARM resource ID of the datastore where the asset is located.
        :paramtype datastore_id: str
        :keyword description: The asset description text.
        :paramtype description: str
        :keyword is_anonymous: If the name version are system generated (anonymous registration).
        :paramtype is_anonymous: bool
        :keyword path: Required. [Required] The path of the file/directory in the datastore.
        :paramtype path: str
        :keyword properties: The asset property dictionary.
        :paramtype properties: dict[str, str]
        :keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
        :paramtype tags: dict[str, str]
        """
        super(DataVersion, self).__init__(**kwargs)
        self.dataset_type = kwargs.get('dataset_type', None)
        self.datastore_id = kwargs.get('datastore_id', None)
        self.description = kwargs.get('description', None)
        self.is_anonymous = kwargs.get('is_anonymous', None)
        # 'path' is required: direct indexing raises KeyError if it is missing.
        self.path = kwargs['path']
        self.properties = kwargs.get('properties', None)
        self.tags = kwargs.get('tags', None)
class ErrorDetail(msrest.serialization.Model):
    """Error detail information.
    All required parameters must be populated in order to send to Azure.
    :ivar code: Required. Error code.
    :vartype code: str
    :ivar message: Required. Error message.
    :vartype message: str
    """
    # Both fields are mandatory; msrest validates before serialization.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword code: Required. Error code.
        :paramtype code: str
        :keyword message: Required. Error message.
        :paramtype message: str
        """
        super(ErrorDetail, self).__init__(**kwargs)
        # Required fields: direct indexing raises KeyError when omitted.
        self.code = kwargs['code']
        self.message = kwargs['message']
class ErrorResponse(msrest.serialization.Model):
    """Error response information.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message.
    :vartype message: str
    :ivar details: An array of error detail objects.
    :vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
    """
    # readonly: msrest never serializes these; they are filled only when
    # deserializing a server response.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'details': {'readonly': True},
    }
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetail]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(ErrorResponse, self).__init__(**kwargs)
        # Server-populated; always initialized to None on the client side.
        self.code = None
        self.message = None
        self.details = None
class InferenceDataInputBase(msrest.serialization.Model):
    """InferenceDataInputBase.
    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: InferenceDataUrlInput, InferenceDatasetIdInput, InferenceDatasetInput.
    All required parameters must be populated in order to send to Azure.
    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    """
    _validation = {
        'data_input_type': {'required': True},
    }
    _attribute_map = {
        'data_input_type': {'key': 'dataInputType', 'type': 'str'},
    }
    # Polymorphic dispatch: msrest inspects the 'dataInputType' discriminator
    # in a payload and instantiates the matching subclass.
    _subtype_map = {
        'data_input_type': {'DataUrl': 'InferenceDataUrlInput', 'DatasetId': 'InferenceDatasetIdInput', 'DatasetVersion': 'InferenceDatasetInput'}
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(InferenceDataInputBase, self).__init__(**kwargs)
        # Base class leaves the discriminator unset; subclasses pin it.
        self.data_input_type = None  # type: Optional[str]
class InferenceDatasetIdInput(InferenceDataInputBase):
    """InferenceDatasetIdInput.
    All required parameters must be populated in order to send to Azure.
    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar dataset_id: ARM ID of the input dataset.
    :vartype dataset_id: str
    """
    _validation = {
        'data_input_type': {'required': True},
    }
    _attribute_map = {
        'data_input_type': {'key': 'dataInputType', 'type': 'str'},
        'dataset_id': {'key': 'datasetId', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword dataset_id: ARM ID of the input dataset.
        :paramtype dataset_id: str
        """
        super(InferenceDatasetIdInput, self).__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype.
        self.data_input_type = 'DatasetId'  # type: str
        self.dataset_id = kwargs.get('dataset_id', None)
class InferenceDatasetInput(InferenceDataInputBase):
    """InferenceDatasetInput.
    All required parameters must be populated in order to send to Azure.
    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar dataset_name: Name of the input dataset.
    :vartype dataset_name: str
    :ivar dataset_version: Version of the input dataset.
    :vartype dataset_version: str
    """
    _validation = {
        'data_input_type': {'required': True},
    }
    _attribute_map = {
        'data_input_type': {'key': 'dataInputType', 'type': 'str'},
        'dataset_name': {'key': 'datasetName', 'type': 'str'},
        'dataset_version': {'key': 'datasetVersion', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword dataset_name: Name of the input dataset.
        :paramtype dataset_name: str
        :keyword dataset_version: Version of the input dataset.
        :paramtype dataset_version: str
        """
        super(InferenceDatasetInput, self).__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype.
        self.data_input_type = 'DatasetVersion'  # type: str
        self.dataset_name = kwargs.get('dataset_name', None)
        self.dataset_version = kwargs.get('dataset_version', None)
class InferenceDataUrlInput(InferenceDataInputBase):
    """InferenceDataUrlInput.
    All required parameters must be populated in order to send to Azure.
    :ivar data_input_type: Required. Constant filled by server. Possible values include:
     "DatasetVersion", "DatasetId", "DataUrl".
    :vartype data_input_type: str or
     ~azure.mgmt.machinelearningservices.models.InferenceDataInputType
    :ivar path: Required. Asset path to the input data, say a blob URL.
    :vartype path: str
    """
    # 'path' must be present and contain at least one word character.
    _validation = {
        'data_input_type': {'required': True},
        'path': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
    }
    _attribute_map = {
        'data_input_type': {'key': 'dataInputType', 'type': 'str'},
        'path': {'key': 'path', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword path: Required. Asset path to the input data, say a blob URL.
        :paramtype path: str
        """
        super(InferenceDataUrlInput, self).__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype.
        self.data_input_type = 'DataUrl'  # type: str
        # Required field: raises KeyError when omitted.
        self.path = kwargs['path']
class JobEndpoint(msrest.serialization.Model):
    """Job endpoint definition.
    :ivar endpoint: Url for endpoint.
    :vartype endpoint: str
    :ivar job_endpoint_type: Endpoint type.
    :vartype job_endpoint_type: str
    :ivar port: Port for endpoint.
    :vartype port: int
    :ivar properties: Additional properties to set on the endpoint.
    :vartype properties: dict[str, str]
    """
    # Serialization metadata read reflectively by msrest.
    _attribute_map = {
        'endpoint': {'key': 'endpoint', 'type': 'str'},
        'job_endpoint_type': {'key': 'jobEndpointType', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
        'properties': {'key': 'properties', 'type': '{str}'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword endpoint: Url for endpoint.
        :paramtype endpoint: str
        :keyword job_endpoint_type: Endpoint type.
        :paramtype job_endpoint_type: str
        :keyword port: Port for endpoint.
        :paramtype port: int
        :keyword properties: Additional properties to set on the endpoint.
        :paramtype properties: dict[str, str]
        """
        super(JobEndpoint, self).__init__(**kwargs)
        # All fields optional; absent keywords default to None.
        self.endpoint = kwargs.get('endpoint', None)
        self.job_endpoint_type = kwargs.get('job_endpoint_type', None)
        self.port = kwargs.get('port', None)
        self.properties = kwargs.get('properties', None)
class JobInput(msrest.serialization.Model):
    """Job input definition.
    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: MLTableJobInput, UriFileJobInput, UriFolderJobInput.
    All required parameters must be populated in order to send to Azure.
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """
    _validation = {
        'job_input_type': {'required': True},
    }
    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }
    # Polymorphic dispatch: the 'jobInputType' discriminator in a payload
    # selects which subclass msrest instantiates on deserialization.
    _subtype_map = {
        'job_input_type': {'MLTable': 'MLTableJobInput', 'UriFile': 'UriFileJobInput', 'UriFolder': 'UriFolderJobInput'}
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(JobInput, self).__init__(**kwargs)
        self.description = kwargs.get('description', None)
        # Base class leaves the discriminator unset; subclasses pin it.
        self.job_input_type = None  # type: Optional[str]
class JobOutputArtifacts(msrest.serialization.Model):
    """Job output definition container information on where to find job logs and artifacts.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar datastore_id: ARM ID of the datastore where the job logs and artifacts are stored.
    :vartype datastore_id: str
    :ivar path: Path within the datastore to the job logs and artifacts.
    :vartype path: str
    """
    # readonly: server-populated only; msrest never serializes these.
    _validation = {
        'datastore_id': {'readonly': True},
        'path': {'readonly': True},
    }
    _attribute_map = {
        'datastore_id': {'key': 'datastoreId', 'type': 'str'},
        'path': {'key': 'path', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        """
        super(JobOutputArtifacts, self).__init__(**kwargs)
        # Filled in only when deserializing a server response.
        self.datastore_id = None
        self.path = None
class JobOutputV2(msrest.serialization.Model):
    """Job output definition container information on where to find the job output.
    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: UriFileJobOutput.
    All required parameters must be populated in order to send to Azure.
    :ivar description: Description for the output.
    :vartype description: str
    :ivar job_output_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile".
    :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
    """
    _validation = {
        'job_output_type': {'required': True},
    }
    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
    }
    # Polymorphic dispatch keyed on the 'jobOutputType' discriminator.
    _subtype_map = {
        'job_output_type': {'UriFile': 'UriFileJobOutput'}
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword description: Description for the output.
        :paramtype description: str
        """
        super(JobOutputV2, self).__init__(**kwargs)
        self.description = kwargs.get('description', None)
        # Base class leaves the discriminator unset; subclasses pin it.
        self.job_output_type = None  # type: Optional[str]
class LabelClass(msrest.serialization.Model):
    """Label class definition.
    :ivar display_name: Display name of the label class.
    :vartype display_name: str
    :ivar subclasses: Dictionary of subclasses of the label class.
    :vartype subclasses: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
    """
    # '{LabelClass}' makes 'subclasses' a recursive dict of this same model.
    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'subclasses': {'key': 'subclasses', 'type': '{LabelClass}'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword display_name: Display name of the label class.
        :paramtype display_name: str
        :keyword subclasses: Dictionary of subclasses of the label class.
        :paramtype subclasses: dict[str, ~azure.mgmt.machinelearningservices.models.LabelClass]
        """
        super(LabelClass, self).__init__(**kwargs)
        # Both fields optional; absent keywords default to None.
        self.display_name = kwargs.get('display_name', None)
        self.subclasses = kwargs.get('subclasses', None)
class MLTableJobInput(JobInput, AssetJobInput):
    """MLTableJobInput.
    All required parameters must be populated in order to send to Azure.
    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """
    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }
    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(MLTableJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # Required field: raises KeyError when omitted.
        self.uri = kwargs['uri']
        self.description = kwargs.get('description', None)
        # Discriminator is constant for this subtype; the generator emitted
        # this assignment twice (once per parent class) — set it once.
        self.job_input_type = 'MLTable'  # type: str
class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.
    :ivar created_by: The identity that created the resource.
    :vartype created_by: str
    :ivar created_by_type: The type of identity that created the resource. Possible values include:
     "User", "Application", "ManagedIdentity", "Key".
    :vartype created_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
    :ivar created_at: The timestamp of resource creation (UTC).
    :vartype created_at: ~datetime.datetime
    :ivar last_modified_by: The identity that last modified the resource.
    :vartype last_modified_by: str
    :ivar last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :vartype last_modified_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
    :ivar last_modified_at: The timestamp of resource last modification (UTC).
    :vartype last_modified_at: ~datetime.datetime
    """
    # Serialization metadata; 'iso-8601' (de)serializes datetimes as ISO
    # 8601 strings.
    _attribute_map = {
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_by_type': {'key': 'createdByType', 'type': 'str'},
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword created_by: The identity that created the resource.
        :paramtype created_by: str
        :keyword created_by_type: The type of identity that created the resource. Possible values
         include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype created_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
        :keyword created_at: The timestamp of resource creation (UTC).
        :paramtype created_at: ~datetime.datetime
        :keyword last_modified_by: The identity that last modified the resource.
        :paramtype last_modified_by: str
        :keyword last_modified_by_type: The type of identity that last modified the resource. Possible
         values include: "User", "Application", "ManagedIdentity", "Key".
        :paramtype last_modified_by_type: str or
         ~azure.mgmt.machinelearningservices.models.CreatedByType
        :keyword last_modified_at: The timestamp of resource last modification (UTC).
        :paramtype last_modified_at: ~datetime.datetime
        """
        super(SystemData, self).__init__(**kwargs)
        # All fields optional; absent keywords default to None.
        self.created_by = kwargs.get('created_by', None)
        self.created_by_type = kwargs.get('created_by_type', None)
        self.created_at = kwargs.get('created_at', None)
        self.last_modified_by = kwargs.get('last_modified_by', None)
        self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
        self.last_modified_at = kwargs.get('last_modified_at', None)
class UriFileJobInput(JobInput, AssetJobInput):
    """UriFileJobInput.
    All required parameters must be populated in order to send to Azure.
    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """
    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }
    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(UriFileJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # Required field: raises KeyError when omitted.
        self.uri = kwargs['uri']
        self.description = kwargs.get('description', None)
        # Discriminator is constant for this subtype; the generator emitted
        # this assignment twice (once per parent class) — set it once.
        self.job_input_type = 'UriFile'  # type: str
class UriFileJobOutput(JobOutputV2, AssetJobOutput):
    """UriFileJobOutput.
    All required parameters must be populated in order to send to Azure.
    :ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
    :ivar uri: Output Asset URI. This will have a default value of
     "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
    :vartype uri: str
    :ivar description: Description for the output.
    :vartype description: str
    :ivar job_output_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile".
    :vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
    """
    _validation = {
        'job_output_type': {'required': True},
    }
    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
        :keyword uri: Output Asset URI. This will have a default value of
         "azureml/{jobId}/{outputFolder}/{outputFileName}" if omitted.
        :paramtype uri: str
        :keyword description: Description for the output.
        :paramtype description: str
        """
        super(UriFileJobOutput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # Optional: the service supplies a default URI when omitted.
        self.uri = kwargs.get('uri', None)
        self.description = kwargs.get('description', None)
        # Discriminator is constant for this subtype; the generator emitted
        # this assignment twice (once per parent class) — set it once.
        self.job_output_type = 'UriFile'  # type: str
class UriFolderJobInput(JobInput, AssetJobInput):
    """UriFolderJobInput.
    All required parameters must be populated in order to send to Azure.
    :ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
     "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
    :vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
    :ivar uri: Required. Input Asset URI.
    :vartype uri: str
    :ivar description: Description for the input.
    :vartype description: str
    :ivar job_input_type: Required. Specifies the type of job.Constant filled by server. Possible
     values include: "UriFile", "UriFolder", "MLTable".
    :vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
    """
    _validation = {
        'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
        'job_input_type': {'required': True},
    }
    _attribute_map = {
        'mode': {'key': 'mode', 'type': 'str'},
        'uri': {'key': 'uri', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'job_input_type': {'key': 'jobInputType', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """
        :keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
         "ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
        :paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
        :keyword uri: Required. Input Asset URI.
        :paramtype uri: str
        :keyword description: Description for the input.
        :paramtype description: str
        """
        super(UriFolderJobInput, self).__init__(**kwargs)
        self.mode = kwargs.get('mode', None)
        # Required field: raises KeyError when omitted.
        self.uri = kwargs['uri']
        self.description = kwargs.get('description', None)
        # Discriminator is constant for this subtype; the generator emitted
        # this assignment twice (once per parent class) — set it once.
        self.job_input_type = 'UriFolder'  # type: str
| 39.48853 | 146 | 0.650652 |
a3256f1d5ce64484739511b64bf4572f8dcbb09c | 407 | py | Python | utils.py | AbinavRavi/Federated-learning-MI | 06294e5de94bf5b8826dedb469a3430fdae76e37 | [
"MIT"
] | 3 | 2021-04-04T19:32:29.000Z | 2022-02-10T05:25:27.000Z | utils.py | AbinavRavi/Federated-learning-MI | 06294e5de94bf5b8826dedb469a3430fdae76e37 | [
"MIT"
] | null | null | null | utils.py | AbinavRavi/Federated-learning-MI | 06294e5de94bf5b8826dedb469a3430fdae76e37 | [
"MIT"
] | null | null | null | import nibabel as nib
import numpy as np
from glob import glob
def to_slice(image_path, seg_path):
    """Load a 3D volume and its segmentation and return aligned lists of 2D slices.

    Slices are taken along the last axis (axis 2); slices whose image data is
    entirely zero are skipped, and the segmentation list stays index-aligned
    with the image list.

    :param image_path: path to a NIfTI image volume readable by nibabel
    :param seg_path: path to the matching NIfTI segmentation volume
    :return: (image_slices, seg_slices) — two equal-length lists of 2D arrays
    """
    image = nib.load(image_path).get_fdata()
    seg = nib.load(seg_path).get_fdata()
    image_list = []
    seg_list = []
    # Iterate and index the same axis. The original ranged over shape[2] but
    # indexed axis 0, which crashes (or silently mixes axes) on non-cubic volumes.
    for i in range(image.shape[2]):
        image_slice = image[:, :, i]
        # np.nonzero(...) returns a tuple, so the old `np.nonzero(x) != 0`
        # test was always truthy; np.any actually skips empty slices.
        if np.any(image_slice):
            image_list.append(image_slice)
            seg_list.append(seg[:, :, i])
    return image_list, seg_list
| 22.611111 | 44 | 0.638821 |
a3259ed1f24efeaecf755551060f140ed167c93c | 576 | py | Python | tests/test_ebook.py | plysytsya/doublebook | 09dcd5399288c9544df928136a9e2f2e54639cbd | [
"MIT"
] | null | null | null | tests/test_ebook.py | plysytsya/doublebook | 09dcd5399288c9544df928136a9e2f2e54639cbd | [
"MIT"
] | null | null | null | tests/test_ebook.py | plysytsya/doublebook | 09dcd5399288c9544df928136a9e2f2e54639cbd | [
"MIT"
] | null | null | null | import os
import unittest
from doublebook.ebook import Ebook
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class EbookTest(unittest.TestCase):
    """Smoke tests for Ebook: loading raw text and tokenizing it into sentences."""
    def setUp(self):
        # Fixture text file shipped next to the tests.
        fixture = os.path.join(THIS_DIR, "test_data", "zen_en.txt")
        self.ebook = Ebook(fixture)
    def test_read(self):
        # read() should expose the raw file contents as a string.
        self.ebook.read()
        self.assertIsInstance(self.ebook.content, str)
    def test_tokenize(self):
        # tokenize() should expose the split sentences as a list.
        self.ebook.tokenize()
        self.assertIsInstance(self.ebook.sentences, list)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main(verbosity=3)
| 22.153846 | 72 | 0.689236 |
a32929c2bf6ce5c743f0108e1a7c3d364e872fd0 | 299 | py | Python | server/entities/log_group.py | thulio/watchlogs | 17469f77851ce0cab916c472f9f508790b6157bf | [
"MIT"
] | 1 | 2019-12-30T16:32:47.000Z | 2019-12-30T16:32:47.000Z | server/entities/log_group.py | thulio/watchlogs | 17469f77851ce0cab916c472f9f508790b6157bf | [
"MIT"
] | null | null | null | server/entities/log_group.py | thulio/watchlogs | 17469f77851ce0cab916c472f9f508790b6157bf | [
"MIT"
class LogGroup(object):
    """A named log group with an optional list of streams."""
    def __init__(self, name, streams=None):
        self.name = name
        self.streams = streams
    @classmethod
    def from_dict(cls, group_dict):
        """Build a LogGroup from an API response dict keyed by ``logGroupName``."""
        return cls(group_dict['logGroupName'])
    def __eq__(self, other):
        # Compare by name; defer to the other operand for foreign types
        # instead of raising AttributeError on a missing ``.name``.
        if not isinstance(other, LogGroup):
            return NotImplemented
        return self.name == other.name
    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable (Python sets
        # __hash__ to None); hash the same field equality uses.
        return hash(self.name)
    def __repr__(self):
        return "LogGroup(name=%r, streams=%r)" % (self.name, self.streams)
| 24.916667 | 51 | 0.64214 |
a32a2a3e9e3b2d5447dc0047b1a00f9577f9eedc | 13,924 | py | Python | build_github_actions.py | Geequlim/ECMAScript | 8817a9f63b28d07bde65334e8663d324da06282b | [
"MIT"
] | 155 | 2019-02-04T20:24:35.000Z | 2020-07-10T00:14:37.000Z | build_github_actions.py | Geequlim/ECMAScript | 8817a9f63b28d07bde65334e8663d324da06282b | [
"MIT"
] | 25 | 2019-02-15T05:36:02.000Z | 2020-07-10T08:06:00.000Z | build_github_actions.py | Geequlim/ECMAScript | 8817a9f63b28d07bde65334e8663d324da06282b | [
"MIT"
] | 13 | 2019-04-09T09:32:55.000Z | 2020-07-07T20:56:46.000Z | #!/usr/bin/env python
"""
run this every time you upgrade the godot-base version to generate new matching github workflows
You must be in this directory, and in the modules subfolder of godot (just as if you would install this project into godot)
usage:
python build_github_actions.py --godot-version "3.4.4-stable" --godot-github-folder ../../.github --ECMAS-github-folder .github
"""
import argparse
import yaml
import os
import subprocess
from dataclasses import dataclass, field
from typing import Dict, List, Any
import copy
# https://stackoverflow.com/a/33300001 + some changes
def str_presenter(dumper, data):
    """PyYAML representer that emits multiline strings in literal block style ('|')."""
    tag = "tag:yaml.org,2002:str"
    is_multiline = "\n" in data or len(data.splitlines()) > 1
    if is_multiline:
        return dumper.represent_scalar(tag, data, style="|")
    return dumper.represent_scalar(tag, data)
# Register the presenter so every str dumped via yaml.dump uses block style
# for multiline values (needed for readable `run: |` steps in workflows).
yaml.add_representer(str, str_presenter)
# to use with safe_dump:
yaml.representer.SafeRepresenter.add_representer(str, str_presenter)
# END https://stackoverflow.com/a/33300001
@dataclass
class BuildOpts:
    """Build options merged into one generated workflow file.

    SCONSFLAGS/ENV are injected into the workflow's ``env`` section;
    GODOT_BASE_BRANCH selects the godot ref the workflow checks out.
    """
    SCONSFLAGS: str
    GODOT_BASE_BRANCH: str
    ENV: Dict[str, str] = field(default_factory=dict)
    def add_to_flags(self, toadd: str) -> None:
        """Append ``toadd`` to SCONSFLAGS, separated by exactly one space.

        The previous implementation both conditionally prefixed a space and
        used an f-string separator, so it always produced a double space.
        """
        if self.SCONSFLAGS and not self.SCONSFLAGS.endswith(" "):
            self.SCONSFLAGS += " "
        self.SCONSFLAGS += toadd
    def get_fixed_flags(self) -> str:
        """Strip flags quickjs cannot build with and normalize whitespace."""
        for flag in ("warnings=all", "werror=yes"):
            self.SCONSFLAGS = self.SCONSFLAGS.replace(flag, "")
        # Substring removal leaves stray spaces; collapse runs into one.
        self.SCONSFLAGS = " ".join(self.SCONSFLAGS.split())
        return self.SCONSFLAGS
def parseargs(argv=None):
    """Parse command-line options for the workflow generator.

    :param argv: optional list of argument strings; ``None`` (the default)
        keeps the original behavior of reading ``sys.argv``, while passing a
        list makes the function usable (and testable) programmatically.
    :return: argparse.Namespace with godot_version, godot_github_folder,
        ECMAS_github_folder attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--godot-version", required=True)
    parser.add_argument("--godot-github-folder", required=True)
    parser.add_argument("--ECMAS-github-folder", required=True)
    return parser.parse_args(argv)
def checkout_local_godot_install(tag: str):
    """Check out the given godot release tag in the checkout two levels up.

    Raises RuntimeError when git exits non-zero (e.g. unknown tag).
    """
    command = ["git", "checkout", f"tags/{tag}"]
    completed = subprocess.run(command, cwd="../../")
    if completed.returncode == 0:
        return
    raise RuntimeError(f"godot not setup properly, could not checkout '{' '.join(command)}'")
def get_windows_mingw_checkout_steps() -> List[Dict[str, Any]]:
    """Workflow steps that install an MSYS2/MinGW toolchain and scons on Windows."""
    msys2_setup = {
        "name": "setup-msys2",
        "uses": "msys2/setup-msys2@v2",
        "with": {"msystem": "MINGW64", "update": True, "install": "mingw-w64-x86_64-gcc"},
    }
    mingw_update = {
        "name": "update mingw2",
        "run": "pacman -Syu --needed --noconfirm mingw-w64-x86_64-python3-pip mingw-w64-x86_64-gcc mingw-w64-i686-python3-pip mingw-w64-i686-gcc make",
    }
    scons_install = {
        "name": "update scons",
        "run": "pip3 install scons",
    }
    return [msys2_setup, mingw_update, scons_install]
def get_ECMAScript_checkout_steps() -> List[Dict[str, Any]]:
    """Workflow steps that check out godot and place ECMAScript under modules/."""
    godot_checkout = {
        "name": "Checkout Godot",
        "uses": "actions/checkout@v2",
        "with": {"repository": "godotengine/godot", "ref": "${{ env.GODOT_BASE_BRANCH }}"},
    }
    module_checkout = {
        "name": "Checkout ECMAScript",
        "uses": "actions/checkout@v2",
        "with": {"path": "${{github.workspace}}/modules/ECMAScript/"},
    }
    return [godot_checkout, module_checkout]
def get_rid_of_ubsan_asan_linux(matrix_step: Dict[str, Any]) -> Dict[str, Any]:
    """Strip sanitizer flags quickjs cannot build with from a matrix entry.

    Mutates ``matrix_step`` in place (and returns it) by removing
    ``use_ubsan=yes`` / ``use_asan=yes`` from its "name" and "sconsflags"
    values and tidying the leftover comma/space debris.

    Keys are now optional: the caller only guards on "name", so a step
    without "sconsflags" previously raised KeyError.
    """
    for get_rid_of in ("use_ubsan=yes", "use_asan=yes"):
        if "name" in matrix_step:
            matrix_step["name"] = matrix_step["name"].replace(get_rid_of, "").replace(" , ", " ").replace(", )", ")")
        if "sconsflags" in matrix_step:
            matrix_step["sconsflags"] = matrix_step["sconsflags"].replace(get_rid_of, "").replace(", )", ")")
    return matrix_step
def fix_all_workflows(
    ECMAS_github_folder: str, workflows: Dict[str, BuildOpts], wf_actions_that_require_shell: List[str]
) -> List[str]:
    """Rewrite the copied godot workflow files so they build the ECMAScript module.

    For each workflow file: SCONSFLAGS and env vars are patched from its
    BuildOpts, the godot checkout step is replaced by a godot+ECMAScript
    checkout, windows builds are switched to mingw, linux sanitizer flags are
    scrubbed, and local composite actions that need an explicit shell get one
    injected.  Returns the list of workflow display names (used later by the
    publish workflow).
    """
    wf_names: List[str] = []
    for wf_base_fn, build_opts in workflows.items():
        full_fn = os.path.join(ECMAS_github_folder, "workflows", wf_base_fn)
        data = yaml.safe_load(open(full_fn))
        wf_names.append(data["name"])
        # Merge the workflow's own SCONSFLAGS into build_opts, then write the
        # cleaned combination (warning flags stripped) back into the file.
        build_opts.add_to_flags(data["env"]["SCONSFLAGS"])
        data["env"]["SCONSFLAGS"] = build_opts.get_fixed_flags()
        data["env"]["GODOT_BASE_BRANCH"] = build_opts.GODOT_BASE_BRANCH
        for k, v in build_opts.ENV.items():
            data["env"][k] = v
        # PyYAML parses the bare `on:` key as boolean True; rebuild the dict so
        # it round-trips as a literal "on" key, keeping "name" first.
        if True in data.keys():
            new_data = {"name": data["name"], "on": data[True]}
            del data[True]
            for k, v in data.items():
                if k in ("name", "on"):
                    continue
                new_data[k] = v
            data = new_data
        assert len(data["jobs"]) == 1
        only_template_name = list(data["jobs"].keys())[0]
        new_steps = []
        if "windows" in wf_base_fn:
            # quickjs can't build under msvc, must use mingw, install it here
            new_steps += get_windows_mingw_checkout_steps()
            data["jobs"][only_template_name]["defaults"] = {"run": {"shell": "msys2 {0}"}}
        elif "linux" in wf_base_fn:
            for matrix_step in data["jobs"][only_template_name]["strategy"]["matrix"]["include"]:
                # quickjs fails under ubsan & asan, don't include those flags
                if "name" in matrix_step and "Editor and sanitizers" in matrix_step["name"]:
                    matrix_step = get_rid_of_ubsan_asan_linux(matrix_step)
        base_github_string = "./.github/"
        for step in data["jobs"][only_template_name]["steps"]:
            # replace godot checkout routine with this checkout routine
            if "uses" in step and "checkout" in step["uses"]:
                new_steps += get_ECMAScript_checkout_steps()
            elif (
                "uses" in step
                and base_github_string in step["uses"]
                and any(x in step["uses"] for x in wf_actions_that_require_shell)
            ):
                # Point the step at the action copy inside the ECMAScript module
                # and tell it which shell to run under (see fix_all_actions).
                step["uses"] = step["uses"].replace(base_github_string, "./modules/ECMAScript/.github/")
                to_add = {"shell": "msys2 {0}" if "windows" in wf_base_fn else "sh"}
                if "with" not in step:
                    step["with"] = to_add
                else:
                    step["with"].update(to_add)
                new_steps.append(step)
            else:
                new_steps.append(step)
        data["jobs"][only_template_name]["steps"] = new_steps
        with open(full_fn, "w") as fh:
            yaml.dump(data, fh, sort_keys=False, allow_unicode=True)
    return wf_names
def fix_all_actions(ECMAS_github_folder: str, actions: List[str]) -> List[str]:
    """
    This can be simplified once:
    https://github.com/actions/runner/pull/1767
    is completed

    Composite actions cannot yet take their shell from an input expression, so
    every step that declares a shell is duplicated: one copy per supported
    shell, each guarded by an `if: inputs.shell == ...` condition, and the
    action grows a "shell" input.  Returns the sorted action directory names
    (e.g. "godot-build") that were patched this way.
    """
    actions_that_require_shell_set = set()
    for action_base_fn in actions:
        full_action_fn = os.path.join(ECMAS_github_folder, action_base_fn)
        data = yaml.safe_load(open(full_action_fn))
        new_steps = []
        for step in data["runs"]["steps"]:
            if "shell" in step:
                for shell in ["sh", "msys2 {0}"]:
                    cp_step = copy.deepcopy(step)
                    cp_step["shell"] = shell
                    cp_step["if"] = f"inputs.shell == '{shell}'"
                    new_steps.append(cp_step)
                data["inputs"]["shell"] = {"description": "the shell to run this under", "default": "sh"}
                actions_that_require_shell_set.add(action_base_fn)
            else:
                new_steps.append(step)
        # new_steps.append(step)
        # Uncomment this when github actions updated
        # if "shell" in step:
        # step["shell"] = "${{ inputs.shell }}"
        # data["inputs"]["shell"] = {"description": "the shell to run this under", "default": "sh"}
        # new_steps.append(step)
        # We ca
        data["runs"]["steps"] = new_steps
        with open(full_action_fn, "w") as fh:
            yaml.dump(data, fh, sort_keys=False, allow_unicode=True)
    # "actions/godot-build/action.yml" -> "godot-build"
    return list(sorted([x.split("/")[1] for x in actions_that_require_shell_set]))
def add_publish_workflow(out_fn: str, wf_name_list: List[str]):
    """Write the tag-triggered publish workflow to *out_fn*.

    The workflow runs a github-script step that polls until every workflow in
    *wf_name_list* (built from the same commit) has completed, downloads all
    their artifacts as zips, then uploads the zips to the GitHub release for
    the pushed tag.
    """
    # "on": {"tag": "", "workflow_run": {"workflows": wf_name_list, "types": ["completed"]}},
    # run_id: ${{ github.event.workflow_run.id }},
    # var matchArtifact = artifacts.data.artifacts.filter((artifact) => {
    # return artifact.name == "pr"
    # })[0];
    script_text = (
        """var total_slept = 0;
var downloaded_files = [];
var seen_completed_wfs = [];
var expected_to_see = """
        + str(len(wf_name_list))
        + """;
while (total_slept < 3600000 && seen_completed_wfs.length < expected_to_see) {
var all_workflows = await github.rest.actions.listWorkflowRunsForRepo({
owner: context.repo.owner,
repo: context.repo.repo,
});
console.log("Expecting to download from " + expected_to_see + " workflows, currently at " + seen_completed_wfs.length + ". Have already downloaded " + downloaded_files.length + " files as " + downloaded_files);
for (const workflow of all_workflows.data.workflow_runs) {
if (workflow.head_sha == "${{ github.sha }}") {
console.log("found " + workflow.name + " " + workflow.status);
if (workflow.status == "completed") {
if (seen_completed_wfs.includes(workflow.name)) {continue;}
if (workflow.conclusion == "success") {
var artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: workflow.id,
per_page: 100,
});
for (const artifact of artifacts.data.artifacts) {
var fn = '${{github.workspace}}/' + artifact.name + '.zip';
if (downloaded_files.includes(fn)) {continue;}
var download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: artifact.id,
archive_format: 'zip',
});
var fs = require('fs');
fs.writeFileSync(fn, Buffer.from(download.data));
downloaded_files.push(fn);
}
seen_completed_wfs.push(workflow.name);
}
}
}
}
if (seen_completed_wfs.length < expected_to_see) {
console.log("sleeping " + 300000);
await new Promise(r => setTimeout(r, 300000));
total_slept = total_slept + 300000;
console.log("done sleeping " + 300000);
}
}
console.log(downloaded_files);"""
    )
    data = {
        "name": "🚢 Publish release",
        # You should tag as late as possible, don't want to sleep too long and get push job killed
        "on": {
            "push": {"tags": ["*"]},
        },
        "jobs": {
            "collect-template": {
                "runs-on": "ubuntu-latest",
                "steps": [
                    # {"name": "show dir", "run": "sleep 900"},
                    {
                        "name": "download artifacts",
                        "uses": "actions/github-script@v6",
                        "if": "startsWith(github.ref, 'refs/tags')",
                        "with": {"script": script_text},
                    },
                    {"name": "show dir", "run": "ls -R"},
                    {
                        "name": "Upload binaries to release",
                        "uses": "svenstaro/upload-release-action@v2",
                        "if": "startsWith(github.ref, 'refs/tags')",
                        "with": {
                            "repo_token": "${{ secrets.GITHUB_TOKEN }}",
                            "file": "*.zip",
                            "tag": "${{ github.ref }}",
                            "overwrite": "true",
                            "file_glob": "true",
                        },
                    },
                ],
            }
        },
    }
    with open(out_fn, "w") as fh:
        yaml.dump(data, fh, sort_keys=False, allow_unicode=True)
def main():
    """Sync godot's CI workflows/actions into the ECMAScript repo and adapt them."""
    args = parseargs()
    assert os.path.isdir(args.godot_github_folder)
    assert os.path.isdir(args.ECMAS_github_folder)
    checkout_local_godot_install(args.godot_version)
    # Start from a pristine copy of godot's .github/actions and .github/workflows.
    for x in ["actions", "workflows"]:
        subprocess.call(["rm", "-rf", os.path.join(args.ECMAS_github_folder, x)])
        subprocess.call(
            ["cp", "-r", os.path.join(args.godot_github_folder, x), os.path.join(args.ECMAS_github_folder, x)]
        )
    basic_flags = " "
    actions = [
        "actions/godot-build/action.yml",
        "actions/godot-cache/action.yml",
        "actions/godot-deps/action.yml",
        "actions/upload-artifact/action.yml",
    ]
    wf_actions_that_require_shell = fix_all_actions(args.ECMAS_github_folder, actions)
    # Every platform builds with the same base flags; windows additionally
    # forces mingw (quickjs cannot be built with msvc).
    workflows = {
        "android_builds.yml": BuildOpts(basic_flags, args.godot_version),
        "ios_builds.yml": BuildOpts(basic_flags, args.godot_version),
        "javascript_builds.yml": BuildOpts(basic_flags, args.godot_version),
        "linux_builds.yml": BuildOpts(basic_flags, args.godot_version),
        "macos_builds.yml": BuildOpts(basic_flags, args.godot_version),
        "server_builds.yml": BuildOpts(basic_flags, args.godot_version),
        "windows_builds.yml": BuildOpts(f"{basic_flags} use_mingw=yes", args.godot_version),
    }
    wf_names = fix_all_workflows(args.ECMAS_github_folder, workflows, wf_actions_that_require_shell)
    # The static checks workflow is godot-specific; drop it entirely.
    subprocess.call(["rm", os.path.join(args.ECMAS_github_folder, "workflows", "static_checks.yml")])
    out_publish_fn = os.path.join(args.ECMAS_github_folder, "workflows", "on_tag.yml")
    add_publish_workflow(out_publish_fn, wf_names)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| 40.242775 | 214 | 0.568946 |
a32ad9de709c3a24f830152b0d7a35e9a5113527 | 10,061 | py | Python | src/panoramic/cli/husky/service/blending/tel_planner.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-11-13T17:26:59.000Z | 2021-03-19T15:11:26.000Z | src/panoramic/cli/husky/service/blending/tel_planner.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-10-28T10:22:35.000Z | 2021-01-27T17:33:58.000Z | src/panoramic/cli/husky/service/blending/tel_planner.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 3 | 2021-01-26T07:58:03.000Z | 2021-03-11T13:28:34.000Z | from collections import defaultdict
from typing import Dict, Iterable, List, Optional, Set, Tuple, cast
from sqlalchemy import column
from panoramic.cli.husky.common.enum import EnumHelper
from panoramic.cli.husky.core.taxonomy.aggregations import AggregationDefinition
from panoramic.cli.husky.core.taxonomy.enums import AggregationType, TaxonTypeEnum
from panoramic.cli.husky.core.taxonomy.models import Taxon
from panoramic.cli.husky.core.taxonomy.override_mapping.types import (
OverrideMappingTelData,
)
from panoramic.cli.husky.core.tel.exceptions import TelExpressionException
from panoramic.cli.husky.core.tel.result import PostFormula, PreFormula, TaxonToTemplate
from panoramic.cli.husky.core.tel.sql_formula import SqlFormulaTemplate, SqlTemplate
from panoramic.cli.husky.core.tel.tel_dialect import TaxonTelDialect
from panoramic.cli.husky.service.context import HuskyQueryContext
from panoramic.cli.husky.service.filter_builder.filter_clauses import FilterClause
from panoramic.cli.husky.service.types.api_data_request_types import BlendingDataRequest
from panoramic.cli.husky.service.utils.exceptions import (
HuskyInvalidTelException,
InvalidRequest,
)
from panoramic.cli.husky.service.utils.taxon_slug_expression import (
TaxonExpressionStr,
TaxonMap,
)
class TelPlan:
    """Plan holding the compiled TEL formulas/templates for a blended query.

    Populated by :class:`TelPlanner`; all containers start empty so builders
    can append/update freely.
    """

    data_source_formula_templates: Dict[str, List[SqlFormulaTemplate]]
    comparison_data_source_formula_templates: Dict[str, List[SqlFormulaTemplate]]
    dimension_formulas: List[PreFormula]
    comparison_dimension_formulas: List[PreFormula]
    metric_pre: List[PreFormula]
    metric_post: List[Tuple[PostFormula, Taxon]]
    """
    List of formulas SQL formulas and taxons for the last phase
    """
    data_source_filter_templates: Dict[str, TaxonToTemplate]
    comparison_join_columns: List[str]
    """
    List of columns to join data and comparison dataframes
    """
    comparison_raw_taxon_slugs: List[TaxonExpressionStr]
    """
    List of raw taxon slugs to use for comparison
    """
    override_mappings: OverrideMappingTelData
    """
    List of override mappings referenced in the result
    """
    comparison_override_mappings: OverrideMappingTelData
    """
    List of override mappings referenced in the result of comparison query
    """

    def __init__(self):
        # defaultdicts let planner code append/assign without key checks.
        self.data_source_formula_templates = defaultdict(list)
        self.comparison_data_source_formula_templates = defaultdict(list)
        self.data_source_filter_templates = defaultdict(dict)
        self.dimension_formulas = []
        self.comparison_dimension_formulas = []
        self.metric_pre = []
        self.metric_post = []
        self.comparison_join_columns = []
        self.comparison_raw_taxon_slugs = []
        self.override_mappings = set()
        self.comparison_override_mappings = set()
class TelPlanner:
    """Builds a :class:`TelPlan` from a blending data request and taxon maps."""

    @classmethod
    def plan(
        cls,
        ctx: HuskyQueryContext,
        request: BlendingDataRequest,
        projection_taxons: TaxonMap,
        all_taxons: TaxonMap,
        taxon_to_ds: Dict[str, Set[str]],
    ) -> TelPlan:
        """
        Prepares taxons plan

        Computed (TEL) taxons are rendered into pre/post formulas and
        per-data-source templates; raw taxons become direct column references
        with a default aggregation (group_by for dimensions, sum for metrics).
        Comparison taxons additionally feed the comparison join columns.
        """
        plan = TelPlan()
        # NOTE(review): result_cache is written below but never read — looks vestigial.
        result_cache = dict()
        all_data_sources = {subreq.properties.data_source for subreq in request.data_subrequests}
        for taxon in projection_taxons.values():
            if taxon.calculation:
                original_slug = taxon.comparison_taxon_slug_origin or taxon.slug
                taxon_data_sources = taxon_to_ds[original_slug]
                result = cls._parse_taxon_expr(ctx, taxon, taxon.slug, taxon_data_sources, all_taxons)
                result_cache[taxon.slug] = result
                # Create dict for dim templates, key is data source
                for ds_formula in result.data_source_formula_templates:
                    plan.data_source_formula_templates[ds_formula.data_source].append(ds_formula)
                plan.dimension_formulas.extend(result.dimension_formulas)
                plan.metric_pre.extend(result.pre_formulas)
                plan.metric_post.append((result.post_formula, taxon))
                plan.override_mappings.update(result.override_mappings)
            else:
                # Raw taxon: select its (sql-safe) column directly.
                sql_slug = column(taxon.slug_safe_sql_identifier)
                if taxon.is_dimension:
                    aggregation = taxon.aggregation or AggregationDefinition(type=AggregationType.group_by)
                else:
                    aggregation = taxon.aggregation or AggregationDefinition(type=AggregationType.sum)
                plan.metric_pre.append(PreFormula(sql_slug, taxon.slug, aggregation))
                plan.metric_post.append((PostFormula(sql_slug), taxon))
        if request.comparison and request.comparison.taxons:
            for taxon in [all_taxons[slug] for slug in request.comparison.taxons]:
                if taxon.calculation:
                    # Comparison join columns may come from any subrequest's data source.
                    taxon_data_sources = all_data_sources
                    result = cls._parse_taxon_expr(
                        ctx, taxon, 'comp_join_col_' + taxon.slug, taxon_data_sources, all_taxons
                    )
                    # Create dict for dim templates, key is data source
                    for ds_formula in result.data_source_formula_templates:
                        plan.data_source_formula_templates[ds_formula.data_source].append(ds_formula)
                    if result.override_mappings:
                        plan.override_mappings.update(result.override_mappings)
                        plan.comparison_override_mappings.update(result.override_mappings)
                    plan.dimension_formulas.extend(result.dimension_formulas)
                    for ds_formula in result.data_source_formula_templates:
                        plan.comparison_data_source_formula_templates[ds_formula.data_source].append(ds_formula)
                    plan.comparison_dimension_formulas.extend(result.dimension_formulas)
                    for dim_formula in result.dimension_formulas:
                        plan.comparison_join_columns.append(dim_formula.label)
                else:
                    # Raw comparison join taxon taxon.. add it to join and also to select from dataframes
                    plan.comparison_join_columns.append(taxon.slug_safe_sql_identifier)
                    plan.comparison_raw_taxon_slugs.append(taxon.slug_safe_sql_identifier)
        cls._populate_filter_templates_to_plan(ctx, plan, request, all_taxons)
        return plan

    @classmethod
    def _populate_filter_templates_to_plan(
        cls, ctx: HuskyQueryContext, plan: TelPlan, request: BlendingDataRequest, all_taxons: TaxonMap
    ):
        """
        Prepare sql templates for filters, keyed by data source and then by taxon slug.
        In general, TelPlan filtering works like this:
        1. create template for each subrequest filter taxon (raw and computed)
        2. pass that template as dict to the single husky
        3. In select builder, render these templates to create records into taxon_model_info_map,
        especially the sql accessor property.
        :param ctx:
        """
        for subrequest in request.data_subrequests:
            data_source = subrequest.properties.data_source
            filter_templates = cls.get_preaggregation_filter_templates(
                ctx,
                [subrequest.preaggregation_filters, subrequest.scope.preaggregation_filters],
                all_taxons,
                data_source,
            )
            plan.data_source_filter_templates[data_source] = filter_templates

    @classmethod
    def get_preaggregation_filter_templates(
        cls,
        ctx: HuskyQueryContext,
        filter_clauses: List[Optional[FilterClause]],
        all_taxons: TaxonMap,
        data_source: str,
    ) -> TaxonToTemplate:
        """
        Creates sql templates for each taxon. Returns them keys by taxon slug.

        Raises InvalidRequest if a metric taxon appears in a preaggregation
        filter (only dimensions are allowed there).
        """
        taxons_to_template: TaxonToTemplate = dict()
        for filter_clause in filter_clauses:
            if filter_clause:
                taxon_slugs = filter_clause.get_taxon_slugs()
                for slug in taxon_slugs:
                    taxon = all_taxons[cast(TaxonExpressionStr, slug)]
                    if not taxon.is_dimension:
                        exc = InvalidRequest(
                            'request.preaggregation_filters',
                            f'Metric taxons are not allowed in preaggregation filters. Remove filter for taxon {taxon.slug}',
                        )
                        raise exc
                    if taxon.calculation:
                        # Computed dimension: render its TEL expression for this data source only.
                        result = cls._parse_taxon_expr(
                            ctx, taxon, taxon.slug, [data_source], all_taxons, subrequest_only=True
                        )
                        taxons_to_template[taxon.slug_expr] = result.data_source_formula_templates[0]
                    else:
                        # Raw dimension: trivial template referencing the slug itself.
                        taxons_to_template[taxon.slug_expr] = SqlFormulaTemplate(
                            SqlTemplate(f'${{{taxon.slug}}}'), taxon.slug_expr, data_source, {taxon.slug_expr}
                        )
        return taxons_to_template

    @staticmethod
    def _parse_taxon_expr(
        ctx: HuskyQueryContext,
        taxon: Taxon,
        tel_prefix: str,
        data_sources: Iterable[str],
        all_taxons: TaxonMap,
        subrequest_only=False,
    ):
        """Render the taxon's TEL calculation; TEL errors are re-raised as
        HuskyInvalidTelException carrying the offending taxon slug."""
        taxon_type = EnumHelper.from_value(TaxonTypeEnum, taxon.taxon_type)
        try:
            return TaxonTelDialect().render(
                expr=cast(str, taxon.calculation),
                ctx=ctx,
                taxon_map=all_taxons,
                taxon_slug=tel_prefix,
                comparison=taxon.is_comparison_taxon,
                data_sources=data_sources,
                taxon_type=taxon_type,
                aggregation=taxon.aggregation,
                subrequest_only=subrequest_only,
            )
        except TelExpressionException as error:
            raise HuskyInvalidTelException(error, taxon.slug)
| 43.743478 | 125 | 0.657191 |
a32bb9ecf389628aa17fb222486d5eb8bc144dcb | 13,836 | py | Python | mrcnn/callbacks.py | dtitenko-dev/Mask_RCNN | 5167db4174d96e9f2accc0a9f4866fb3a7bf5993 | [
"MIT"
] | null | null | null | mrcnn/callbacks.py | dtitenko-dev/Mask_RCNN | 5167db4174d96e9f2accc0a9f4866fb3a7bf5993 | [
"MIT"
] | null | null | null | mrcnn/callbacks.py | dtitenko-dev/Mask_RCNN | 5167db4174d96e9f2accc0a9f4866fb3a7bf5993 | [
"MIT"
] | null | null | null | import os
import re
import six
import h5py
import json
import logging
import tensorflow.keras as keras
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.distribute import distributed_file_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import checkpoint_management
from tensorflow.python.util import serialization
def save_optimizer_weights(model, filepath, overwrite=True, **kwargs):
    """Save only the optimizer state of a compiled Keras model to HDF5.

    Mirrors the optimizer-related part of Keras' `save_model`: writes the
    model metadata attributes plus an `optimizer_weights` group, but no
    model weights.

    Arguments:
        model: Compiled Keras model whose optimizer state is saved.
        filepath: Target path, or an already-open `h5py.File`.
        overwrite: If False and `filepath` exists, ask before overwriting.
    """
    if not isinstance(filepath, h5py.File):
        # If file exists and should not be overwritten.
        if not overwrite and os.path.isfile(filepath):
            proceed = hdf5_format.ask_to_proceed_with_overwrite(filepath)
            if not proceed:
                return
        f = h5py.File(filepath, mode='w')
        opened_new_file = True
    else:
        f = filepath
        opened_new_file = False
    try:
        model_metadata = saving_utils.model_metadata(
            model, include_optimizer=True, require_config=False)
        for k, v in model_metadata.items():
            # Containers are JSON-encoded so they fit into HDF5 attributes.
            if isinstance(v, (dict, list, tuple)):
                f.attrs[k] = json.dumps(
                    v, default=serialization.get_json_type).encode('utf8')
            else:
                f.attrs[k] = v
        if not isinstance(model.optimizer, optimizers.TFOptimizer):
            hdf5_format.save_optimizer_weights_to_hdf5_group(f, model.optimizer)
        f.flush()
    finally:
        # Only close handles we opened ourselves.
        if opened_new_file:
            f.close()
def load_optimizer_weights(model, filepath):
    """Loads optimizer weights to compiled model from hdf5 file.

    Arguments:
        model: Compiled model whose optimizer will receive the saved state.
        filepath: Path to an HDF5 file written by `save_optimizer_weights`,
            or an already-open `h5py.File`.

    Returns:
        The same `model`, with optimizer weights restored when possible.
        Failures are logged as warnings, never raised.
    """
    opened_new_file = not isinstance(filepath, h5py.File)
    if opened_new_file:
        f = h5py.File(filepath, mode='r')
    else:
        f = filepath
    try:
        if model.optimizer and 'optimizer_weights' in f:
            # Optimizer slot variables only exist after a first apply step;
            # create them explicitly so set_weights() has targets to fill.
            try:
                model.optimizer._create_all_weights(model.trainable_variables)
            except (NotImplementedError, AttributeError):
                logging.warning(
                    'Error when creating the weights of optimizer {}, making it '
                    'impossible to restore the saved optimizer state. As a result, '
                    'your model is starting with a freshly initialized optimizer.')
            optimizer_weight_values = hdf5_format.load_optimizer_weights_from_hdf5_group(f)
            try:
                model.optimizer.set_weights(optimizer_weight_values)
            except ValueError:
                logging.warning('Error in loading the saved optimizer '
                                'state. As a result, your model is '
                                'starting with a freshly initialized '
                                'optimizer.')
    finally:
        if opened_new_file:
            f.close()
    return model
class OptimizerCheckpoint(keras.callbacks.Callback):
    """Callback that periodically checkpoints only the optimizer state.

    Companion to Keras' `ModelCheckpoint`: same `filepath` placeholders,
    `save_freq`/`period` semantics and multi-worker handling, but it writes
    the optimizer weights (via `save_optimizer_weights`) instead of the
    model weights.
    """

    def __init__(self,
                 filepath,
                 verbose=0,
                 save_freq='epoch',
                 **kwargs):
        super(OptimizerCheckpoint, self).__init__()
        self.verbose = verbose
        self.filepath = path_to_string(filepath)
        self.save_freq = save_freq
        self.epochs_since_last_save = 0
        self._batches_seen_since_last_saving = 0
        self._last_batch_seen = 0
        self._current_epoch = 0
        if 'load_weights_on_restart' in kwargs:
            self.load_weights_on_restart = kwargs['load_weights_on_restart']
            logging.warning('`load_weights_on_restart` argument is deprecated. '
                            'Please use `model.load_weights()` for loading weights '
                            'before the start of `model.fit()`.')
        else:
            self.load_weights_on_restart = False
        if 'period' in kwargs:
            self.period = kwargs['period']
            logging.warning('`period` argument is deprecated. Please use `save_freq` '
                            'to specify the frequency in number of batches seen.')
        else:
            self.period = 1
        if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
            raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))
        # Only the chief worker writes model checkpoints, but all workers
        # restore checkpoint at on_train_begin().
        self._chief_worker_only = False

    def on_train_begin(self, logs=None):
        # Optionally restore optimizer state from the newest matching checkpoint.
        if self.load_weights_on_restart:
            filepath_to_load = (
                self._get_most_recently_modified_file_matching_pattern(self.filepath))
            if (filepath_to_load is not None and
                    self._checkpoint_exists(filepath_to_load)):
                try:
                    # `filepath` may contain placeholders such as `{epoch:02d}`, and
                    # thus it attempts to load the most recently modified file with file
                    # name matching the pattern.
                    load_optimizer_weights(self.model, filepath=filepath_to_load)
                except (IOError, ValueError) as e:
                    raise ValueError('Error loading file from {}. Reason: {}'.format(
                        filepath_to_load, e))

    def on_train_batch_end(self, batch, logs=None):
        if self._should_save_on_batch(batch):
            self._save_optimizer_weights(epoch=self._current_epoch, logs=logs)

    def on_epoch_begin(self, epoch, logs=None):
        self._current_epoch = epoch

    def on_epoch_end(self, epoch, logs=None):
        self.epochs_since_last_save += 1
        if self.save_freq == 'epoch':
            self._save_optimizer_weights(epoch, logs)

    def _should_save_on_batch(self, batch):
        """Handles batch-level saving logic, supports steps_per_execution."""
        if self.save_freq == 'epoch':
            return False
        if batch <= self._last_batch_seen:  # New epoch.
            add_batches = batch + 1  # batches are zero-indexed.
        else:
            add_batches = batch - self._last_batch_seen
        self._batches_seen_since_last_saving += add_batches
        self._last_batch_seen = batch
        if self._batches_seen_since_last_saving >= self.save_freq:
            self._batches_seen_since_last_saving = 0
            return True
        return False

    def _save_optimizer_weights(self, epoch, logs=None):
        """Saves the optimizer weights.

        Arguments:
            epoch: the epoch this iteration is in.
            logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
        """
        logs = logs or {}
        if isinstance(self.save_freq, int) or self.epochs_since_last_save >= self.period:
            # Block only when saving interval is reached.
            logs = tf_utils.to_numpy_or_python_type(logs)
            self.epochs_since_last_save = 0
            filepath = self._get_file_path(epoch, logs)
            try:
                if self.verbose > 0:
                    print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
                save_optimizer_weights(self.model, filepath, overwrite=True)
            except IOError as e:
                # `e.errno` appears to be `None` so checking the content of `e.args[0]`.
                if 'is a directory' in six.ensure_str(e.args[0]).lower():
                    raise IOError('Please specify a non-directory filepath for '
                                  'ModelCheckpoint. Filepath used is an existing '
                                  'directory: {}'.format(filepath))

    def _get_file_path(self, epoch, logs):
        """Returns the file path for checkpoint."""
        # pylint: disable=protected-access
        try:
            # `filepath` may contain placeholders such as `{epoch:02d}` and
            # `{mape:.2f}`. A mismatch between logged metrics and the path's
            # placeholders can cause formatting to fail.
            file_path = self.filepath.format(epoch=epoch + 1, **logs)
        except KeyError as e:
            raise KeyError('Failed to format this callback filepath: "{}". '
                           'Reason: {}'.format(self.filepath, e))
        self._write_filepath = distributed_file_utils.write_filepath(
            file_path, self.model.distribute_strategy)
        return self._write_filepath

    def _maybe_remove_file(self):
        # Remove the checkpoint directory in multi-worker training where this worker
        # should not checkpoint. It is a dummy directory previously saved for sync
        # distributed training.
        distributed_file_utils.remove_temp_dir_with_filepath(
            self._write_filepath, self.model.distribute_strategy)

    def _checkpoint_exists(self, filepath):
        """Returns whether the checkpoint `filepath` refers to exists."""
        if filepath.endswith('.h5'):
            return file_io.file_exists(filepath)
        tf_saved_optimizer_exists = file_io.file_exists(filepath + '.h5')
        return tf_saved_optimizer_exists

    def _get_most_recently_modified_file_matching_pattern(self, pattern):
        """Returns the most recently modified filepath matching pattern.

        Pattern may contain python formatting placeholder. If
        `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
        check for most recently modified one that matches the pattern.
        In the rare case where there are more than one pattern-matching file having
        the same modified time that is most recent among all, return the filepath
        that is largest (by `>` operator, lexicographically using the numeric
        equivalents). This provides a tie-breaker when multiple files are most
        recent. Note that a larger `filepath` can sometimes indicate a later time of
        modification (for instance, when epoch/batch is used as formatting option),
        but not necessarily (when accuracy or loss is used). The tie-breaker is
        put in the logic as best effort to return the most recent, and to avoid
        undeterministic result.
        Modified time of a file is obtained with `os.path.getmtime()`.
        This utility function is best demonstrated via an example:
        ```python
        file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
        test_dir = self.get_temp_dir()
        path_pattern = os.path.join(test_dir, file_pattern)
        file_paths = [
            os.path.join(test_dir, file_name) for file_name in
            ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
        ]
        for file_path in file_paths:
            # Write something to each of the files
        self.assertEqual(
            _get_most_recently_modified_file_matching_pattern(path_pattern),
            file_paths[-1])
        ```
        Arguments:
            pattern: The file pattern that may optionally contain python placeholder
                such as `{epoch:02d}`.
        Returns:
            The most recently modified file's full filepath matching `pattern`. If
            `pattern` does not contain any placeholder, this returns the filepath
            that
            exactly matches `pattern`. Returns `None` if no match is found.
        """
        dir_name = os.path.dirname(pattern)
        base_name = os.path.basename(pattern)
        base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
        # If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
        # use that as it is more robust than `os.path.getmtime()`.
        latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)
        if latest_tf_checkpoint is not None and re.match(
                base_name_regex, os.path.basename(latest_tf_checkpoint)):
            return latest_tf_checkpoint
        latest_mod_time = 0
        file_path_with_latest_mod_time = None
        n_file_with_latest_mod_time = 0
        file_path_with_largest_file_name = None
        if file_io.file_exists(dir_name):
            for file_name in os.listdir(dir_name):
                # Only consider if `file_name` matches the pattern.
                if re.match(base_name_regex, file_name):
                    file_path = os.path.join(dir_name, file_name)
                    mod_time = os.path.getmtime(file_path)
                    if (file_path_with_largest_file_name is None or
                            file_path > file_path_with_largest_file_name):
                        file_path_with_largest_file_name = file_path
                    if mod_time > latest_mod_time:
                        latest_mod_time = mod_time
                        file_path_with_latest_mod_time = file_path
                        # In the case a file with later modified time is found, reset
                        # the counter for the number of files with latest modified time.
                        n_file_with_latest_mod_time = 1
                    elif mod_time == latest_mod_time:
                        # In the case a file has modified time tied with the most recent,
                        # increment the counter for the number of files with latest modified
                        # time by 1.
                        n_file_with_latest_mod_time += 1
        if n_file_with_latest_mod_time == 1:
            # Return the sole file that has most recent modified time.
            return file_path_with_latest_mod_time
        else:
            # If there are more than one file having latest modified time, return
            # the file path with the largest file name.
            return file_path_with_largest_file_name
| 44.632258 | 92 | 0.626337 |
a32cb578d2151333cae6b68de0344e2c78b2c29d | 875 | py | Python | wapps/gallery/migrations/0002_manual_album_image_deletion.py | apihackers/wapps | e8158747aa3d77246d41142580faf9a5f2b0d968 | [
"MIT"
] | 7 | 2018-01-17T20:26:59.000Z | 2022-03-23T08:12:00.000Z | wapps/gallery/migrations/0002_manual_album_image_deletion.py | apihackers/wapps | e8158747aa3d77246d41142580faf9a5f2b0d968 | [
"MIT"
] | 511 | 2017-10-21T17:59:50.000Z | 2022-03-28T18:49:21.000Z | wapps/gallery/migrations/0002_manual_album_image_deletion.py | apihackers/wapps | e8158747aa3d77246d41142580faf9a5f2b0d968 | [
"MIT"
] | 2 | 2018-05-02T08:27:42.000Z | 2020-08-17T18:42:49.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-26 06:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from wapps.utils import get_image_model
def drop_empty_gallery_images(apps, schema_editor):
    """Delete every ManualAlbumImage row whose image FK is NULL.

    Data-migration helper run before the FK on `image` is tightened below.
    """
    album_image_model = apps.get_model('gallery', 'ManualAlbumImage')  # noqa
    orphans = album_image_model.objects.filter(image__isnull=True)
    for orphan in orphans:
        orphan.delete()
class Migration(migrations.Migration):
    # The data cleanup runs first so the FK change below cannot trip over
    # rows whose image reference is already NULL.

    dependencies = [
        ('gallery', '0001_initial'),
    ]

    operations = [
        # Data migration: purge album images with a NULL image FK.
        migrations.RunPython(drop_empty_gallery_images),
        migrations.AlterField(
            model_name='manualalbumimage',
            name='image',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=get_image_model()),
        ),
    ]
| 28.225806 | 121 | 0.693714 |
a32cce774b4abbb45c4fa0ea764a45099fab3182 | 6,888 | py | Python | lib/examples/TOA_balance.py | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | lib/examples/TOA_balance.py | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | lib/examples/TOA_balance.py | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import climt
from pylab import *
# Replicates the behavior of the online radiation calculator
# see maths.ucd.ie/~rca
scheme = 'ccm3'
Insolation = 337. #* .75
Imbalance = 30.
Albedo = 30. /100.
CO2 = 350.
CH4 = 1.7 + 1.e-9
N2O = 0. + 1.e-9
TropoHeight = 16.
LapseRate = -6.5
LapseRate_RH = -6.5
BLRelHum = 80. / 100.
LTRelHum = 80. / 100.
UTRelHum = 80. / 100.
STRelHum = 1.e-16 / 100.
TropoHeight_RH = 16.
RH_control = 0
T0_RH = 20. + 273.15
Drop_size = 10.
Cloud_frac_hi = 0.
Cloud_frac_lo = 0.
Cloud_water_hi = 0.
Cloud_water_lo = 0.
zen = 60.
if Cloud_water_lo == 0.: Cloud_frac_lo = 0.
if Cloud_water_hi == 0.: Cloud_frac_hi = 0.
# instantiate radiation objects, get number of levels
r=climt.radiation(scheme=scheme)
nlev=r.nlev
# define some fixed profiles
SurfPres = 1000.
Pressure = ( arange(nlev)+ 0.5 ) * SurfPres/nlev
cldf = zeros( nlev, 'd') # Cloud frac
clwp = zeros( nlev, 'd') # Cloud liquid water path
cloud_lev_hi = int(nlev*0.2) # put high cloud roughly at 200 mb
cloud_lev_lo = int(nlev*0.8) # put low cloud roughly at 800 mb
cldf[cloud_lev_lo] = Cloud_frac_lo
cldf[cloud_lev_hi] = Cloud_frac_hi
clwp[cloud_lev_lo] = Cloud_water_lo
clwp[cloud_lev_hi] = Cloud_water_hi
# dictionary for input into rad call
input={}
input['ps'] = SurfPres
input['lev'] = Pressure
input['cldf'] = cldf
input['clwp'] = clwp
input['solin'] = Insolation
input['r_liq'] = Drop_size + Pressure*0.
input['r_ice'] = Drop_size + Pressure*0.
input['aldir'] = Albedo
input['aldif'] = Albedo
input['asdir'] = Albedo
input['asdif'] = Albedo
input['co2'] = CO2
input['ch4'] = CH4
input['n2o'] = N2O
input['zen'] = zen
#input['o3'] = Pressure*0. + 1.e-16
# functions
def profiles(SurfTemp):
"""
Compute temp and humidity profiles
Stratosphere is isothermal
"""
# parameters
Rd = r['Rd']
g = r['g']
# assume near-surface temp is 1 K less than surface
T0 = SurfTemp - 1.
# scale height (assume dry atmos) see Holton pg. 21
Tmean = (T0**2*TropoHeight + LapseRate*T0*TropoHeight**2
+ LapseRate**2*TropoHeight**3)/(T0*TropoHeight +
LapseRate*TropoHeight**2/2)
Tmean_RH = (T0_RH**2*TropoHeight_RH + LapseRate_RH*T0_RH*TropoHeight_RH**2
+ LapseRate_RH**2*TropoHeight_RH**3)/(T0_RH*TropoHeight_RH +
LapseRate_RH*TropoHeight_RH**2/2)
H = Rd*Tmean/g * 1.e-3 # [km]
H_RH = Rd*Tmean_RH/g * 1.e-3 # [km]
# now compute profiles
z = -H*log(Pressure/SurfPres)
z900 = -H*log(900./SurfPres)
z700 = -H*log(700./SurfPres)
T = T0 + LapseRate*z
Tstrat = T0 + LapseRate*TropoHeight
q = zeros(nlev, 'd')
for k in range(nlev-1,-1,-1): # compute from bottom up
if z[k] <= z900:
q[k] = climt.thermodyn.qs(T[k],Pressure[k])*BLRelHum
elif z[k] > z900 and z[k] <= z700:
q[k] = climt.thermodyn.qs(T[k],Pressure[k])*LTRelHum
elif z[k] > z700 and z[k] <= TropoHeight:
q[k] = climt.thermodyn.qs(T[k],Pressure[k])*UTRelHum
else:
T[k] = Tstrat
q[k] = 1.e-9 #climt.thermodyn.qsflatau(T[k],Pressure[k],2)*STRelHum
if T[k] < 273.-80.: q[k] = 1.e-9
# correct humidity if necessary
if RH_control:
z_RH = -H_RH*log(Pressure/SurfPres)
T_RH = T0_RH + LapseRate_RH*z_RH
z900 = -H_RH*log(900./SurfPres)
z700 = -H_RH*log(700./SurfPres)
q = zeros(nlev, 'd')
for k in range(nlev-1,-1,-1): # compute from bottom up
if z_RH[k] <= z900:
q[k] = climt.thermodyn.qs(T_RH[k],Pressure[k])*BLRelHum
elif z_RH[k] > z900 and z_RH[k] <= z700:
q[k] = climt.thermodyn.qs(T_RH[k],Pressure[k])*LTRelHum
elif z_RH[k] > z700 and z_RH[k] <= TropoHeight_RH:
q[k] = climt.thermodyn.qs(T_RH[k],Pressure[k])*UTRelHum
else:
T_RH[k] = T_RH[k+1]
q[k] = climt.thermodyn.qsflatau(T_RH[k],Pressure[k],2)*STRelHum
return T, q, z
def TOAFlux(SurfTemp):
(T,q,z) = profiles(SurfTemp)
input['T'] = T
input['Ts'] = SurfTemp
input['q'] = q
r(**input)
return r.swflx[0].item() + r.lwflx[0].item() + Imbalance
# Now compute equil surf temp assuming low albedo
try:
Teq = climt.mathutil.ridder_root(TOAFlux, (173.15,353.15), accuracy=0.1)
except climt.mathutil.BracketingException, err:
if str(err) == 'initial interval does not bracket a root: root probably to the right of interval':
print '<P><font color="red"><b>Equilibrium surface temperature exceeds 80 <sup>o</sup>C.</font>'
if str(err) == 'initial interval does not bracket a root: root probably to the left of interval':
print '<P><font color="blue"><b>Equilibrium surface temperature less than -100 <sup>o</sup>C.</font>'
sys.exit(1)
T,q,z = profiles(Teq)
input['T'] = T
input['Ts'] = Teq
input['q'] = q
r(**input)
# print results
print
print 'Equilibrium near-surface air temperature is %4.1f degC (%4.1f K)' % ((Teq-273.15-1.),Teq-1.)
print
print r['SwToaCf'],r['LwToaCf'],Teq
sys.exit()
print 'Profiles'
print("lev p z T q LW flux LW heating SW flux SW heating cld frac cld water\n")
for i in range(r.nlev):
print("%3i %6.1f %7.2f %6.1f %6.2f %10.2f %6.2f %10.2f %6.2f %6.1f %6.1f" % \
(i, Pressure[i], z[i], T[i], q[i], r['lwflx'][i], r['lwhr'][i], r['swflx'][i], r['swhr'][i] , cldf[i], clwp[i]))
# make plot
def setlims(x,y):
dx = (max(x)-min(x))*.05
if dx == 0.: dx=1.
xmin = min(x) - dx
xmax = max(x) + dx
# set(gca(), 'xlim', [xmin,xmax],'ylim', [0,z[0]])
xlim([xmin,xmax])
ylim([0,z[0]])
subplot(231)
T = T-273.15
plot(T,z, 'b-o',linewidth=1,ms=3)
#title(r'$\rm{Temperature} (^__\rm{o}\rm{C})$')
title('Temperature (C)',fontsize=10)
ylabel('height (km)',fontsize=10)
setlims(T,z)
subplot(232)
plot(q,z, 'b-o',linewidth=1,ms=3)
title('Specific humidity (g/kg)',fontsize=10)
setlims(q,z)
subplot(233)
plot(clwp,z, 'b-o',linewidth=1,ms=3)
title('Cloud water path (g/m2)',fontsize=10)
setlims(clwp,z)
ax=subplot(234)
ax.xaxis.set_major_locator(MultipleLocator(50))
plot(r['lwflx'],z, 'b-o',linewidth=1,ms=3)
title('Longwave flux (W/m2)',fontsize=10)
ylabel('height (km)',fontsize=10)
setlims(r['lwflx'],z)
ax=subplot(235)
ax.xaxis.set_major_locator(MultipleLocator(50))
plot(r['swflx'],z, 'b-o',linewidth=1,ms=3)
title('Shortwave flux (W/m2)',fontsize=10)
setlims(r['swflx'],z)
subplot(236)
plot(r['lwhr'],z,'b-o', r['swhr'],z,'r-o', r['swhr']+r['lwhr'],z,'k-o',linewidth=1,ms=3)
title('Heating rates (K/day)',fontsize=10)
legend(('LW', 'SW', 'Total'), 'upper left')
x=r['lwhr'].tolist()
x.extend(r['swhr'].tolist())
x.extend((r['lwhr']+r['swhr']).tolist())
setlims(array(x),z)
#savefig(os.path.join(ImageDir,TimeStamp),dpi=100)
show()
| 30.613333 | 123 | 0.612079 |
a32d307b1fe682f59762c7e5a70a9d45122fc794 | 117 | py | Python | tools/inject_pydoc/idp.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 1,160 | 2015-05-02T15:13:20.000Z | 2022-03-31T20:04:28.000Z | tools/inject_pydoc/idp.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 19 | 2015-04-20T13:47:00.000Z | 2021-07-07T13:00:42.000Z | tools/inject_pydoc/idp.py | fengjixuchui/src | 0c5a6cd8057717f73b1373f8d85eb9b19e1934e1 | [
"BSD-3-Clause"
] | 257 | 2015-04-01T21:42:33.000Z | 2022-03-10T11:57:51.000Z | {
"ev_get_bg_color" : {
"repl_text" : ("(self, color, ea) -> int", "(self, ea) -> int or None"),
}
}
| 19.5 | 80 | 0.452991 |
a32d410f0fad03a9c0fdccb975ef58812fe45a3f | 4,576 | py | Python | pca.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | 1 | 2020-05-21T23:56:57.000Z | 2020-05-21T23:56:57.000Z | pca.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | null | null | null | pca.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | null | null | null | #pca model n componentes
from sklearn.decomposition import PCA
import numpy as np
from pylab import rcParams
import matplotlib.pyplot as plt
import pandas as pd
def pca_model_n_components(df,n_components):
'''
Definition:
Initialize pca with n_components
args:
dataframe and number of components
returns:
pca initialized and pca fitted and transformed
'''
pca = PCA(n_components)
return pca,pca.fit_transform(df)
def pca_model(df):
'''
Definition:
Initialize pca
args:
dataframe
returns:
pca initialized and pca fitted and transformed
'''
pca = PCA()
return pca,pca.fit_transform(df)
def get_min_components_variance(df,retain_variance):
'''
Definition:
get min components to retain variance
args:
dataframe and retained_variance ratio
returns:
number of min components to retain variance
'''
pca,pca_tranformed = pca_model(df)
cumulative_sum = np.cumsum(pca.explained_variance_ratio_)
return min(np.where(cumulative_sum>=retain_variance)[0]+1)
def plot_curve_min_components_variance(df,mode="cumulative_variance"):
'''
Definition:
plot curve of variance of pca
args:
dataframe and mode to be plotted (cumulative_variance or variance)
returns:
None, only plot the curve
'''
rcParams['figure.figsize'] = 12, 8
pca,pca_transformed = pca_model(df)
fig = plt.figure()
explained_variance = pca.explained_variance_ratio_
cumulative_sum = np.cumsum(explained_variance)
n_components = len(explained_variance)
ind = np.arange(n_components)
ax = plt.subplot(111)
if(mode=="cumulative_variance"):
title = "Explained Cumulative Variance per Principal Component"
ylabel = "Cumulative Variance (%)"
ax.plot(ind, cumulative_sum)
mark_1 = get_min_components_variance(df,0.2)
mark_2 = get_min_components_variance(df,0.4)
mark_3 = get_min_components_variance(df,0.6)
mark_4 = get_min_components_variance(df,0.8)
mark_5 = get_min_components_variance(df,0.9)
mark_6 = get_min_components_variance(df,0.95)
mark_7 = get_min_components_variance(df,0.99)
plt.hlines(y=0.2, xmin=0, xmax=mark_1, color='green', linestyles='dashed',zorder=1)
plt.hlines(y=0.4, xmin=0, xmax=mark_2, color='green', linestyles='dashed',zorder=2)
plt.hlines(y=0.6, xmin=0, xmax=mark_3, color='green', linestyles='dashed',zorder=3)
plt.hlines(y=0.8, xmin=0, xmax=mark_4, color='green', linestyles='dashed',zorder=4)
plt.hlines(y=0.9, xmin=0, xmax=mark_5, color='green', linestyles='dashed',zorder=5)
plt.hlines(y=0.95, xmin=0, xmax=mark_6, color='green', linestyles='dashed',zorder=6)
plt.hlines(y=0.99, xmin=0, xmax=mark_7, color='green', linestyles='dashed',zorder=6)
plt.vlines(x=mark_1, ymin=0, ymax=0.2, color='green', linestyles='dashed',zorder=7)
plt.vlines(x=mark_2, ymin=0, ymax=0.4, color='green', linestyles='dashed',zorder=8)
plt.vlines(x=mark_3, ymin=0, ymax=0.6, color='green', linestyles='dashed',zorder=9)
plt.vlines(x=mark_4, ymin=0, ymax=0.8, color='green', linestyles='dashed',zorder=10)
plt.vlines(x=mark_5, ymin=0, ymax=0.9, color='green', linestyles='dashed',zorder=11)
plt.vlines(x=mark_6, ymin=0, ymax=0.95, color='green', linestyles='dashed',zorder=12)
plt.vlines(x=mark_7, ymin=0, ymax=0.99, color='green', linestyles='dashed',zorder=12)
else:
title = "Variance per Principal Component"
ylabel = "Variance (%)"
ax.plot(ind, explained_variance)
ax.set_xlabel("Number of principal components")
ax.set_ylabel(ylabel)
plt.title(title)
def report_features(feature_names,pca,component_number):
'''
Definition:
This function returns the weights of the original features in relation to a component number of pca
args:
feature_names, pca model and the component_number
returns:
data frame with features names and the correspondent weights
'''
components = pca.components_
feature_weights = dict(zip(feature_names, components[component_number]))
sorted_weights = sorted(feature_weights.items(), key = lambda kv: kv[1])
data = []
for feature, weight, in sorted_weights:
data.append([feature,weight])
df = pd.DataFrame(data,columns=["feature","weight"])
df.set_index("feature",inplace=True)
return df
| 29.908497 | 101 | 0.671547 |
a32ec2ac9f37deceb74746f32c5ce3fa89c08ee8 | 4,446 | py | Python | media_analyzer/core/top_news.py | nyancol/MediaAnalyzer | fe504aa63646d27dfca6ca2c5435b0877d65ab2a | [
"MIT"
] | null | null | null | media_analyzer/core/top_news.py | nyancol/MediaAnalyzer | fe504aa63646d27dfca6ca2c5435b0877d65ab2a | [
"MIT"
] | null | null | null | media_analyzer/core/top_news.py | nyancol/MediaAnalyzer | fe504aa63646d27dfca6ca2c5435b0877d65ab2a | [
"MIT"
] | null | null | null | import datetime
import numpy as np
import json
from sklearn.decomposition import NMF, LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
import spacy
from media_analyzer import database
NUM_TOPICS = 20
def load_data(begin, end, language):
res = None
with database.connection() as conn:
cur = conn.cursor()
cur.execute(f"""SELECT text, tokens
FROM tweets
WHERE language = '{language}'
AND '{begin}'::date < created_at
AND created_at < '{end}'::date;""")
res = cur.fetchall()
return [{"text": text, "tokens": tokens} for text, tokens in res]
def create_model(language, data):
stop_words = stopwords.words(language)
vectorizer = CountVectorizer(min_df=5, max_df=0.9, lowercase=True,
stop_words=stop_words, token_pattern='[a-zA-Z\-][a-zA-Z\-]{2,}')
data_vectorized = vectorizer.fit_transform(data)
# Build a Non-Negative Matrix Factorization Model
nmf_model = NMF(n_components=NUM_TOPICS)
nmf_Z = nmf_model.fit_transform(data_vectorized)
return nmf_model, vectorizer.get_feature_names()
def get_top_topics(language, tweets):
model, vocabulary = create_model(language, [tweet["text"] for tweet in tweets])
components = []
special_words = {"nhttps"}
for topic in model.components_:
keywords = [vocabulary[i] for i in np.argwhere(topic >= 1).flatten()]
keywords = [key for key in keywords if key not in special_words]
if keywords:
components.append(keywords)
return components
def get_last_date(language):
res = None
with database.connection() as conn:
cur = conn.cursor()
cur.execute(f"""SELECT MAX(begin)
FROM thirty_days_topics
WHERE language = '{language}';""")
res = cur.fetchone()
return res[0] if res else None
def save_topics(begin, language, topics):
sql = """INSERT INTO thirty_days_topics (begin, language, topics)
VALUES (%(begin)s, %(language)s, %(topics)s);"""
entry = {"begin": begin, "language": language, "topics": json.dumps(topics)}
with database.connection() as conn:
cur = conn.cursor()
cur.execute(sql, entry)
conn.commit()
cur.close()
def get_date_fist_tweets():
res = None
with database.connection() as conn:
cur = conn.cursor()
cur.execute("SELECT MIN(created_at) FROM tweets;")
res = cur.fetchone()
return res[0]
def count_matches(tweets, topics, language):
def count_matches_tweet(tokens, topics):
topics = [set(keywords) for keywords in topics]
topics_matched = np.zeros(len(topics), dtype=int)
for i, keywords in enumerate(topics):
if any([token in keywords for token in tokens]):
topics_matched[i] = 1
return topics_matched
def get_tokens(language, topics):
parsers = {"english": "en", "french": "fr",
"spanish": "es", "italian": "it"}
parser = spacy.load(parsers[language])
return [[parser(key)[0].lemma_ for key in keywords] for keywords in topics]
tokenized_topics = get_tokens(language, topics)
matches = np.zeros(len(topics), dtype=int)
for tweet in tweets:
matches += count_matches_tweet(tweet["tokens"], tokenized_topics)
return [{"keywords": topic, "matches": match}
for topic, match in zip(topics, matches.tolist())]
def compute_language(language):
begin = get_last_date(language)
if begin is None:
begin = datetime.datetime(2018, 12, 1).date()
else:
begin += datetime.timedelta(days=1)
while begin < datetime.datetime.now().date() - datetime.timedelta(days=30):
end = begin + datetime.timedelta(days=30)
print(f"Computing interval: {begin} -> {end} for {language}")
tweets = load_data(begin, end, language)
topics = get_top_topics(language, tweets)
topics = count_matches(tweets, topics, language)
save_topics(begin, language, topics)
begin += datetime.timedelta(days=1)
def compute():
languages = database.get_languages()
for language in languages:
compute_language(language)
if __name__ == "__main__":
compute()
| 33.938931 | 97 | 0.634278 |
a3306511794dd745848ecb0131b99b481e38843f | 669 | py | Python | PYTHON/IP-Address-Checker.py | ayushyado/HACKTOBERFEST2021-2 | b63d568fd7f33023ca0b0dbd91325444c70c4d10 | [
"MIT"
] | 125 | 2021-10-01T19:05:26.000Z | 2021-10-03T13:32:42.000Z | PYTHON/IP-Address-Checker.py | ayushyado/HACKTOBERFEST2021-2 | b63d568fd7f33023ca0b0dbd91325444c70c4d10 | [
"MIT"
] | 201 | 2021-10-30T20:40:01.000Z | 2022-03-22T17:26:28.000Z | PYTHON/IP-Address-Checker.py | ayushyado/HACKTOBERFEST2021-2 | b63d568fd7f33023ca0b0dbd91325444c70c4d10 | [
"MIT"
] | 294 | 2021-10-01T18:46:05.000Z | 2021-10-03T14:25:07.000Z | import urllib2
def get_public_ip(request_target):
grabber = urllib2.build_opener()
grabber.addheaders = [('Useragent','Mozilla/5.0')]
try:
public_ip_address = grabber.open(target_url).read()
except urllib2.HTTPError, error:
print("There was an error trying to get your Public IP: %s") % (error)
except urllib2.URLError, error:
print("There was an error trying to get your Public IP: %s") % (error)
return public_ip_address
public_ip = "None"
target_url = "http://ip.42.pl/raw"
public_ip = get_public_ip(target_url)
if not "None" in public_ip:
print("Your Public IP address is: %s") % (str(public_ip))
else:
print("Your Public IP address was not found") | 31.857143 | 72 | 0.730942 |
a3325c6fb73e3191f10fa77771bfdc292d1ff768 | 2,586 | py | Python | scraper.py | squash-bit/Automate-Whatsapp-News | 9bdbbbb397dc680825b19adcda4da81d1f66270c | [
"MIT"
] | 4 | 2020-11-21T19:08:56.000Z | 2021-05-06T13:09:45.000Z | scraper.py | squash-bit/Agent-Wallie | 9bdbbbb397dc680825b19adcda4da81d1f66270c | [
"MIT"
] | 1 | 2021-05-06T19:26:06.000Z | 2021-05-06T19:26:06.000Z | scraper.py | squash-bit/Agent-Wallie | 9bdbbbb397dc680825b19adcda4da81d1f66270c | [
"MIT"
] | 1 | 2021-05-06T13:25:08.000Z | 2021-05-06T13:25:08.000Z | # import necessary modules
import os
import re
import requests
import newspaper
from bs4 import BeautifulSoup
from newspaper import Article
from newspaper import Config
from article_summarizer import summarizer
from time import sleep
# clean data
class Cleanser:
"""Scrape the news site and get the relevant updates.."""
def __init__(self, buzz_words):
# get the markup from ['https://yourwebpage.com/']
self.url = 'https://news.ycombinator.com/news'
self.buzz_words = buzz_words
self.articles_final = []
def gather_info(self):
# get recommended articles[title, link, summary] only for user
try:
# scrape only links and titles of articles present in the url:https://news.ycombinator.com/news
# then summarize each article using it's link...
r = requests.get(self.url)
html_soup = BeautifulSoup(r.text, 'html.parser')
for item in html_soup.find_all('tr', class_='athing'):
item_a = item.find('a', class_='storylink')
item_link = item_a.get('href') if item_a else None
item_text = item_a.get_text(strip=True) if item_a else None
# list of words that occur most frequent in article
keywords = self.get_keywords(item_link)
for buzz_word in self.buzz_words:
# find articles that contains any of buzz_words by iterating through the keywords
if buzz_word.lower() in keywords:
print(keywords)
# summarize contents using article_summarizer
summary = summarizer(item_link)
self.articles_final.append(
{'link' : item_link,
'title' : item_text,
'summary': summary})
except requests.exceptions.SSLError:
print("Max retries exceeded, Try again later...")
return self.articles_final
# get a list of words that occur most frequent in an article
def get_keywords(self, url):
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
config = Config()
config.browser_user_agent = user_agent
paper = Article(url, config=config)
try:
paper.download()
paper.parse()
paper.nlp()
except:
return []
return paper.keywords
| 39.181818 | 144 | 0.593968 |
a3340d73b31131cbb0f369140b3afe55408788f6 | 1,351 | py | Python | soli/aria/forms/species.py | rcdixon/soli | d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab | [
"MIT"
] | null | null | null | soli/aria/forms/species.py | rcdixon/soli | d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab | [
"MIT"
] | null | null | null | soli/aria/forms/species.py | rcdixon/soli | d29c77c1d391dfcc3c0dd0297ecf93fa9aa046ab | [
"MIT"
] | null | null | null | from aria.models import Genus, Species, Subspecies
from django import forms
from django.forms import inlineformset_factory
from .templates.templates import createTextInput, createSelectInput
class CreateSpeciesForm(forms.ModelForm):
class Meta:
model = Species
fields = ["name", "common_name"]
widgets = {
"name": createTextInput("Species"),
"common_name": createTextInput("Common Name")
}
def __init__(self, *args, **kwargs):
super(CreateSpeciesForm, self).__init__(*args, **kwargs)
self.fields["genus"] = forms.ModelChoiceField(
queryset=Genus.objects.all().order_by("name"),
widget=createSelectInput("Genus", ["font-italic"]))
self.fields["genus"].empty_label = "Genus"
def saveSpecies(self, request):
species = self.save(commit=False)
species.sp_ge_num = Genus(ge_num=request.POST["genus"])
species.save()
def subspeciesFormSet(species=Species()):
formset = inlineformset_factory(
Species,
Subspecies,
fields=["name"],
extra=1,
can_delete=False,
widgets={
"name": createTextInput("Subspecies")
})
subspecies = formset(instance=species)
if len(subspecies) == 1:
subspecies = subspecies[0]
return subspecies
| 29.369565 | 67 | 0.634345 |
a3349b6abd791f21baf0e781406ef6802460401f | 285 | py | Python | hour.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | 3 | 2017-04-30T17:44:53.000Z | 2018-02-03T06:02:11.000Z | hour.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | 10 | 2021-03-18T20:17:19.000Z | 2022-03-11T23:14:19.000Z | hour.py | anokata/pythonPetProjects | 245c3ff11ae560b17830970061d8d60013948fd7 | [
"MIT"
] | null | null | null | import math
def angle(m):
return 5.5 * m/60;
print(angle(20))
i = 0
for m in range(0,1440*60):
a = angle(m) / 360
d = a - math.floor(a)
if (d < 0.00001):
print(a, math.floor(a), d, d == 0.0)
i += 1
print(i)
for m in range(25):
print(360*m/5.5)
| 14.25 | 44 | 0.508772 |
a33556dfd1ea6c5a377213bf148dae18a67adec5 | 4,038 | py | Python | src/third_party/wiredtiger/test/suite/test_encrypt08.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_encrypt08.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_encrypt08.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_encrypt08.py
# Test some error conditions with the libsodium encryption extension.
#
import wiredtiger, wttest
from wtscenario import make_scenarios
#
# Test sodium encryption configuration.
# This exercises the error paths in the encryptor's customize method when
# used for system (not per-table) encryption.
#
class test_encrypt08(wttest.WiredTigerTestCase):
uri = 'file:test_encrypt08'
# To test the sodium encryptor, we use secretkey= rather than
# setting a keyid, because for a "real" (vs. test-only) encryptor,
# keyids require some kind of key server, and (a) setting one up
# for testing would be a nuisance and (b) currently the sodium
# encryptor doesn't support any anyway.
#
# It expects secretkey= to provide a hex-encoded 256-bit chacha20 key.
# This key will serve for testing purposes.
sodium_testkey = '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
encrypt_type = [
('nokey', dict( sys_encrypt='',
msg='/no key given/')),
('keyid', dict( sys_encrypt='keyid=123',
msg='/keyids not supported/')),
('twokeys', dict( sys_encrypt='keyid=123,secretkey=' + sodium_testkey,
msg='/keys specified with both/')),
('nothex', dict( sys_encrypt='secretkey=plop',
msg='/secret key not hex/')),
('badsize', dict( sys_encrypt='secretkey=0123456789abcdef',
msg='/wrong secret key length/')),
]
scenarios = make_scenarios(encrypt_type)
def conn_extensions(self, extlist):
extlist.skip_if_missing = True
extlist.extension('encryptors', 'sodium')
# Do not use conn_config to set the encryption, because that sets
# the encryption during open when we don't have control and can't
# catch exceptions. Instead we'll let the frameork open without
# encryption and then reopen ourselves. This seems to behave as
# desired (we get the intended errors from inside the encryptor)
# even though one might expect it to fail because it's reopening
# the database with different encryption. (If in the future it starts
# doing that, the workaround is to override setUpConnectionOpen.
# I'm not doing that now because it's quite a bit messier.)
# (Re)open the database with bad encryption config.
def test_encrypt(self):
sysconfig = 'encryption=(name=sodium,{0}),'.format(self.sys_encrypt)
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda:
self.reopen_conn(config = sysconfig),
self.msg)
if __name__ == '__main__':
wttest.run()
| 43.419355 | 87 | 0.698613 |
a336bdbfb6767de53ac20167cacab792872e5ecf | 1,779 | py | Python | {{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py | mabdullahabid/cookiecutter-django-rest | 8cab90f115b99f7b700ec38a08cb3647eb0a847b | [
"MIT"
] | null | null | null | {{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py | mabdullahabid/cookiecutter-django-rest | 8cab90f115b99f7b700ec38a08cb3647eb0a847b | [
"MIT"
] | null | null | null | {{cookiecutter.github_repository_name}}/{{cookiecutter.app_name}}/urls.py | mabdullahabid/cookiecutter-django-rest | 8cab90f115b99f7b700ec38a08cb3647eb0a847b | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path, include, reverse_lazy
from django.views.generic.base import RedirectView
from rest_framework import permissions
from rest_framework.authtoken import views
from rest_framework.routers import DefaultRouter
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from .users.views import UserViewSet, UserCreateViewSet
router = DefaultRouter()
router.register(r"users", UserViewSet)
router.register(r"users", UserCreateViewSet)
urlpatterns = [
path("admin/", admin.site.urls),
path("api/v1/", include(router.urls)),
path("api-token-auth/", views.obtain_auth_token),
path("api-auth/", include("rest_framework.urls", namespace="rest_framework")),
# the 'api-root' from django rest-frameworks default router
# http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
re_path(r"^$", RedirectView.as_view(url=reverse_lazy("api-root"), permanent=False)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
admin.site.site_header = "{{ cookiecutter.app_title }}"
admin.site.site_title = "{{ cookiecutter.app_title }} Admin Portal"
admin.site.index_title = "{{ cookiecutter.app_title }} Admin"
# Swagger
api_info = openapi.Info(
title="{{ cookiecutter.app_title }} API",
default_version="v1",
description="API Documentation for {{ cookiecutter.app_title }}",
contact=openapi.Contact(email="{{ cookiecutter.email }}"),
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
] | 35.58 | 88 | 0.754918 |
a33868010eb5e7ae344ef9b1e3fe0336947b0c2f | 4,260 | py | Python | pdk_api.py | audacious-software/Passive-Data-Kit-External-Sensors | c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959 | [
"Apache-2.0"
] | null | null | null | pdk_api.py | audacious-software/Passive-Data-Kit-External-Sensors | c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959 | [
"Apache-2.0"
] | null | null | null | pdk_api.py | audacious-software/Passive-Data-Kit-External-Sensors | c4781c04ce3cb485b0c1e50a9e7c6db0c92a9959 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=line-too-long, no-member
from __future__ import print_function
import arrow
import requests
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.utils import timezone
from django.utils.text import slugify
from passive_data_kit_external_sensors.models import SensorRegion, Sensor, SensorLocation, SensorDataPayload, SensorModel
def fetch_sensors():
sensors = []
if hasattr(settings, 'PDK_EXTERNAL_SENSORS_PURPLE_AIR_URL'): # pylint: disable=too-many-nested-blocks
valid_region = None
for region in SensorRegion.objects.filter(include_sensors=True):
if valid_region is None:
valid_region = region.bounds
else:
valid_region = valid_region.union(region.bounds)
response = requests.get(settings.PDK_EXTERNAL_SENSORS_PURPLE_AIR_URL)
if response.status_code == 200:
sensors = response.json()['results']
region_matches = []
for sensor in sensors:
if 'Lat' in sensor and 'Lon' in sensor:
sensor_location = GEOSGeometry('POINT(%f %f)' % (sensor['Lon'], sensor['Lat'],))
if valid_region.contains(sensor_location):
if 'ID' in sensor:
sensor['pdk_identifier'] = 'purpleair-' + str(sensor['ID'])
if 'LastSeen' in sensor:
sensor['pdk_observed'] = arrow.get(sensor['LastSeen']).datetime
region_matches.append(sensor)
# else:
# print('INCOMPLETE? ' + json.dumps(sensor, indent=2))
print('START: ' + str(len(sensors)) + ' - IMPORT: ' + str(len(region_matches)))
else:
print('Unexpected HTTP status code for ' + settings.PDK_EXTERNAL_SENSORS_PURPLE_AIR_URL+ ' - ' + str(response.status_code))
return sensors
def ingest_sensor_data(sensor_data):
if 'pdk_identifier' in sensor_data:
identifier = sensor_data['pdk_identifier']
if identifier.startswith('purpleair-') and ('pdk_observed' in sensor_data) and ('Lat' in sensor_data) and ('Lon' in sensor_data):
model = None
if 'Type' in sensor_data:
model = SensorModel.objects.filter(identifier=slugify(sensor_data['Type'])).first()
if model is None:
model = SensorModel(identifier=slugify(sensor_data['Type']), name=sensor_data['Type'])
model.manufacturer = 'Unknown (via Purple Air)'
model.save()
sensor = Sensor.objects.filter(identifier=identifier).first()
now = timezone.now()
if sensor is None:
sensor = Sensor(identifier=identifier)
if 'Label' in sensor_data:
sensor.name = sensor_data['Label'].strip()
else:
sensor.name = identifier
sensor.added = now
sensor.model = model
sensor.save()
sensor.last_checked = now
sensor.save()
payload_when = sensor_data['pdk_observed']
del sensor_data['pdk_observed']
sensor_location = GEOSGeometry('POINT(%f %f)' % (sensor_data['Lon'], sensor_data['Lat'],))
last_location = sensor.locations.all().order_by('-last_observed').first()
if last_location is None or last_location.location.distance(sensor_location) > 0.00001:
last_location = SensorLocation.objects.create(sensor=sensor, first_observed=now, last_observed=now, location=sensor_location)
else:
if last_location.last_observed != payload_when:
last_location.last_observed = payload_when
last_location.save()
last_payload = sensor.data_payloads.filter(observed__gte=payload_when).first()
if last_payload is None:
print('ADDING PAYLOAD...')
data_payload = SensorDataPayload(sensor=sensor, observed=payload_when, location=last_location)
data_payload.definition = sensor_data
data_payload.save()
| 38.035714 | 141 | 0.603286 |
a339496b618754603c49253c77c1461b236400c0 | 37,496 | py | Python | fgcm/fgcmConfig.py | erykoff/fgcm | 51c39c5c7f904fbac755e775038730b4e6ba11bd | [
"Apache-2.0"
] | 5 | 2018-02-02T15:36:46.000Z | 2021-05-11T21:54:49.000Z | fgcm/fgcmConfig.py | erykoff/fgcm | 51c39c5c7f904fbac755e775038730b4e6ba11bd | [
"Apache-2.0"
] | 1 | 2021-08-19T19:56:33.000Z | 2021-08-19T19:56:33.000Z | fgcm/fgcmConfig.py | lsst/fgcm | 51c39c5c7f904fbac755e775038730b4e6ba11bd | [
"Apache-2.0"
] | 10 | 2019-01-09T22:50:04.000Z | 2020-02-12T16:36:27.000Z | import numpy as np
import os
import sys
import yaml
from .fgcmUtilities import FocalPlaneProjectorFromOffsets
from .fgcmLogger import FgcmLogger
class ConfigField(object):
    """
    Descriptor-style configuration field carrying an optional datatype,
    a default value, a required flag, and an expected length, with a
    validate() method to check all of them.
    """

    def __init__(self, datatype, value=None, default=None, required=False, length=None):
        self._datatype = datatype
        self._value = value
        self._required = required
        self._length = length

        effectiveDefault = default
        if self._datatype == np.ndarray:
            # ndarray-typed fields coerce scalars/sequences to >=1-d arrays.
            if default is not None:
                effectiveDefault = np.atleast_1d(default)
            if value is not None:
                self._value = np.atleast_1d(value)

        if datatype is not None:
            if effectiveDefault is not None and type(effectiveDefault) != datatype:
                raise TypeError("Default is the wrong datatype.")
            if self._value is not None and type(self._value) != datatype:
                raise TypeError("Value is the wrong datatype.")

        if self._value is None:
            self._value = effectiveDefault

    def __get__(self, obj, type=None):
        return self._value

    def __set__(self, obj, value):
        # need to convert to numpy array if necessary
        if self._datatype == np.ndarray:
            self._value = np.atleast_1d(value)
        else:
            self._value = value

    def validate(self, name):
        if self._required and self._value is None:
            raise ValueError("Required ConfigField %s is not set" % (name))
        if self._value is None:
            # Okay to have None for not required
            return True
        if self._datatype is not None and type(self._value) != self._datatype:
            raise ValueError("Datatype mismatch for %s (got %s, expected %s)" %
                             (name, str(type(self._value)), str(self._datatype)))
        if self._length is not None and len(self._value) != self._length:
            raise ValueError("ConfigField %s has the wrong length (%d != %d)" %
                             (name, len(self._value), self._length))
        return True
class FgcmConfig(object):
    """
    Class which contains the FGCM Configuration. Note that if you have fits files
    as input, use configWithFits(configDict) to initialize.

    parameters
    ----------
    configDict: dict
        Dictionary with configuration values
    lutIndex: numpy recarray
        All the information from the LUT index values
    lutStd: numpy recarray
        All the information from the LUT standard values
    expInfo: numpy recarray
        Info about each exposure
    checkFiles: bool, default=False
        Check that all fits files exist
    noOutput: bool, default=False
        Do not create an output directory.
    ccdOffsets : `np.ndarray`, optional
        CCD Offset table.
    focalPlaneProjector : `FocalPlaneProjector`, optional
        A focal plane projector object to generate the
        focal plane mapping at an arbitrary angle.
    """

    # Every class attribute below is a ConfigField descriptor.  Together they
    # declare the full set of legal configuration keys: _setVarsFromDict()
    # rejects any key not declared here, and validate() type-checks them all.

    # Band/filter configuration.
    bands = ConfigField(list, required=True)
    fitBands = ConfigField(list, required=True)
    notFitBands = ConfigField(list, required=True)
    requiredBands = ConfigField(list, required=True)
    filterToBand = ConfigField(dict, required=True)

    # Input file locations.
    exposureFile = ConfigField(str, required=False)
    ccdOffsetFile = ConfigField(str, required=False)
    obsFile = ConfigField(str, required=False)
    indexFile = ConfigField(str, required=False)
    refstarFile = ConfigField(str, required=False)

    # Time boundaries (MJD) for epochs, mirror washes, and coatings.
    UTBoundary = ConfigField(float, default=0.0)
    washMJDs = ConfigField(np.ndarray, default=np.array((0.0)))
    epochMJDs = ConfigField(np.ndarray, default=np.array((0.0, 1e10)))
    coatingMJDs = ConfigField(np.ndarray, default=np.array((0.0)))
    epochNames = ConfigField(list, required=False)

    # Look-up table and exposure-table field names.
    lutFile = ConfigField(str, required=False)
    expField = ConfigField(str, default='EXPNUM')
    ccdField = ConfigField(str, default='CCDNUM')
    latitude = ConfigField(float, required=True)
    defaultCameraOrientation = ConfigField(float, default=0.0)
    seeingField = ConfigField(str, default='SEEING')
    seeingSubExposure = ConfigField(bool, default=False)
    deepFlag = ConfigField(str, default='DEEPFLAG')
    fwhmField = ConfigField(str, default='PSF_FWHM')
    skyBrightnessField = ConfigField(str, default='SKYBRIGHTNESS')
    minObsPerBand = ConfigField(int, default=2)
    minObsPerBandFill = ConfigField(int, default=1)
    nCore = ConfigField(int, default=1)
    randomSeed = ConfigField(int, required=False)
    logger = ConfigField(None, required=False)
    outputFgcmcalZpts = ConfigField(bool, default=False)

    # Selection thresholds for stars/CCDs/exposures.
    brightObsGrayMax = ConfigField(float, default=0.15)
    minStarPerCCD = ConfigField(int, default=5)
    minStarPerExp = ConfigField(int, default=100)
    minCCDPerExp = ConfigField(int, default=5)
    maxCCDGrayErr = ConfigField(float, default=0.05)

    # Per-band dictionaries; these are converted to band-ordered
    # lists/arrays in __init__ via _convertDictToBandList().
    ccdGraySubCCDDict = ConfigField(dict, default={})
    ccdGraySubCCDChebyshevOrder = ConfigField(int, default=1)
    ccdGraySubCCDTriangular = ConfigField(bool, default=True)
    ccdGrayFocalPlaneDict = ConfigField(dict, default={})
    ccdGrayFocalPlaneChebyshevOrder = ConfigField(int, default=3)
    focalPlaneSigmaClip = ConfigField(float, default=4.0)
    ccdGrayFocalPlaneFitMinCcd = ConfigField(int, default=1)
    aperCorrFitNBins = ConfigField(int, default=5)
    aperCorrInputSlopeDict = ConfigField(dict, default={})
    illegalValue = ConfigField(float, default=-9999.0)
    sedBoundaryTermDict = ConfigField(dict, required=True)
    sedTermDict = ConfigField(dict, required=True)
    starColorCuts = ConfigField(list, required=True)
    quantityCuts = ConfigField(list, default=[])
    cycleNumber = ConfigField(int, default=0)
    outfileBase = ConfigField(str, required=True)
    maxIter = ConfigField(int, default=50)
    deltaMagBkgOffsetPercentile = ConfigField(float, default=0.25)
    deltaMagBkgPerCcd = ConfigField(bool, default=False)
    sigFgcmMaxErr = ConfigField(float, default=0.01)
    sigFgcmMaxEGrayDict = ConfigField(dict, default={})
    ccdGrayMaxStarErr = ConfigField(float, default=0.10)
    mirrorArea = ConfigField(float, required=True) # cm^2
    cameraGain = ConfigField(float, required=True)
    approxThroughputDict = ConfigField(dict, default={})
    ccdStartIndex = ConfigField(int, default=0)
    minExpPerNight = ConfigField(int, default=10)
    expGrayInitialCut = ConfigField(float, default=-0.25)
    expVarGrayPhotometricCutDict = ConfigField(dict, default={})
    expGrayPhotometricCutDict = ConfigField(dict, required=True)
    expGrayRecoverCut = ConfigField(float, default=-1.0)
    expGrayHighCutDict = ConfigField(dict, required=True)
    expGrayErrRecoverCut = ConfigField(float, default=0.05)
    sigmaCalRange = ConfigField(list, default=[0.001, 0.003], length=2)
    sigmaCalFitPercentile = ConfigField(list, default=[0.05, 0.15], length=2)
    sigmaCalPlotPercentile = ConfigField(list, default=[0.05, 0.95], length=2)
    sigma0Phot = ConfigField(float, default=0.003)
    logLevel = ConfigField(str, default='INFO')
    quietMode = ConfigField(bool, default=False)
    useRepeatabilityForExpGrayCutsDict = ConfigField(dict, default={})
    mapLongitudeRef = ConfigField(float, default=0.0)
    autoPhotometricCutNSig = ConfigField(float, default=3.0)
    autoPhotometricCutStep = ConfigField(float, default=0.0025)
    autoHighCutNSig = ConfigField(float, default=4.0)
    instrumentParsPerBand = ConfigField(bool, default=False)
    instrumentSlopeMinDeltaT = ConfigField(float, default=5.0)
    refStarSnMin = ConfigField(float, default=20.0)
    refStarOutlierNSig = ConfigField(float, default=4.0)
    applyRefStarColorCuts = ConfigField(bool, default=True)
    useRefStarsWithInstrument = ConfigField(bool, default=True)
    mapNSide = ConfigField(int, default=256)
    nStarPerRun = ConfigField(int, default=200000)
    nExpPerRun = ConfigField(int, default=1000)
    varNSig = ConfigField(float, default=100.0)
    varMinBand = ConfigField(int, default=2)
    useSedLUT = ConfigField(bool, default=False)
    modelMagErrors = ConfigField(bool, default=False)
    freezeStdAtmosphere = ConfigField(bool, default=False)
    reserveFraction = ConfigField(float, default=0.1)
    precomputeSuperStarInitialCycle = ConfigField(bool, default=False)
    useRetrievedPwv = ConfigField(bool, default=False)
    useNightlyRetrievedPwv = ConfigField(bool, default=False)
    useQuadraticPwv = ConfigField(bool, default=False)
    pwvRetrievalSmoothBlock = ConfigField(int, default=25)
    fitMirrorChromaticity = ConfigField(bool, default=False)
    useRetrievedTauInit = ConfigField(bool, default=False)
    tauRetrievalMinCCDPerNight = ConfigField(int, default=100)
    superStarSubCCDDict = ConfigField(dict, default={})
    superStarSubCCDChebyshevOrder = ConfigField(int, default=1)
    superStarSubCCDTriangular = ConfigField(bool, default=False)
    superStarSigmaClip = ConfigField(float, default=5.0)

    # Run-control switches and output options.
    clobber = ConfigField(bool, default=False)
    printOnly = ConfigField(bool, default=False)
    outputStars = ConfigField(bool, default=False)
    fillStars = ConfigField(bool, default=False)
    outputZeropoints = ConfigField(bool, default=False)
    outputPath = ConfigField(str, required=False)
    saveParsForDebugging = ConfigField(bool, default=False)
    doPlots = ConfigField(bool, default=True)

    # External atmosphere (PWV/tau) data files.
    pwvFile = ConfigField(str, required=False)
    externalPwvDeltaT = ConfigField(float, default=0.1)
    tauFile = ConfigField(str, required=False)
    externalTauDeltaT = ConfigField(float, default=0.1)

    # Fitter controls.
    fitGradientTolerance = ConfigField(float, default=1e-5)
    stepUnitReference = ConfigField(float, default=0.0001)
    experimentalMode = ConfigField(bool, default=False)
    resetParameters = ConfigField(bool, default=True)
    noChromaticCorrections = ConfigField(bool, default=False)
    colorSplitBands = ConfigField(list, default=['g', 'i'], length=2)
    expGrayCheckDeltaT = ConfigField(float, default=10. / (24. * 60.))
    modelMagErrorNObs = ConfigField(int, default=100000)

    # Inputs carried over from a previous fit cycle.
    inParameterFile = ConfigField(str, required=False)
    inFlagStarFile = ConfigField(str, required=False)
    zpsToApplyFile = ConfigField(str, required=False)
    maxFlagZpsToApply = ConfigField(int, default=2)
    def __init__(self, configDict, lutIndex, lutStd, expInfo, checkFiles=False, noOutput=False, ccdOffsets=None, focalPlaneProjector=None):
        """
        Ingest and validate the configuration dictionary, set up output and
        plot directories plus logging, and derive band/filter, epoch, and
        standard-atmosphere quantities from the LUT and exposure tables.
        See the class docstring for the parameter descriptions.
        """
        self._setVarsFromDict(configDict)

        self._setDefaultLengths()

        self.validate()

        # First thing: set the random seed if desired
        if self.randomSeed is not None:
            np.random.seed(seed=self.randomSeed)

        if self.outputPath is None:
            self.outputPath = os.path.abspath('.')
        else:
            self.outputPath = os.path.abspath(self.outputPath)

        # create output path if necessary
        if not noOutput:
            if (not os.path.isdir(self.outputPath)):
                try:
                    os.makedirs(self.outputPath)
                except:
                    raise IOError("Could not create output path: %s" % (self.outputPath))

        if (self.cycleNumber < 0):
            raise ValueError("Illegal cycleNumber: must be >= 0")

        self.inParameterFile = None
        self.inFlagStarFile = None
        if (self.cycleNumber >= 1) and checkFiles:
            # A restarted cycle must carry over the fit parameters and the
            # flagged-star list from the previous cycle.
            if ('inParameterFile' not in configDict):
                raise ValueError("Must provide inParameterFile for cycleNumber > 0")
            self.inParameterFile = configDict['inParameterFile']
            if ('inFlagStarFile' not in configDict):
                raise ValueError("Must provide inFlagStarFile for cycleNumber > 0")
            self.inFlagStarFile = configDict['inFlagStarFile']

        # check the cut values

        self.outfileBaseWithCycle = '%s_cycle%02d' % (self.outfileBase, self.cycleNumber)

        logFile = '%s/%s.log' % (self.outputPath, self.outfileBaseWithCycle)
        if os.path.isfile(logFile) and not self.clobber:
            raise RuntimeError("Found logFile %s, but clobber == False." % (logFile))

        self.plotPath = None
        if self.doPlots:
            self.plotPath = '%s/%s_plots' % (self.outputPath,self.outfileBaseWithCycle)
            if os.path.isdir(self.plotPath) and not self.clobber:
                # check if directory is empty
                if len(os.listdir(self.plotPath)) > 0:
                    raise RuntimeError("Found plots in %s, but clobber == False." % (self.plotPath))

        # Set up the logger: internal FgcmLogger unless one was supplied.
        if ('logger' not in configDict):
            self.externalLogger = False
            self.fgcmLog = FgcmLogger('%s/%s.log' % (self.outputPath,
                                                     self.outfileBaseWithCycle),
                                      self.logLevel, printLogger=configDict['printOnly'])
            if configDict['printOnly']:
                self.fgcmLog.info('Logging to console')
            else:
                self.fgcmLog.info('Logging started to %s' % (self.fgcmLog.logFile))
        else:
            # Support an external logger such as LSST that has .info() and .debug() calls
            self.externalLogger = True
            self.fgcmLog = configDict['logger']
            try:
                if not self.quietMode:
                    self.fgcmLog.info('Logging to external logger.')
            except:
                raise RuntimeError("Logging to configDict['logger'] failed.")

        if (self.experimentalMode) :
            self.fgcmLog.info('ExperimentalMode set to True')
        if (self.resetParameters) :
            self.fgcmLog.info('Will reset atmosphere parameters')
        if (self.noChromaticCorrections) :
            self.fgcmLog.warning('No chromatic corrections will be applied. I hope this is what you wanted for a test!')

        if (self.plotPath is not None and not os.path.isdir(self.plotPath)):
            try:
                os.makedirs(self.plotPath)
            except:
                raise IOError("Could not create plot path: %s" % (self.plotPath))

        if (self.illegalValue >= 0.0):
            raise ValueError("Must set illegalValue to a negative number")

        # and look at the lutFile
        self.nCCD = lutIndex['NCCD'][0]
        # these are np arrays and encoded as such; fall back to plain
        # strings if they are not byte strings
        try:
            self.lutFilterNames = [n.decode('utf-8') for n in lutIndex['FILTERNAMES'][0]]
        except AttributeError:
            self.lutFilterNames = [n for n in lutIndex['FILTERNAMES'][0]]
        try:
            self.lutStdFilterNames = [n.decode('utf-8') for n in lutIndex['STDFILTERNAMES'][0]]
        except AttributeError:
            self.lutStdFilterNames = [n for n in lutIndex['STDFILTERNAMES'][0]]

        # Valid atmosphere-parameter ranges covered by the LUT.
        self.pmbRange = np.array([np.min(lutIndex['PMB']),np.max(lutIndex['PMB'])])
        self.pwvRange = np.array([np.min(lutIndex['PWV']),np.max(lutIndex['PWV'])])
        self.O3Range = np.array([np.min(lutIndex['O3']),np.max(lutIndex['O3'])])
        self.tauRange = np.array([np.min(lutIndex['TAU']),np.max(lutIndex['TAU'])])
        self.alphaRange = np.array([np.min(lutIndex['ALPHA']),np.max(lutIndex['ALPHA'])])
        self.zenithRange = np.array([np.min(lutIndex['ZENITH']),np.max(lutIndex['ZENITH'])])

        # newer band checks
        # 1) check that all the filters in filterToBand are in lutFilterNames
        # 2) check that all the lutStdFilterNames are lutFilterNames (redundant)
        # 3) check that each band has ONE standard filter
        # 4) check that all the fitBands are in bands
        # 5) check that all the notFitBands are in bands
        # 6) check that all the requiredBands are in bands

        # 1) check that all the filters in filterToBand are in lutFilterNames
        for filterName in self.filterToBand:
            if filterName not in self.lutFilterNames:
                raise ValueError("Filter %s in filterToBand not in LUT" % (filterName))
        # 2) check that all the lutStdFilterNames are lutFilterNames (redundant)
        for lutStdFilterName in self.lutStdFilterNames:
            if lutStdFilterName not in self.lutFilterNames:
                raise ValueError("lutStdFilterName %s not in list of lutFilterNames" % (lutStdFilterName))
        # 3) check that each band has ONE standard filter
        bandStdFilterIndex = np.zeros(len(self.bands), dtype=np.int32) - 1
        for i, band in enumerate(self.bands):
            for j, filterName in enumerate(self.lutFilterNames):
                # Not every LUT filter must be in the filterToBand mapping.
                # If it is not there, it will not be used.
                if filterName in self.filterToBand:
                    if self.filterToBand[filterName] == band:
                        # If we haven't found it yet, set the index
                        ind = list(self.lutFilterNames).index(self.lutStdFilterNames[j])
                        if bandStdFilterIndex[i] < 0:
                            bandStdFilterIndex[i] = ind
                        else:
                            if self.lutStdFilterNames[ind] != self.lutStdFilterNames[bandStdFilterIndex[i]]:
                                raise ValueError("Band %s has multiple standard filters (%s, %s)" %
                                                 (band, self.lutStdFilterNames[ind],
                                                  self.lutStdFilterNames[bandStdFilterIndex[i]]))
        # 4) check that all the fitBands are in bands
        for fitBand in self.fitBands:
            if fitBand not in self.bands:
                raise ValueError("Band %s from fitBands not in full bands" % (fitBand))
        # 5) check that all the notFitBands are in bands
        for notFitBand in self.notFitBands:
            if notFitBand not in self.bands:
                raise ValueError("Band %s from notFitBands not in full bands" % (notFitBand))
        # 6) check that all the requiredBands are in bands
        for requiredBand in self.requiredBands:
            if requiredBand not in self.bands:
                raise ValueError("Band %s from requiredBands not in full bands" % (requiredBand))

        bandString = " ".join(self.bands)
        self.fgcmLog.info('Found %d CCDs and %d bands (%s)' %
                          (self.nCCD,len(self.bands),bandString))

        # get LUT standard values
        self.pmbStd = lutStd['PMBSTD'][0]
        self.pwvStd = lutStd['PWVSTD'][0]
        self.lnPwvStd = np.log(lutStd['PWVSTD'][0])
        self.o3Std = lutStd['O3STD'][0]
        self.tauStd = lutStd['TAUSTD'][0]
        self.lnTauStd = np.log(lutStd['TAUSTD'][0])
        self.alphaStd = lutStd['ALPHASTD'][0]
        self.zenithStd = lutStd['ZENITHSTD'][0]

        # Cut the LUT filter names to those that are actually used
        usedFilterNames = self.filterToBand.keys()
        usedLutFilterMark = np.zeros(len(self.lutFilterNames), dtype=bool)
        for i, f in enumerate(self.lutFilterNames):
            if f in usedFilterNames:
                usedLutFilterMark[i] = True

        self.lutFilterNames = [f for i, f in enumerate(self.lutFilterNames) if usedLutFilterMark[i]]
        self.lutStdFilterNames = [f for i, f in enumerate(self.lutStdFilterNames) if usedLutFilterMark[i]]

        # And the lambdaStd and I10Std, for each *band*
        self.lambdaStdBand = lutStd['LAMBDASTD'][0][bandStdFilterIndex]
        self.I10StdBand = lutStd['I10STD'][0][bandStdFilterIndex]
        self.I0StdBand = lutStd['I0STD'][0][bandStdFilterIndex]
        self.I1StdBand = lutStd['I1STD'][0][bandStdFilterIndex]
        self.I2StdBand = lutStd['I2STD'][0][bandStdFilterIndex]
        self.lambdaStdFilter = lutStd['LAMBDASTDFILTER'][0][usedLutFilterMark]

        # Convert maps to lists...
        # Each per-band dict becomes a band-ordered list/array, filling
        # missing bands with the stated default.
        self.ccdGraySubCCD = self._convertDictToBandList(self.ccdGraySubCCDDict,
                                                         bool, False, required=False)
        self.ccdGrayFocalPlane = self._convertDictToBandList(self.ccdGrayFocalPlaneDict,
                                                             bool, False, required=False)
        self.superStarSubCCD = self._convertDictToBandList(self.superStarSubCCDDict,
                                                           bool, False, required=False)
        self.aperCorrInputSlopes = self._convertDictToBandList(self.aperCorrInputSlopeDict,
                                                               float, self.illegalValue,
                                                               ndarray=True, required=False)
        self.sigFgcmMaxEGray = self._convertDictToBandList(self.sigFgcmMaxEGrayDict,
                                                           float, 0.05, required=False)
        self.approxThroughput = self._convertDictToBandList(self.approxThroughputDict,
                                                            float, 1.0, required=False)
        self.expVarGrayPhotometricCut = self._convertDictToBandList(self.expVarGrayPhotometricCutDict,
                                                                    float, 0.0005,
                                                                    ndarray=True, required=False)
        self.expGrayPhotometricCut = self._convertDictToBandList(self.expGrayPhotometricCutDict,
                                                                 float, -0.05,
                                                                 ndarray=True, required=True,
                                                                 dictName='expGrayPhotometricCutDict')
        self.expGrayHighCut = self._convertDictToBandList(self.expGrayHighCutDict,
                                                          float, 0.10,
                                                          ndarray=True, required=True,
                                                          dictName='expGrayHighCutDict')
        self.useRepeatabilityForExpGrayCuts = self._convertDictToBandList(self.useRepeatabilityForExpGrayCutsDict,
                                                                          bool, False, required=False)

        if self.colorSplitBands[0] not in self.bands or self.colorSplitBands[1] not in self.bands:
            raise RuntimeError("Bands listed in colorSplitBands must be valid bands.")
        self.colorSplitIndices = [self.bands.index(x) for x in self.colorSplitBands]

        if (self.expGrayPhotometricCut.max() >= 0.0):
            raise ValueError("expGrayPhotometricCut must all be negative")
        if (self.expGrayHighCut.max() <= 0.0):
            raise ValueError("expGrayHighCut must all be positive")

        if self.sigmaCalRange[1] < self.sigmaCalRange[0]:
            raise ValueError("sigmaCalRange[1] must me equal to or larger than sigmaCalRange[0]")

        # and look at the exposure file and grab some stats
        self.expRange = np.array([np.min(expInfo[self.expField]),np.max(expInfo[self.expField])])
        self.mjdRange = np.array([np.min(expInfo['MJD']),np.max(expInfo['MJD'])])
        self.nExp = expInfo.size

        # Exactly one of ccdOffsets / focalPlaneProjector must be supplied.
        if ccdOffsets is None and focalPlaneProjector is None:
            raise ValueError("Must supply either ccdOffsets or focalPlaneProjector")
        elif ccdOffsets is not None and focalPlaneProjector is not None:
            raise ValueError("Must supply only one of ccdOffsets or focalPlaneProjector")
        elif focalPlaneProjector is not None:
            self.focalPlaneProjector = focalPlaneProjector
        else:
            # Use old ccd offsets, so create a translator
            self.focalPlaneProjector = FocalPlaneProjectorFromOffsets(ccdOffsets)

        # based on mjdRange, look at epochs; also sort.
        # confirm that we cover all the exposures, and remove excess epochs

        # are they sorted?
        if (self.epochMJDs != np.sort(self.epochMJDs)).any():
            raise ValueError("epochMJDs must be sorted in ascending order")

        test=np.searchsorted(self.epochMJDs,self.mjdRange)

        if test.min() == 0:
            self.fgcmLog.warning("Exposure start MJD before epoch range. Adding additional epoch.")
            self.epochMJDs = np.insert(self.epochMJDs, 0, self.mjdRange[0] - 1.0)
            if self.epochNames is not None:
                self.epochNames.insert(0, 'epoch-pre')
        if test.max() == self.epochMJDs.size:
            self.fgcmLog.warning("Exposure end MJD after epoch range. Adding additional epoch.")
            self.epochMJDs = np.insert(self.epochMJDs, len(self.epochMJDs), self.mjdRange[1] + 1.0)
            if self.epochNames is not None:
                self.epochNames.insert(len(self.epochNames), 'epoch-post')

        if self.epochNames is None:
            self.epochNames = []
            for i in range(self.epochMJDs.size):
                self.epochNames.append('epoch%d' % (i))

        # crop to valid range
        # NOTE(review): `test` was computed before any epochs were inserted
        # above, so if an epoch was prepended, test[0]-1 is -1 here — verify
        # the crop behaves as intended in that case.
        self.epochMJDs = self.epochMJDs[test[0]-1:test[1]+1]
        self.epochNames = self.epochNames[test[0]-1:test[1]+1]

        # and look at washMJDs; also sort
        st=np.argsort(self.washMJDs)
        if (not np.array_equal(st,np.arange(self.washMJDs.size))):
            raise ValueError("Input washMJDs must be in sort order.")

        # keep only wash dates that fall inside the observed MJD range
        gd,=np.where((self.washMJDs > self.mjdRange[0]) &
                     (self.washMJDs < self.mjdRange[1]))
        self.washMJDs = self.washMJDs[gd]

        # and the coating MJDs
        st = np.argsort(self.coatingMJDs)
        if (not np.array_equal(st, np.arange(self.coatingMJDs.size))):
            raise ValueError("Input coatingMJDs must be in sort order.")

        gd, = np.where((self.coatingMJDs > self.mjdRange[0]) &
                       (self.coatingMJDs < self.mjdRange[1]))
        self.coatingMJDs = self.coatingMJDs[gd]

        # Deal with fit band, notfit band, required, and notrequired indices
        bandFitFlag = np.zeros(len(self.bands), dtype=bool)
        bandNotFitFlag = np.zeros_like(bandFitFlag)
        bandRequiredFlag = np.zeros_like(bandFitFlag)

        for i, band in enumerate(self.bands):
            if band in self.fitBands:
                bandFitFlag[i] = True
            if band in self.requiredBands:
                bandRequiredFlag[i] = True
            if len(self.notFitBands) > 0:
                if band in self.notFitBands:
                    bandNotFitFlag[i] = True
                if band in self.fitBands and band in self.notFitBands:
                    raise ValueError("Cannot have the same band in fitBands and notFitBands")

        self.bandFitIndex = np.where(bandFitFlag)[0]
        self.bandNotFitIndex = np.where(bandNotFitFlag)[0]
        self.bandRequiredIndex = np.where(bandRequiredFlag)[0]
        self.bandNotRequiredIndex = np.where(~bandRequiredFlag)[0]

        if np.array_equal(self.bandFitIndex, self.bandRequiredIndex):
            self.allFitBandsAreRequired = True
        else:
            self.allFitBandsAreRequired = False

        # and check the star color cuts and replace with indices...
        # note that self.starColorCuts is a copy so that we don't overwrite.
        for cCut in self.starColorCuts:
            if (not isinstance(cCut[0],int)) :
                if (cCut[0] not in self.bands):
                    raise ValueError("starColorCut band %s not in list of bands!" % (cCut[0]))
                cCut[0] = list(self.bands).index(cCut[0])
            if (not isinstance(cCut[1],int)) :
                if (cCut[1] not in self.bands):
                    raise ValueError("starColorCut band %s not in list of bands!" % (cCut[1]))
                cCut[1] = list(self.bands).index(cCut[1])

        # Check for input aperture corrections.
        if self.aperCorrFitNBins == 0 and np.any(self.aperCorrInputSlopes == self.illegalValue):
            self.fgcmLog.warning("Aperture corrections will not be fit; strongly recommend setting aperCorrInputSlopeDict")

        # Check the sed mapping dictionaries
        # First, make sure every band is listed in the sedTermDict
        for band in self.bands:
            if band not in self.sedTermDict:
                raise RuntimeError("Band %s not listed in sedTermDict." % (band))

        # Second, make sure sedBoundaryTermDict is correct format
        for boundaryTermName, boundaryTerm in self.sedBoundaryTermDict.items():
            if 'primary' not in boundaryTerm or 'secondary' not in boundaryTerm:
                raise RuntimeError("sedBoundaryTerm %s must have primary and secondary keys." % (boundaryTerm))
            if boundaryTerm['primary'] not in self.bands:
                raise RuntimeError("sedBoundaryTerm %s band %s not in list of bands." %
                                   (boundaryTermName, boundaryTerm['primary']))
            if boundaryTerm['secondary'] not in self.bands:
                raise RuntimeError("sedBoundaryTerm %s band %s not in list of bands." %
                                   (boundaryTermName, boundaryTerm['secondary']))

        # Third, extract all the terms and bands from sedTermDict, make sure all
        # are defined.
        mapBands = []
        mapTerms = []
        for band in self.sedTermDict:
            sedTerm = self.sedTermDict[band]
            if 'extrapolated' not in sedTerm:
                raise RuntimeError("sedTermDict %s must have 'extrapolated' key." % (band))
            if 'constant' not in sedTerm:
                raise RuntimeError("sedTermDict %s must have 'constant' key." % (band))
            if 'primaryTerm' not in sedTerm:
                raise RuntimeError("sedTermDict %s must have a primaryTerm." % (band))
            if 'secondaryTerm' not in sedTerm:
                raise RuntimeError("sedTermDict %s must have a secondaryTerm." % (band))
            mapTerms.append(sedTerm['primaryTerm'])
            if sedTerm['secondaryTerm'] is not None:
                mapTerms.append(sedTerm['secondaryTerm'])
            if sedTerm['extrapolated']:
                # Extrapolated terms additionally require both terms and
                # three bands to be declared.
                if sedTerm['secondaryTerm'] is None:
                    raise RuntimeError("sedTermDict %s must have a secondaryTerm if extrapolated." % (band))
                if 'primaryBand' not in sedTerm:
                    raise RuntimeError("sedTermDict %s must have a primaryBand if extrapolated." % (band))
                if 'secondaryBand' not in sedTerm:
                    raise RuntimeError("sedTermDict %s must have a secondaryBand if extrapolated." % (band))
                if 'tertiaryBand' not in sedTerm:
                    raise RuntimeError("sedTermDict %s must have a tertiaryBand if extrapolated." % (band))
                mapBands.append(sedTerm['primaryBand'])
                mapBands.append(sedTerm['secondaryBand'])
                mapBands.append(sedTerm['tertiaryBand'])

        for mapTerm in mapTerms:
            if mapTerm not in self.sedBoundaryTermDict:
                raise RuntimeError("Term %s is used in sedTermDict but not in sedBoundaryTermDict" % (mapTerm))
        for mapBand in mapBands:
            if mapBand not in self.bands:
                raise RuntimeError("Band %s is used in sedTermDict but not in bands" % (mapBand))

        # and AB zeropoint
        # The Planck constant is split into a mantissa (6.6) and a power of
        # ten (expPlanck = -27) so the log10 terms below stay well scaled.
        self.hPlanck = 6.6
        self.expPlanck = -27.0
        self.zptABNoThroughput = (-48.6 - 2.5 * self.expPlanck +
                                  2.5 * np.log10(self.mirrorArea) -
                                  2.5 * np.log10(self.hPlanck * self.cameraGain))

        self.fgcmLog.info("AB offset (w/o throughput) estimated as %.4f" % (self.zptABNoThroughput))

        # Keep the raw input dict for saveConfigForNextCycle().
        self.configDictSaved = configDict
        ## FIXME: add pmb scaling?
def updateCycleNumber(self, newCycleNumber):
"""
Update the cycle number for re-use of config.
Parameters
----------
newCycleNumber: `int`
"""
self.cycleNumber = newCycleNumber
self.outfileBaseWithCycle = '%s_cycle%02d' % (self.outfileBase, self.cycleNumber)
logFile = '%s/%s.log' % (self.outputPath, self.outfileBaseWithCycle)
if os.path.isfile(logFile) and not self.clobber:
raise RuntimeError("Found logFile %s, but clobber == False." % (logFile))
self.plotPath = None
if self.doPlots:
self.plotPath = '%s/%s_plots' % (self.outputPath,self.outfileBaseWithCycle)
if os.path.isdir(self.plotPath) and not self.clobber:
# check if directory is empty
if len(os.listdir(self.plotPath)) > 0:
raise RuntimeError("Found plots in %s, but clobber == False." % (self.plotPath))
if not self.externalLogger:
self.fgcmLog = FgcmLogger('%s/%s.log' % (self.outputPath,
self.outfileBaseWithCycle),
self.logLevel, printLogger=configDict['printOnly'])
if (self.plotPath is not None and not os.path.isdir(self.plotPath)):
try:
os.makedirs(self.plotPath)
except:
raise IOError("Could not create plot path: %s" % (self.plotPath))
@staticmethod
def _readConfigDict(configFile):
"""
Internal method to read a configuration dictionary from a yaml file.
"""
with open(configFile) as f:
configDict = yaml.load(f, Loader=yaml.SafeLoader)
print("Configuration read from %s" % (configFile))
return configDict
@classmethod
def configWithFits(cls, configDict, noOutput=False):
"""
Initialize FgcmConfig object and read in fits files.
parameters
----------
configDict: dict
Dictionary with config variables.
noOutput: bool, default=False
Do not create output directory.
"""
import fitsio
expInfo = fitsio.read(configDict['exposureFile'], ext=1)
try:
lutIndex = fitsio.read(configDict['lutFile'], ext='INDEX')
lutStd = fitsio.read(configDict['lutFile'], ext='STD')
except:
raise IOError("Could not read LUT info")
ccdOffsets = fitsio.read(configDict['ccdOffsetFile'], ext=1)
return cls(configDict, lutIndex, lutStd, expInfo, checkFiles=True, noOutput=noOutput, ccdOffsets=ccdOffsets)
    def saveConfigForNextCycle(self,fileName,parFile,flagStarFile):
        """
        Save a yaml configuration file for the next fit cycle (using fits files).

        Parameters
        ----------
        fileName: string
            Config file filename
        parFile: string
            File with saved parameters from previous cycle
        flagStarFile: string
            File with flagged stars from previous cycle
        """
        # Shallow copy: nested dicts (e.g. the cut dicts) are shared with
        # self.configDictSaved and are mutated in place below.
        configDict = self.configDictSaved.copy()

        # save the outputPath
        configDict['outputPath'] = self.outputPath

        # update the cycleNumber
        configDict['cycleNumber'] = self.cycleNumber + 1

        # default to NOT freeze atmosphere
        configDict['freezeStdAtmosphere'] = False

        # do we want to increase maxIter?  Hmmm.

        configDict['inParameterFile'] = parFile

        configDict['inFlagStarFile'] = flagStarFile

        # And update the photometric cuts...
        # These need to be converted to lists of floats
        # NOTE(review): the values are read back from
        # self.expGrayPhotometricCutDict / self.expGrayHighCutDict
        # themselves; if updated cuts live only in the band-ordered arrays
        # self.expGrayPhotometricCut / self.expGrayHighCut, those updates
        # are not propagated here -- confirm which source is intended.
        for i, b in enumerate(self.bands):
            configDict['expGrayPhotometricCutDict'][b] = float(self.expGrayPhotometricCutDict[b])
            configDict['expGrayHighCutDict'][b] = float(self.expGrayHighCutDict[b])

        with open(fileName,'w') as f:
            yaml.dump(configDict, stream=f)
def _setVarsFromDict(self, d):
for key in d:
if key not in type(self).__dict__:
raise AttributeError("Unknown config variable: %s" % (key))
setattr(self, key, d[key])
def validate(self):
"""
"""
for var in type(self).__dict__:
try:
type(self).__dict__[var].validate(var)
except AttributeError:
pass
    def _setDefaultLengths(self):
        """
        Hook for setting default lengths of array-valued fields.
        Currently a no-op.
        """
        pass
def _convertDictToBandList(self, inputDict, dtype, default,
required=False, ndarray=False, dictName=''):
"""
Convert an input dict into a list or ndarray in band order.
Parameters
----------
inputDict : `dict`
Input dictionary
dtype : `type`
Type of array
default : value of dtype
Default value
ndarray : `bool`, optional
Return ndarray (True) or list (False)
required : `bool`, optional
All bands are required?
dictName: `str`, optional
Name of dict for error logging. Should be set if required is True.
Returns
-------
bandOrderedList : `ndarray` or `list`
"""
if ndarray:
retval = np.zeros(len(self.bands), dtype=dtype) + default
else:
retval = [default]*len(self.bands)
if required:
for band in self.bands:
if band not in inputDict:
raise RuntimeError("All bands must be listed in %s" % (dictName))
for i, band in enumerate(self.bands):
if band in inputDict:
retval[i] = inputDict[band]
return retval
| 46.063882 | 139 | 0.618759 |
a339e1e76e5e76805cb412ba27d8dde8548e8e54 | 1,474 | py | Python | spearmint/transformations/demos/bibeta/show_warp_bibeta.py | fernandezdaniel/Spearmint | 3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84 | [
"RSA-MD"
] | 6 | 2021-06-29T11:26:49.000Z | 2022-01-20T18:12:47.000Z | spearmint/transformations/demos/bibeta/show_warp_bibeta.py | fernandezdaniel/Spearmint | 3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84 | [
"RSA-MD"
] | null | null | null | spearmint/transformations/demos/bibeta/show_warp_bibeta.py | fernandezdaniel/Spearmint | 3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84 | [
"RSA-MD"
] | 9 | 2018-06-28T13:06:35.000Z | 2021-06-20T18:21:58.000Z | #Bibeta in action.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import beta
from scipy.stats import randint
def plot_1D_function(x, y, y_name='y'):
    """
    Plot y against x on a single subplot, labeling both the legend entry
    and the title with *y_name*, then display the figure.
    """
    subplot_axes = plt.subplot(111)
    subplot_axes.plot(x, y, label=y_name)
    plt.title(y_name)
    plt.legend(loc='best')
    plt.show()
def fixed_objective_function(x, a=25):
    """
    Evaluate sin(a*x) min-max rescaled onto [0, 1].

    Parameters
    ----------
    x : ndarray
        Input locations (typically on [0, 1]).
    a : float, optional
        Angular frequency of the sinusoid (default 25).

    Returns
    -------
    ndarray
        sin(a*x) linearly rescaled so its minimum is 0 and maximum is 1.
        If the target is constant (e.g. a single-element input), returns
        zeros instead of dividing by zero.
    """
    target = np.sin(a * x)
    t_max = np.max(target)
    t_min = np.min(target)
    spread = t_max - t_min
    if spread == 0:
        # Degenerate case: constant target would give 0/0 (NaN) in the
        # original formula; return a well-defined zero array instead.
        return np.zeros_like(target)
    return (target - t_min) / spread
# Grids of candidate shape parameters for the beta distributions, a grid of
# candidate mixture weights, and a dense input grid on [0, 1].
a_1 = np.linspace(0,10,100)
a_2 = np.linspace(0,10,100)
b_1 = np.linspace(0,10,100)
b_2 = np.linspace(0,10,100)
pi = np.linspace(0,1,10)
input_space = np.linspace(0,1,1000)

# Draw random indices into the parameter grids above.
pi_rvs = randint.rvs(0,10)
a_1_rvs = randint.rvs(0,100)
a_2_rvs = randint.rvs(0,100)
b_1_rvs = randint.rvs(0,100)
b_2_rvs = randint.rvs(0,100)

# Parameters for the single-beta warp.
# NOTE(review): (a, b) reuse the same random indices as (a1, b1), so the
# plain beta warp always matches the first mixture component -- presumably
# intentional for this demo.  Also, index 0 selects a shape parameter of
# exactly 0, outside the beta distribution's valid domain -- verify.
a = a_1[a_1_rvs]
b = b_1[b_1_rvs]

# Parameters for the two-component (bibeta) mixture warp.
a1 = a_1[a_1_rvs]
a2 = a_2[a_2_rvs]
b1 = b_1[b_1_rvs]
b2 = b_2[b_2_rvs]
p = pi[pi_rvs]

# Warp the input space through a beta CDF and a mixture-of-two-betas CDF.
beta_cdf = beta.cdf(input_space,a,b)
bibeta_cdf = p*beta.cdf(input_space,a1,b1) + (1-p)*beta.cdf(input_space,a2,b2)

# Show the warps themselves and their effect on the sinusoidal objective.
plot_1D_function(input_space, input_space, 'Input Space')
plot_1D_function(input_space, beta_cdf, 'Beta cdf')
plot_1D_function(input_space, bibeta_cdf, 'Bibeta cdf')
plot_1D_function(input_space, fixed_objective_function(input_space), 'Objective')
plot_1D_function(input_space, fixed_objective_function(beta_cdf), 'Warped Objective Beta')
plot_1D_function(input_space, fixed_objective_function(bibeta_cdf), 'Warped Objective Bibeta')
a33b33e393caf1662689964e235489b7f1ad1bdc | 1,350 | py | Python | setup.py | agflood/scatteringmatrix | dc9a048e4b57475c67bbac7d3165cf29b5cc66e9 | [
"MIT"
] | 1 | 2021-04-18T16:09:17.000Z | 2021-04-18T16:09:17.000Z | setup.py | agflood/scatteringmatrix | dc9a048e4b57475c67bbac7d3165cf29b5cc66e9 | [
"MIT"
] | 6 | 2018-09-03T06:46:39.000Z | 2019-05-25T21:42:17.000Z | setup.py | agflood/scatteringmatrix | dc9a048e4b57475c67bbac7d3165cf29b5cc66e9 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from scatteringmatrix import __version__
# Read the README for use as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(name='scatteringmatrix',
      version=__version__,
      description='Optical scattering matrix library',
      long_description=long_description,
      long_description_content_type="text/markdown",
      author='Andrew G. Flood',
      author_email='andrew.flood@mail.utoronto.ca',
      url='https://github.com/agflood/scatteringmatrix',
      license='MIT',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Science/Research',
          'Topic :: Scientific/Engineering',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
      ],
      keywords='optics scattering matrix photonics',
      # Exclude test and documentation directories from the installed package.
      packages=find_packages(exclude=('tests', 'docs', 'sphinx')),
      py_modules=["scatteringmatrix"],
      install_requires=['numpy','scipy'],
      python_requires='>=3',
      zip_safe=False)
| 39.705882 | 66 | 0.60963 |