code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import logging
from dataclasses import asdict
import voluptuous as vol
from homeassistant import config_entries
from .const import CONF_URL, CONF_TOKEN, DOMAIN
_LOGGER = logging.getLogger(__name__)
class ZWaveMeConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """ZWaveMe integration config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    def __init__(self):
        """Initialize flow state; URL and token start unset."""
        self.url = vol.UNDEFINED
        self.token = vol.UNDEFINED

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user or fed by discovery.

        The input may carry a "host" key (zeroconf discovery payload) or a
        "url" key (manual entry / form submission).  An entry is only
        created once a token is present; when discovery supplied just the
        host, the form is shown again asking only for the token.
        """
        errors = {}
        # Single-instance integration: refuse a second config entry.
        if self._async_current_entries():
            return self.async_abort(reason="already_configured")
        if user_input is not None:
            if "host" in user_input:
                # Zeroconf discovery supplies a bare host name.  Mirror it
                # into "url" so the normalization below cannot KeyError.
                self.url = user_input["host"]
                user_input["url"] = self.url
            elif "url" in user_input:
                self.url = user_input["url"]
            else:
                # Token-only form round-trip: restore the stored URL.
                user_input["url"] = self.url
            if "token" in user_input:
                self.token = user_input["token"]
                # Normalize to a websocket URL with the default port.
                if not user_input["url"].startswith(("ws://", "wss://")):
                    user_input["url"] = "ws://" + user_input["url"] + ":8083"
                    self.url = "ws://" + self.url + ":8083"
                await self.async_set_unique_id(DOMAIN + self.url)
                return self.async_create_entry(
                    title=self.url,
                    data=user_input,
                    description_placeholders={
                        "docs_url": "https://zwayhomeautomation.docs.apiary.io/"
                    },
                )
            # No token yet (e.g. discovery payload): fall through and ask
            # for it with the token-only schema below.
        if self.url != vol.UNDEFINED:
            # URL already known (discovery or previous pass): only the
            # access token is still missing.
            schema = vol.Schema(
                {
                    vol.Required(CONF_TOKEN): str,
                }
            )
        else:
            schema = vol.Schema(
                {
                    vol.Required(CONF_URL): str,
                    vol.Required(CONF_TOKEN): str,
                }
            )
        return self.async_show_form(
            step_id="user",
            data_schema=schema,
            description_placeholders={
                "docs_url": "https://zwayhomeautomation.docs.apiary.io/"
            },
            errors=errors,
        )

    async def async_step_zeroconf(self, discovery_info):
        """Handle a discovered Z-Wave accessory.

        This flow is triggered by the discovery component.
        """
        if isinstance(discovery_info, dict):
            return await self.async_step_user(discovery_info)
        return await self.async_step_user(asdict(discovery_info))
import logging
from dataclasses import asdict
import voluptuous as vol
from homeassistant import config_entries
from .const import CONF_URL, CONF_TOKEN, DOMAIN
_LOGGER = logging.getLogger(__name__)
class ZWaveMeConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""ZWaveMe integration config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize flow."""
self.url = vol.UNDEFINED
self.token = vol.UNDEFINED
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if self._async_current_entries():
return self.async_abort(reason="already_configured")
if user_input is not None:
if "host" in user_input:
self.url = user_input["host"]
else:
if "url" in user_input:
self.url = user_input["url"]
else:
user_input["url"] = self.url
self.token = user_input["token"]
if not user_input["url"].startswith("ws://") and not user_input[
"url"
].startswith("wss://"):
user_input["url"] = "ws://" + user_input["url"] + ":8083"
self.url = "ws://" + self.url + ":8083"
await self.async_set_unique_id(DOMAIN + self.url)
return self.async_create_entry(
title=self.url,
data=user_input,
description_placeholders={
"docs_url": "https://zwayhomeautomation.docs.apiary.io/"
},
)
if self.url != vol.UNDEFINED:
schema = vol.Schema(
{
vol.Required(CONF_TOKEN): str,
}
)
else:
schema = vol.Schema(
{
vol.Required(CONF_URL): str,
vol.Required(CONF_TOKEN): str,
}
)
return self.async_show_form(
step_id="user",
data_schema=schema,
description_placeholders={
"docs_url": "https://zwayhomeautomation.docs.apiary.io/"
},
errors=errors,
)
async def async_step_zeroconf(self, discovery_info):
"""Handle a discovered Z-Wave accessory.
This flow is triggered by the discovery component.
"""
if isinstance(discovery_info, dict):
return await self.async_step_user(discovery_info)
else:
return await self.async_step_user(asdict(discovery_info)) | 0.690768 | 0.067701 |
import os
import html
import signal
from chwall.gui.shared import ChwallGui
from chwall.wallpaper import current_wallpaper_info
from chwall.utils import get_binary_path, reset_pending_list
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gdk, GdkPixbuf, GLib, Gtk # noqa: E402
import gettext # noqa: E402
# Uncomment the following line during development.
# Please, be cautious to NOT commit the following line uncommented.
# gettext.bindtextdomain("chwall", "./locale")
gettext.textdomain("chwall")
_ = gettext.gettext
class ChwallApp(ChwallGui):
    """Main Chwall window: current-wallpaper preview and daemon controls."""

    def __init__(self):
        super().__init__()
        self.app = Gtk.Window(title="Chwall")
        self.app.set_icon_name("chwall")
        self.app.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
        self.app.set_resizable(False)
        self.app.connect("destroy", self.kthxbye)
        hb = Gtk.HeaderBar()
        hb.set_show_close_button(True)
        hb.props.title = "Chwall"
        button = Gtk.ToggleButton()
        button.set_image(Gtk.Image.new_from_icon_name(
            "open-menu-symbolic", Gtk.IconSize.BUTTON))
        button.set_tooltip_text(_("Preferences"))
        button.connect("toggled", self.show_main_menu)
        hb.pack_end(button)
        self.app.set_titlebar(hb)
        app_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        # Warning bar revealed while the daemon rebuilds its pending list.
        self.notif_reset = Gtk.InfoBar()
        self.notif_reset.set_message_type(Gtk.MessageType.WARNING)
        notif_box = self.notif_reset.get_content_area()
        notif_box.add(
            Gtk.Label(label=_("Wallpapers list may be built again. It "
                              "may take a long time if you have a lot "
                              "of sources enabled. Please be patient.")))
        app_box.pack_start(self.notif_reset, False, False, 0)
        self.wallpaper = Gtk.Image()
        app_box.pack_start(self.wallpaper, True, True, 0)
        control_box = Gtk.ActionBar()
        button = Gtk.Button.new_from_icon_name(
            "media-skip-backward-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
        button.set_tooltip_text(_("Previous wallpaper"))
        button.connect("clicked", self.on_change_wallpaper, True)
        control_box.pack_start(button)
        self.daemon_play_pause_button = Gtk.Button.new()
        self.decorate_play_pause_button(True)
        self.daemon_play_pause_button.connect(
            "clicked", self.on_play_pause_clicked)
        control_box.pack_start(self.daemon_play_pause_button)
        button = Gtk.Button.new_from_icon_name(
            "media-skip-forward-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
        button.set_tooltip_text(_("Next wallpaper"))
        button.connect("clicked", self.on_change_wallpaper)
        control_box.pack_start(button)
        button = Gtk.Separator()
        control_box.pack_start(button)
        button = Gtk.Button.new_from_icon_name(
            "media-playback-stop-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
        button.set_tooltip_text(_("Stop daemon and erase pending list"))
        button.connect("clicked", self.on_stop_clicked)
        control_box.pack_start(button)
        button = Gtk.Separator()
        control_box.pack_start(button)
        self.favorite_button = Gtk.Button.new_from_icon_name(
            "bookmark-new", Gtk.IconSize.LARGE_TOOLBAR)
        # Connect the handler exactly once.  update_wall_box() previously
        # connected it on every refresh, stacking duplicate handlers on
        # each SIGUSR1 / wallpaper change; sensitivity now alone decides
        # whether the button can fire.
        self.favorite_button.connect("clicked", self.on_favorite_wallpaper)
        control_box.pack_start(self.favorite_button)
        self.walldesc = Gtk.Label(
            hexpand=True, halign=Gtk.Align.CENTER,
            justify=Gtk.Justification.CENTER,
            wrap=True, single_line_mode=True
        )
        self.walldesc.set_markup(
            "<a href=\"https://git.umaneti.net/chwall/\">Chwall</a>"
        )
        control_box.set_center_widget(self.walldesc)
        button = Gtk.Button.new_from_icon_name(
            "edit-delete", Gtk.IconSize.LARGE_TOOLBAR)
        button.set_tooltip_text(_("Blacklist"))
        button.connect("clicked", self.on_blacklist_wallpaper)
        control_box.pack_end(button)
        app_box.pack_end(control_box, False, False, 0)
        self.app.add(app_box)
        self.app.show_all()
        self.update_wall_box()
        # Refresh the preview when the daemon signals a wallpaper change.
        signal.signal(signal.SIGUSR1, self.update_wall_box)

    def update_wall_box(self, _signo=None, _stack_frame=None):
        """Refresh preview image, description label and favorite button.

        Also usable directly as a signal handler (SIGUSR1), hence the
        unused signal arguments.
        """
        self.notif_reset.set_revealed(False)
        self.notif_reset.hide()
        wallinfo = current_wallpaper_info()
        if wallinfo["type"] is None:
            # Wallpaper was set outside of chwall: show a placeholder.
            self.walldesc.set_markup("<i>{}</i>".format(
                _("Current wallpaper is not managed by Chwall")))
            self.wallpaper.set_from_icon_name(
                "preferences-desktop-wallpaper-symbolic", Gtk.IconSize.DIALOG)
            self.favorite_button.set_sensitive(False)
            self.favorite_button.set_tooltip_text(
                _("Current wallpaper is not managed by Chwall"))
            return
        try:
            if self.is_current_wall_favorite(wallinfo):
                self.favorite_button.set_sensitive(False)
                self.favorite_button.set_tooltip_text(_("Already a favorite"))
            else:
                self.favorite_button.set_sensitive(True)
                self.favorite_button.set_tooltip_text(_("Save as favorite"))
        except PermissionError:
            self.favorite_button.set_sensitive(False)
            self.favorite_button.set_tooltip_text(
                _("Error accessing the favorites folder"))
        # Escape markup-sensitive characters before feeding the label
        # (the original "&" -> "&" replacement was a no-op; the intent is
        # Pango-markup escaping of the description text).
        label_str = "<a href=\"{link}\">{text}</a>".format(
            link=html.escape(wallinfo["remote-uri"]),
            text=wallinfo["description"].replace("&", "&amp;"))
        self.walldesc.set_markup(label_str)
        self.walldesc.grab_focus()
        # Show it now to reserve correct size
        self.walldesc.show()
        # Now we can use this width to display the wallpaper itself
        size_data = self.app.get_preferred_size()
        # Get `natural_size`
        width = size_data[1].width
        if width < 800:
            width = 800
        try:
            pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
                wallinfo["local-picture-path"], width, 600, True)
            self.wallpaper.set_from_pixbuf(pixbuf)
        except gi.repository.GLib.Error:
            # Picture file missing or unreadable: show a placeholder icon.
            self.wallpaper.set_from_icon_name(
                "image-missing", Gtk.IconSize.DIALOG)
        self.wallpaper.show()
        self.app.resize(width, size_data[1].height)

    def show_main_menu(self, widget):
        """Pop up the header-bar menu under the toggle button."""
        if not widget.get_active():
            return
        menu = Gtk.Menu()
        dinfo = self.daemon_info()
        if dinfo["next-change"] != -1:
            # Informational, non-clickable entry with the next change time.
            item = Gtk.MenuItem.new_with_label(
                dinfo["next-change-label"])
            item.set_sensitive(False)
            menu.append(item)
            item = Gtk.SeparatorMenuItem()
            menu.append(item)
        item = Gtk.MenuItem.new_with_label(
            _("Display notification icon"))
        if self.is_chwall_component_started("icon"):
            item.set_sensitive(False)
        else:
            item.connect("activate", self.run_chwall_component, "icon")
        menu.append(item)
        item = Gtk.MenuItem.new_with_label(_("Preferences"))
        item.connect("activate", self.show_preferences_dialog)
        menu.append(item)
        item = Gtk.MenuItem.new_with_label(_("About Chwall"))
        item.connect("activate", self.show_about_dialog)
        menu.append(item)
        menu.show_all()
        # Untoggle the header-bar button when the menu goes away.
        menu.connect("hide", lambda _w, b: b.set_active(False), widget)
        menu.popup_at_widget(widget, Gdk.Gravity.SOUTH_WEST,
                             Gdk.Gravity.NORTH_WEST, None)

    def decorate_play_pause_button(self, startup=False):
        """Update the play/pause button icon and return the state it shows."""
        dinfo = self.daemon_info()
        # At startup we need to draw the real state of the daemon, but later,
        # this function is called *before* the state change, thus it must
        # reflect the future state of the daemon
        if startup:
            current_state = dinfo["daemon-state"]
        elif dinfo["daemon-state"] == "started":
            current_state = "stopped"
        else:
            current_state = "started"
        if current_state == "started":
            self.daemon_play_pause_button.set_image(
                Gtk.Image.new_from_icon_name("media-playback-pause-symbolic",
                                             Gtk.IconSize.LARGE_TOOLBAR))
            self.daemon_play_pause_button.set_tooltip_text(_("Stop daemon"))
        else:
            self.daemon_play_pause_button.set_image(
                Gtk.Image.new_from_icon_name("media-playback-start-symbolic",
                                             Gtk.IconSize.LARGE_TOOLBAR))
            self.daemon_play_pause_button.set_tooltip_text(_("Start daemon"))
        return current_state

    def on_play_pause_clicked(self, widget):
        """Toggle the daemon: stop it if running, start it otherwise."""
        # When called after a click, this method return the future state. Then
        # we should actually kill the daemon if the *current_state* is
        # *stopped*.
        if self.decorate_play_pause_button() == "stopped":
            self.stop_daemon()
            return
        # Else we should start the server
        self.notif_reset.show()
        self.notif_reset.set_revealed(True)
        self.run_chwall_component(widget, "daemon")

    def on_stop_clicked(self, widget):
        """Stop the daemon, wipe the pending list, and redraw the button."""
        self.stop_daemon()
        reset_pending_list()
        self.decorate_play_pause_button(True)
def _build_translations_for_desktop_file(localedir):
    """Collect localized desktop-entry lines from compiled gettext catalogs.

    Scans *localedir* for language folders holding a compiled
    ``chwall.mo`` catalog and returns a dict of translated
    ``GenericName``/``Comment``/``Name`` lines, one list per desktop key.
    """
    lng_attrs = {key: [] for key in (
        "gname", "comment", "next_name", "previous_name", "blacklist_name")}
    for lng in sorted(os.listdir(localedir)):
        # The template file and the source language need no translation.
        if lng in ("chwall.pot", "en"):
            continue
        mo_path = os.path.join(localedir, lng, "LC_MESSAGES", "chwall.mo")
        if not os.path.exists(mo_path):
            continue
        catalog = gettext.translation(
            "chwall", localedir=localedir, languages=[lng])
        catalog.install()
        tr = catalog.gettext
        lng_attrs["gname"].append(
            "GenericName[{lang}]={key}".format(
                lang=lng, key=tr("Wallpaper Changer")))
        lng_attrs["comment"].append(
            "Comment[{lang}]={key}".format(
                lang=lng,
                key=tr("Main window of the Chwall wallpaper changer")))
        lng_attrs["next_name"].append(
            "Name[{lang}]={key}".format(
                lang=lng, key=tr("Next wallpaper")))
        lng_attrs["previous_name"].append(
            "Name[{lang}]={key}".format(
                lang=lng, key=tr("Previous wallpaper")))
        lng_attrs["blacklist_name"].append(
            "Name[{lang}]={key}".format(
                lang=lng, key=tr("Blacklist")))
    return lng_attrs
def _build_action_block(name, lng_attrs):
    """Build the ``[Desktop Action]`` section lines for the given action."""
    label = name.capitalize()
    action_cmd = get_binary_path("client", "xdg", name)
    lines = [
        "",
        "[Desktop Action {name}]".format(name=label),
        "Exec={app_exec}".format(app_exec=action_cmd),
        "Name={name} wallpaper".format(name=label),
    ]
    # Append the localized Name[...] variants gathered earlier.
    lines.extend(lng_attrs[name + "_name"])
    return lines
def generate_desktop_file(localedir="./locale", out="chwall-app.desktop"):
    """Write the chwall-app .desktop file, or print it when out == "print"."""
    lng_attrs = _build_translations_for_desktop_file(localedir)
    header = ["[Desktop Entry]", "Name=Chwall", "GenericName=Wallpaper Changer"]
    header.extend(lng_attrs["gname"])
    header.append("Comment=Main window of the Chwall wallpaper changer")
    header.extend(lng_attrs["comment"])
    df_content = "\n".join(header)
    df_content += """
Exec={app_exec}
Icon=chwall
Terminal=false
Type=Application
Categories=GTK;GNOME;Utility;
StartupNotify=false
Actions=Next;Previous;Blacklist;
""".format(app_exec=get_binary_path("app", "xdg"))
    actions = []
    for action in ("next", "previous", "blacklist"):
        actions += _build_action_block(action, lng_attrs)
    df_content += "\n".join(actions)
    if out == "print":
        print(df_content)
    else:
        with open(out, "w") as f:
            f.write(df_content)
def start_app():
    """Install signal handlers into the GLib loop and run the GTK app."""
    for sig in (signal.SIGTERM, signal.SIGINT):
        GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, sig, Gtk.main_quit, None)
    ChwallApp()
    Gtk.main()
if __name__ == "__main__":
start_app() | chwall/gui/app.py |
import os
import html
import signal
from chwall.gui.shared import ChwallGui
from chwall.wallpaper import current_wallpaper_info
from chwall.utils import get_binary_path, reset_pending_list
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gdk, GdkPixbuf, GLib, Gtk # noqa: E402
import gettext # noqa: E402
# Uncomment the following line during development.
# Please, be cautious to NOT commit the following line uncommented.
# gettext.bindtextdomain("chwall", "./locale")
gettext.textdomain("chwall")
_ = gettext.gettext
class ChwallApp(ChwallGui):
def __init__(self):
super().__init__()
self.app = Gtk.Window(title="Chwall")
self.app.set_icon_name("chwall")
self.app.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
self.app.set_resizable(False)
self.app.connect("destroy", self.kthxbye)
hb = Gtk.HeaderBar()
hb.set_show_close_button(True)
hb.props.title = "Chwall"
button = Gtk.ToggleButton()
button.set_image(Gtk.Image.new_from_icon_name(
"open-menu-symbolic", Gtk.IconSize.BUTTON))
button.set_tooltip_text(_("Preferences"))
button.connect("toggled", self.show_main_menu)
hb.pack_end(button)
self.app.set_titlebar(hb)
app_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.notif_reset = Gtk.InfoBar()
self.notif_reset.set_message_type(Gtk.MessageType.WARNING)
notif_box = self.notif_reset.get_content_area()
notif_box.add(
Gtk.Label(label=_("Wallpapers list may be built again. It "
"may take a long time if you have a lot "
"of sources enabled. Please be patient.")))
app_box.pack_start(self.notif_reset, False, False, 0)
self.wallpaper = Gtk.Image()
app_box.pack_start(self.wallpaper, True, True, 0)
control_box = Gtk.ActionBar()
button = Gtk.Button.new_from_icon_name(
"media-skip-backward-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
button.set_tooltip_text(_("Previous wallpaper"))
button.connect("clicked", self.on_change_wallpaper, True)
control_box.pack_start(button)
self.daemon_play_pause_button = Gtk.Button.new()
self.decorate_play_pause_button(True)
self.daemon_play_pause_button.connect(
"clicked", self.on_play_pause_clicked)
control_box.pack_start(self.daemon_play_pause_button)
button = Gtk.Button.new_from_icon_name(
"media-skip-forward-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
button.set_tooltip_text(_("Next wallpaper"))
button.connect("clicked", self.on_change_wallpaper)
control_box.pack_start(button)
button = Gtk.Separator()
control_box.pack_start(button)
button = Gtk.Button.new_from_icon_name(
"media-playback-stop-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
button.set_tooltip_text(_("Stop daemon and erase pending list"))
button.connect("clicked", self.on_stop_clicked)
control_box.pack_start(button)
button = Gtk.Separator()
control_box.pack_start(button)
self.favorite_button = Gtk.Button.new_from_icon_name(
"bookmark-new", Gtk.IconSize.LARGE_TOOLBAR)
control_box.pack_start(self.favorite_button)
self.walldesc = Gtk.Label(
hexpand=True, halign=Gtk.Align.CENTER,
justify=Gtk.Justification.CENTER,
wrap=True, single_line_mode=True
)
self.walldesc.set_markup(
"<a href=\"https://git.umaneti.net/chwall/\">Chwall</a>"
)
control_box.set_center_widget(self.walldesc)
button = Gtk.Button.new_from_icon_name(
"edit-delete", Gtk.IconSize.LARGE_TOOLBAR)
button.set_tooltip_text(_("Blacklist"))
button.connect("clicked", self.on_blacklist_wallpaper)
control_box.pack_end(button)
app_box.pack_end(control_box, False, False, 0)
self.app.add(app_box)
self.app.show_all()
self.update_wall_box()
signal.signal(signal.SIGUSR1, self.update_wall_box)
def update_wall_box(self, _signo=None, _stack_frame=None):
self.notif_reset.set_revealed(False)
self.notif_reset.hide()
wallinfo = current_wallpaper_info()
if wallinfo["type"] is None:
self.walldesc.set_markup("<i>{}</i>".format(
_("Current wallpaper is not managed by Chwall")))
self.wallpaper.set_from_icon_name(
"preferences-desktop-wallpaper-symbolic", Gtk.IconSize.DIALOG)
self.favorite_button.set_sensitive(False)
self.favorite_button.set_tooltip_text(
_("Current wallpaper is not managed by Chwall"))
return
try:
if self.is_current_wall_favorite(wallinfo):
self.favorite_button.set_sensitive(False)
self.favorite_button.set_tooltip_text(_("Already a favorite"))
else:
self.favorite_button.set_sensitive(True)
self.favorite_button.set_tooltip_text(_("Save as favorite"))
self.favorite_button.connect(
"clicked", self.on_favorite_wallpaper)
except PermissionError:
self.favorite_button.set_sensitive(False)
self.favorite_button.set_tooltip_text(
_("Error accessing the favorites folder"))
label_str = "<a href=\"{link}\">{text}</a>".format(
link=html.escape(wallinfo["remote-uri"]),
text=wallinfo["description"].replace("&", "&"))
self.walldesc.set_markup(label_str)
self.walldesc.grab_focus()
# Show it now to reserve correct size
self.walldesc.show()
# Now we can use this width to display the wallpaper itself
size_data = self.app.get_preferred_size()
# Get `natural_size`
width = size_data[1].width
if width < 800:
width = 800
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(
wallinfo["local-picture-path"], width, 600, True)
self.wallpaper.set_from_pixbuf(pixbuf)
except gi.repository.GLib.Error:
self.wallpaper.set_from_icon_name(
"image-missing", Gtk.IconSize.DIALOG)
self.wallpaper.show()
self.app.resize(width, size_data[1].height)
def show_main_menu(self, widget):
if not widget.get_active():
return
menu = Gtk.Menu()
dinfo = self.daemon_info()
if dinfo["next-change"] != -1:
item = Gtk.MenuItem.new_with_label(
dinfo["next-change-label"])
item.set_sensitive(False)
menu.append(item)
item = Gtk.SeparatorMenuItem()
menu.append(item)
item = Gtk.MenuItem.new_with_label(
_("Display notification icon"))
if self.is_chwall_component_started("icon"):
item.set_sensitive(False)
else:
item.connect("activate", self.run_chwall_component, "icon")
menu.append(item)
item = Gtk.MenuItem.new_with_label(_("Preferences"))
item.connect("activate", self.show_preferences_dialog)
menu.append(item)
item = Gtk.MenuItem.new_with_label(_("About Chwall"))
item.connect("activate", self.show_about_dialog)
menu.append(item)
menu.show_all()
menu.connect("hide", lambda _w, b: b.set_active(False), widget)
menu.popup_at_widget(widget, Gdk.Gravity.SOUTH_WEST,
Gdk.Gravity.NORTH_WEST, None)
def decorate_play_pause_button(self, startup=False):
dinfo = self.daemon_info()
# At startup we need to draw the real state of the daemon, but later,
# this function is called *before* the state change, thus it must
# reflect the future state of the daemon
if startup:
current_state = dinfo["daemon-state"]
elif dinfo["daemon-state"] == "started":
current_state = "stopped"
else:
current_state = "started"
if current_state == "started":
self.daemon_play_pause_button.set_image(
Gtk.Image.new_from_icon_name("media-playback-pause-symbolic",
Gtk.IconSize.LARGE_TOOLBAR))
self.daemon_play_pause_button.set_tooltip_text(_("Stop daemon"))
else:
self.daemon_play_pause_button.set_image(
Gtk.Image.new_from_icon_name("media-playback-start-symbolic",
Gtk.IconSize.LARGE_TOOLBAR))
self.daemon_play_pause_button.set_tooltip_text(_("Start daemon"))
return current_state
def on_play_pause_clicked(self, widget):
# When called after a click, this method return the future state. Then
# we should actually kill the daemon if the *current_state* is
# *stopped*.
if self.decorate_play_pause_button() == "stopped":
self.stop_daemon()
return
# Else we should start the server
self.notif_reset.show()
self.notif_reset.set_revealed(True)
self.run_chwall_component(widget, "daemon")
def on_stop_clicked(self, widget):
self.stop_daemon()
reset_pending_list()
self.decorate_play_pause_button(True)
def _build_translations_for_desktop_file(localedir):
lng_attrs = {
"gname": [],
"comment": [],
"next_name": [],
"previous_name": [],
"blacklist_name": []
}
for lng in sorted(os.listdir(localedir)):
if lng in ["chwall.pot", "en"]:
continue
domain_file = os.path.join(localedir, lng, "LC_MESSAGES", "chwall.mo")
if not os.path.exists(domain_file):
continue
glng = gettext.translation(
"chwall", localedir=localedir,
languages=[lng])
glng.install()
_ = glng.gettext
lng_attrs["gname"].append(
"GenericName[{lang}]={key}".format(
lang=lng, key=_("Wallpaper Changer")))
lng_attrs["comment"].append(
"Comment[{lang}]={key}".format(
lang=lng,
key=_("Main window of the Chwall wallpaper changer")))
lng_attrs["next_name"].append(
"Name[{lang}]={key}".format(
lang=lng,
key=_("Next wallpaper")))
lng_attrs["previous_name"].append(
"Name[{lang}]={key}".format(
lang=lng,
key=_("Previous wallpaper")))
lng_attrs["blacklist_name"].append(
"Name[{lang}]={key}".format(
lang=lng,
key=_("Blacklist")))
return lng_attrs
def _build_action_block(name, lng_attrs):
label = name.capitalize()
block_cmd = get_binary_path("client", "xdg", name)
block = ["", "[Desktop Action {name}]".format(name=label),
"Exec={app_exec}".format(app_exec=block_cmd),
"Name={name} wallpaper".format(name=label)]
for line in lng_attrs[name + "_name"]:
block.append(line)
return block
def generate_desktop_file(localedir="./locale", out="chwall-app.desktop"):
lng_attrs = _build_translations_for_desktop_file(localedir)
df_content = ["[Desktop Entry]"]
df_content.append("Name=Chwall")
df_content.append("GenericName=Wallpaper Changer")
for line in lng_attrs["gname"]:
df_content.append(line)
df_content.append("Comment=Main window of the Chwall wallpaper changer")
for line in lng_attrs["comment"]:
df_content.append(line)
df_content = "\n".join(df_content)
df_content += """
Exec={app_exec}
Icon=chwall
Terminal=false
Type=Application
Categories=GTK;GNOME;Utility;
StartupNotify=false
Actions=Next;Previous;Blacklist;
""".format(app_exec=get_binary_path("app", "xdg"))
actions = _build_action_block("next", lng_attrs) \
+ _build_action_block("previous", lng_attrs) \
+ _build_action_block("blacklist", lng_attrs)
df_content += "\n".join(actions)
if out == "print":
print(df_content)
else:
with open(out, "w") as f:
f.write(df_content)
def start_app():
# Install signal handlers
GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, signal.SIGTERM,
Gtk.main_quit, None)
GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, signal.SIGINT,
Gtk.main_quit, None)
ChwallApp()
Gtk.main()
if __name__ == "__main__":
start_app() | 0.358578 | 0.064979 |
from behave import Given, When, Then
import time
# Steps covering the manager login scenario (home.html -> manager page).


@Given(u'the manager is on the login page')
def get_login_page(context):
    # `context.driver` is presumably the Selenium WebDriver created by the
    # behave environment hooks -- TODO confirm against environment.py.
    context.driver.get("http://127.0.0.1:5500/home.html")


@When(u'the manager inputs their username into the username bar')
def enter_username(context):
    # Page objects (`context.home_page`, `context.manager_page`) wrap the
    # element lookups used by these steps.
    context.home_page.select_username_input().send_keys('Master Jedi')


@When(u'the manager inputs their password into the password bar')
def enter_password(context):
    context.home_page.select_password_input().send_keys('<PASSWORD>RealChosenOne')


@When(u'the manager selects the user role')
def select_role(context):
    context.manager_page.select_manager_role_dropdown().click()


@When(u'the manager clicks the submit button')
def click_submit_button(context):
    context.home_page.select_submit_button().click()


@Then(u"the manager should be sent to their webpage")
def check_manager_title(context):
    # Fixed sleep is a crude wait for the redirect; an explicit
    # WebDriverWait would be more reliable -- NOTE(review).
    time.sleep(2)
    title = context.driver.title
    assert title == "Manager"
# Steps covering the pending-reimbursement update scenario.


@Given(u'I am on the manager page')
def get_manager_page(context):
    context.driver.get('http://127.0.0.1:5500/manager.html')
    # Fixed sleeps throughout act as waits for async page updates;
    # explicit waits would be more robust -- NOTE(review).
    time.sleep(3)


@When(u'I click the pending tab')
def click_pending_tab(context):
    context.manager_page.select_pending_tab().click()
    time.sleep(3)


@When(u'I click the pending button')
def click_pending_button(context):
    context.manager_page.select_pending_plus_button().click()
    time.sleep(3)


@When(u'I enter the date')
def click_date_button(context):
    # Date typed in MMDDYYYY form into the date input widget.
    context.manager_page.select_date().send_keys('12222021')


@When(u'I select a status')
def select_status(context):
    context.manager_page.select_status_from_dropdown().click()
    time.sleep(1)


@When(u'I enter the reason')
def input_reason(context):
    context.manager_page.select_reason_input().send_keys('Your reimbursement have been approved')


@When(u'I click the save changes button')
def click_save_changes_button(context):
    context.manager_page.select_save_changes_button().click()
    time.sleep(1)
@Then(u'I should see a success message')
def check_success_message(context):
assert context.manager_page.select_success_message() == "Reimbursement was updated successfully." | steps/manager_steps.py | from behave import Given, When, Then
import time
@Given(u'the manager is on the login page')
def get_login_page(context):
context.driver.get("http://127.0.0.1:5500/home.html")
@When(u'the manager inputs their username into the username bar')
def enter_username(context):
context.home_page.select_username_input().send_keys('Master Jedi')
@When(u'the manager inputs their password into the password bar')
def enter_password(context):
context.home_page.select_password_input().send_keys('<PASSWORD>RealChosenOne')
@When(u'the manager selects the user role')
def select_role(context):
context.manager_page.select_manager_role_dropdown().click()
@When(u'the manager clicks the submit button')
def click_submit_button(context):
context.home_page.select_submit_button().click()
@Then(u"the manager should be sent to their webpage")
def check_manager_title(context):
time.sleep(2)
title = context.driver.title
assert title == "Manager"
@Given(u'I am on the manager page')
def get_manager_page(context):
context.driver.get('http://127.0.0.1:5500/manager.html')
time.sleep(3)
@When(u'I click the pending tab')
def click_pending_tab(context):
context.manager_page.select_pending_tab().click()
time.sleep(3)
@When(u'I click the pending button')
def click_pending_button(context):
context.manager_page.select_pending_plus_button().click()
time.sleep(3)
@When(u'I enter the date')
def click_date_button(context):
context.manager_page.select_date().send_keys('12222021')
@When(u'I select a status')
def select_status(context):
context.manager_page.select_status_from_dropdown().click()
time.sleep(1)
@When(u'I enter the reason')
def input_reason(context):
context.manager_page.select_reason_input().send_keys('Your reimbursement have been approved')
@When(u'I click the save changes button')
def click_save_changes_button(context):
context.manager_page.select_save_changes_button().click()
time.sleep(1)
@Then(u'I should see a success message')
def check_success_message(context):
assert context.manager_page.select_success_message() == "Reimbursement was updated successfully." | 0.359027 | 0.133387 |
import math
class Shot():
    """Pre-computed ballistic trajectory of a single tank shot."""

    def __init__(self, power, angle, my_tank, enemy_tank, map_height, map_width):
        # Raw power is scaled down to usable velocity units.
        self.power = power / 5
        self.angle = angle
        self.my_tank = my_tank
        self.enemy_tank = enemy_tank
        self.map_width = map_width
        self.map_height = map_height
        self.path = []
        self._calculate_path()

    def _calculate_path(self):
        """
        Calculate the path your shot should follow.
        """
        gravity = 0.3
        start = self.my_tank.get_shot_start_position()
        x, y = start[0], start[1]
        # First point is the muzzle position itself.
        self.path.append((x, y))
        rad = math.radians(abs(self.angle))
        # Team 1 sits on the left and fires rightward (positive x velocity,
        # bounded by the right edge); the other team mirrors that.
        if self.my_tank.get_team() == 1:
            xvel = math.cos(rad) * self.power
            still_flying = lambda px: px < self.map_width
        else:
            xvel = math.cos(rad) * -self.power
            still_flying = lambda px: px > 0
        # Negative y velocity means "up" (screen coordinates grow downward).
        yvel = math.sin(rad) * -self.power
        # Integrate until the shot leaves the map horizontally or drops
        # below the bottom edge.
        while still_flying(x) and y < self.map_height:
            x += xvel
            y += yvel
            yvel += gravity
            self.path.append((round(x), round(y)))

    def get_path(self):
        """
        Returns the calculated path
        """
        return self.path

    def check_hit(self, tank=None):
        """
        Checks if the current path hits the enemy tank.
        If none defined, uses the enemy_tank as default
        """
        target = tank if tank else self.enemy_tank
        rect = target.get_rect()
        return any(rect.collidepoint(point) for point in self.path)
class Shot():
    """Ballistic path of a projectile fired by a tank under constant gravity."""

    def __init__(self, power, angle, my_tank, enemy_tank, map_height, map_width):
        # Raw UI power is scaled down to velocity units.
        self.power = power / 5
        self.angle = angle
        self.my_tank = my_tank
        self.enemy_tank = enemy_tank
        self.map_width = map_width
        self.map_height = map_height
        self.path = []
        self._calculate_path()

    def _calculate_path(self):
        """
        Calculate the path the shot should follow.

        The path starts at the firing tank's muzzle position and is traced
        until the shot leaves the map horizontally (right edge for team 1,
        left edge otherwise) or passes the vertical limit of the screen.
        The two per-team branches of the original were unified via a
        direction sign; the arithmetic is unchanged.
        """
        gravity = 0.3
        x = self.my_tank.get_shot_start_position()[0]
        y = self.my_tank.get_shot_start_position()[1]
        # Append initial (unrounded) position of the shot.
        self.path.append((x, y))
        # Team 1 fires to the right; any other team fires to the left.
        direction = 1 if self.my_tank.get_team() == 1 else -1
        # Initial horizontal (x) and vertical (y) velocity of the shot.
        xvel = math.cos(math.radians(abs(self.angle))) * self.power * direction
        yvel = math.sin(math.radians(abs(self.angle))) * -self.power
        # While the shot is above the ground and inside the horizontal limit.
        while y < self.map_height and (x < self.map_width if direction == 1
                                       else x > 0):
            x += xvel
            y += yvel
            yvel += gravity
            self.path.append((round(x), round(y)))

    def get_path(self):
        """
        Return the calculated path as a list of (x, y) tuples.
        """
        return self.path

    def check_hit(self, tank=None):
        """
        Return True if the current path hits the target tank.

        If none defined, uses the enemy_tank as default.
        """
        target = tank or self.enemy_tank
        tank_rect = target.get_rect()
        # BUG FIX: the original final line had dataset residue fused into the
        # return statement ("return False | 0.518059 | 0.535402"), which
        # would raise TypeError at runtime; restored to a plain return.
        for point in self.path:
            if tank_rect.collidepoint(point):
                return True
        return False
import urllib.parse
import requests_oauthlib as roauth
import pandas as pd
from tradeking import utils
BASE_URL = 'https://api.tradeking.com/v1'
_DATE_KEYS = ('date', 'datetime', 'divexdate', 'divpaydt', 'timestamp',
'pr_date', 'wk52hidate', 'wk52lodate', 'xdate')
_FLOAT_KEYS = ('ask', 'bid', 'chg', 'cl', 'div', 'dollar_value', 'eps',
'hi', 'iad', 'idelta', 'igamma', 'imp_volatility', 'irho',
'itheta', 'ivega', 'last', 'lo', 'opn', 'opt_val', 'pchg',
'pcls', 'pe', 'phi', 'plo', 'popn', 'pr_adp_100', 'pr_adp_200',
'pr_adp_50', 'prbook', 'prchg', 'strikeprice', 'volatility12',
'vwap', 'wk52hi', 'wk52lo', 'yield')
_INT_KEYS = ('asksz', 'basis', 'bidsz', 'bidtick', 'days_to_expiration',
'incr_vl', 'openinterest', 'pr_openinterest', 'prem_mult', 'pvol',
'sho', 'tr_num', 'vl', 'xday', 'xmonth', 'xyear')
def _quotes_to_df(quotes):
if not isinstance(quotes, list):
quotes = [quotes]
df = pd.DataFrame.from_records(quotes, index='symbol')
for col in df.keys().intersection(_DATE_KEYS):
kwargs = {}
if col == 'timestamp':
kwargs['unit'] = 's'
try:
df[col] = pd.to_datetime(df[col], **kwargs)
except ValueError:
pass
for col in df.keys().intersection(_INT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('int', errors='ignore')
for col in df.keys().intersection(_FLOAT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('float', errors='ignore')
return df
# TODO(jkoelker) Would be nice to do a proper DSL
class OptionQuery(object):
    """Builds a TradeKing option-search query string.

    Accepts a single "field op value" clause or a list of such clauses
    and renders them joined with ' AND ' in the API's "field-op:value"
    form.  Clauses with an unknown field or operator are silently dropped.
    """

    FIELDS = ('strikeprice', 'xdate', 'xmonth', 'xyear', 'put_call', 'unique')
    OPS = {'<': 'lt', 'lt': 'lt',
           '>': 'gt', 'gt': 'gt',
           '>=': 'gte', 'gte': 'gte',
           '<=': 'lte', 'lte': 'lte',
           '=': 'eq', '==': 'eq', 'eq': 'eq'}

    def __init__(self, query):
        if isinstance(query, str):
            query = [query]
        self._query = []
        for clause in query:
            field, op, value = clause.split()
            field = field.lower()
            # Keep only clauses that use a known field and operator.
            if field in self.FIELDS and op in self.OPS:
                if field == 'xdate':
                    # Expiration dates are sent as YYYYMMDD.
                    value = pd.to_datetime(value).strftime('%Y%m%d')
                self._query.append((field, self.OPS[op], value))

    def __str__(self):
        return ' AND '.join('%s-%s:%s' % (field, op, value)
                            for field, op, value in self._query)
class API(object):
    """Thin OAuth1 HTTP wrapper around the TradeKing REST endpoints."""

    def __init__(self, consumer_key, consumer_secret,
                 oauth_token, oauth_secret):
        self._api = roauth.OAuth1Session(client_key=consumer_key,
                                         client_secret=consumer_secret,
                                         resource_owner_key=oauth_token,
                                         resource_owner_secret=oauth_secret)

    def join(self, *paths, **kwargs):
        """Join URL path components with '/'.

        Accepts either multiple string arguments or a single iterable of
        components.  Unless clean=False is passed, trailing slashes are
        stripped from each component before joining.
        """
        # BUG FIX: only unpack a single argument when it is a non-string
        # iterable.  Previously a lone string was also unpacked, which
        # split it into individual characters ('abc' -> 'a/b/c').
        if len(paths) == 1 and not isinstance(paths[0], str):
            paths = paths[0]
        if kwargs.get('clean', True):
            paths = [p.rstrip('/') for p in paths]
        return '/'.join(paths)

    def request(self, method, url, format='json', decode=True, **kwargs):
        """Issue a request, appending '.{format}' to the URL; optionally
        decode the JSON body."""
        if format:
            url = '.'.join((url, format))
        r = self._api.request(method, url, **kwargs)
        if decode:
            r = r.json()
        return r

    def get(self, url, format='json', decode=True, **kwargs):
        """GET convenience wrapper around request()."""
        return self.request('GET', url=url, format=format, decode=decode,
                            **kwargs)

    def post(self, url, format='json', decode=True, **kwargs):
        """POST convenience wrapper around request()."""
        return self.request('POST', url=url, format=format, decode=decode,
                            **kwargs)
class Account(object):
    """Wrapper around the /accounts/{account_id} endpoints."""

    def __init__(self, api, account_id):
        # api: shared API session wrapper; account_id: TradeKing account id.
        self._api = api
        self.account_id = account_id

    def _get(self, what=None, **kwargs):
        # Build /accounts/{id}[/{what}] and issue the GET.
        # Note: join() is passed a single list of components here.
        params = [BASE_URL, 'accounts', self.account_id]
        if what is not None:
            params.append(what)
        path = self._api.join(params)
        return self._api.get(path, **kwargs)

    def _balances(self, **kwargs):
        # Raw GET of /accounts/{id}/balances.
        return self._get('balances', **kwargs)

    def _history(self, date_range='all', transactions='all', **kwargs):
        # Raw GET of /accounts/{id}/history with range/type filters.
        params = {'range': date_range, 'transactions': transactions}
        return self._get('history', params=params, **kwargs)

    def _holdings(self, **kwargs):
        # Raw GET of /accounts/{id}/holdings.
        return self._get('holdings', **kwargs)

    def _orders(self, **kwargs):
        # Raw GET of /accounts/{id}/orders.
        return self._get('orders', **kwargs)

    @property
    def balances(self):
        """Account balance summary (decoded payload)."""
        r = self._balances()
        return r['response']['accountbalance']

    def history(self, date_range='all', transactions='all'):
        """Transaction history for the given range and transaction type."""
        r = self._history(date_range=date_range, transactions=transactions)
        return r['response']['transactions']['transaction']

    @property
    def holdings(self):
        """Current account holdings (decoded payload)."""
        r = self._holdings()
        return r['response']['accountholdings']['holding']

    # TODO(jkoelker)
    def order(self, order, preview=True):
        pass

    @property
    def orders(self):
        """Status of the account's orders (decoded payload)."""
        r = self._orders()
        return r['response']['orderstatus']
class News(object):
    """Access to the TradeKing /market/news endpoints."""

    def __init__(self, api):
        self._api = api

    def _article(self, article_id, **kwargs):
        # Raw GET of /market/news/{article_id}.
        path = self._api.join(BASE_URL, 'market', 'news', article_id)
        return self._api.get(path, **kwargs)

    def _search(self, keywords=None, symbols=None, maxhits=None,
                startdate=None, enddate=None, **kwargs):
        """POST a news search.

        Requires keywords and/or symbols; startdate and enddate must be
        supplied together.

        Raises:
            ValueError: if neither keywords nor symbols is given, or if
                only one of startdate/enddate is given.
        """
        if not keywords and not symbols:
            raise ValueError('Either keywords or symbols are required')
        data = {}
        if keywords:
            if isinstance(keywords, str):
                keywords = [keywords]
            data['keywords'] = ','.join(keywords)
        if symbols:
            if isinstance(symbols, str):
                symbols = [symbols]
            data['symbols'] = ','.join(symbols)
        if maxhits:
            data['maxhits'] = maxhits
        # TODO(jkoelker) calculate enddate to be now()
        # Exactly-one-of check; also fixes the 'endate' typo in the message.
        if bool(startdate) != bool(enddate):
            raise ValueError('Both startdate and enddate are required if one '
                             'is specified')
        if startdate and enddate:
            data['startdate'] = startdate
            data['enddate'] = enddate
        path = self._api.join(BASE_URL, 'market', 'news', 'search')
        return self._api.post(path, data=data, **kwargs)

    def article(self, article_id):
        """Return the article payload for article_id."""
        r = self._article(article_id=article_id)
        return r['response']['article']

    def search(self, keywords=None, symbols=None, maxhits=None, startdate=None,
               enddate=None):
        """Search news and return the list of article dicts."""
        r = self._search(keywords=keywords, symbols=symbols, maxhits=maxhits,
                         startdate=startdate, enddate=enddate)
        return r['response']['articles']['article']
class Options(object):
    """Access to the /market/options endpoints (expirations, search, strikes)."""

    def __init__(self, api, market):
        self._api = api
        # Market facade used to fetch quotes for generated option symbols.
        self._market = market

    # Re-exported helpers for building/parsing option symbols.
    symbol = staticmethod(utils.option_symbol)
    symbols = staticmethod(utils.option_symbols)
    decode = staticmethod(utils.parse_option_symbol)

    def _expirations(self, symbol, **kwargs):
        # Raw GET of /market/options/expirations for one underlying.
        params = {'symbol': symbol}
        path = self._api.join(BASE_URL, 'market', 'options', 'expirations')
        return self._api.get(path, params=params, **kwargs)

    def _search(self, symbol, query, fields=None, query_is_prepared=False,
                **kwargs):
        # Raw POST of /market/options/search.  The query is wrapped in an
        # OptionQuery unless the caller marks it as already prepared.
        if not isinstance(query, OptionQuery) and not query_is_prepared:
            query = OptionQuery(query)
        data = {'symbol': symbol, 'query': query}
        if fields is not None:
            data['fids'] = ','.join(fields)
        path = self._api.join(BASE_URL, 'market', 'options', 'search')
        return self._api.post(path, data=data, **kwargs)

    def _strikes(self, symbol, **kwargs):
        # Raw GET of /market/options/strikes for one underlying.
        params = {'symbol': symbol}
        path = self._api.join(BASE_URL, 'market', 'options', 'strikes')
        return self._api.get(path, params=params, **kwargs)

    def expirations(self, symbol):
        """Expiration dates for symbol as a datetime Series."""
        r = self._expirations(symbol=symbol)
        expirations = r['response']['expirationdates']['date']
        return pd.to_datetime(pd.Series(expirations))

    def search(self, symbol, query, fields=None):
        """Search the option chain; returns a DataFrame of quotes."""
        r = self._search(symbol=symbol, query=query, fields=fields)
        return _quotes_to_df(r['response']['quotes']['quote'])

    def strikes(self, symbol):
        """Strike prices for symbol as a float Series."""
        r = self._strikes(symbol=symbol)
        strikes = r['response']['prices']['price']
        return pd.Series(strikes, dtype=float)

    def quote(self, symbol, strikes=None, expirations=None, calls=True,
              puts=True, fields=None):
        """Quote every call/put combination of strikes x expirations.

        Missing strikes/expirations are fetched from the API first.
        """
        if strikes is None:
            strikes = self.strikes(symbol)
        if expirations is None:
            expirations = self.expirations(symbol)
        symbols = utils.option_symbols(symbol, expirations, strikes, calls,
                                       puts)
        return self._market.quotes(symbols=symbols, fields=fields)
class Market(object):
    """Facade over the /market endpoints, plus news and options helpers."""

    def __init__(self, api):
        self._api = api
        self.news = News(self._api)
        self.options = Options(self._api, self)

    def _clock(self, **kwargs):
        # Raw GET of /market/clock.
        path = self._api.join(BASE_URL, 'market', 'clock')
        return self._api.get(path, **kwargs)

    def _quotes(self, symbols, fields=None, **kwargs):
        # Raw POST of /market/ext/quotes.  Symbols may be a sequence or a
        # pre-joined comma-separated string.
        if isinstance(symbols, (list, tuple)):
            symbols = ','.join(symbols)
        params = {'symbols': symbols}
        if fields is not None:
            params['fids'] = ','.join(fields)
        path = self._api.join(BASE_URL, 'market', 'ext', 'quotes')
        return self._api.post(path, data=params, **kwargs)

    def _toplist(self, list_type='toppctgainers', **kwargs):
        # Raw GET of /market/toplists/{list_type}.
        path = self._api.join(BASE_URL, 'market', 'toplists', list_type)
        return self._api.get(path, **kwargs)

    @property
    def clock(self):
        """Market clock payload (with the internal '@id' key removed)."""
        r = self._clock()
        r = r['response']
        del r['@id']
        return r

    def quotes(self, symbols, fields=None):
        """Quotes for one or more symbols as a DataFrame."""
        r = self._quotes(symbols=symbols, fields=fields)
        return _quotes_to_df(r['response']['quotes']['quote'])

    def toplist(self, list_type='toppctgainers'):
        """A market toplist (default: top % gainers) as a DataFrame."""
        r = self._toplist(list_type=list_type)
        return _quotes_to_df(r['response']['quotes']['quote'])
# TODO(jkoelker) market/timesales
# TODO(jkoelker) market/quotes (iterator)
class TradeKing(object):
    """Top-level client tying together the OAuth session and market access."""

    def __init__(self, consumer_key, consumer_secret,
                 oauth_token, oauth_secret):
        self._api = API(consumer_key=consumer_key,
                        consumer_secret=consumer_secret,
                        oauth_token=oauth_token,
                        oauth_secret=oauth_secret)
        self.market = Market(self._api)

    def _accounts(self, **kwargs):
        # BUG FIX: urllib.parse.urljoin(BASE_URL, 'accounts') resolves the
        # relative reference against the parent of '/v1', producing
        # https://api.tradeking.com/accounts (the '/v1' segment was lost).
        # Build the path the same way every other endpoint does.
        path = self._api.join(BASE_URL, 'accounts')
        return self._api.get(path, **kwargs)

    def account(self, account_id):
        """Return an Account helper bound to account_id."""
        return Account(self._api, account_id)

    # TODO(jkoelker) member/profile
    # TODO(jkoelker) utility/status
    # TODO(jkoelker) utility/version
    # TODO(jkoelker) watchlists
# TODO(jkoelker) watchlists | tradeking/api.py |
import urllib.parse
import requests_oauthlib as roauth
import pandas as pd
from tradeking import utils
BASE_URL = 'https://api.tradeking.com/v1'
_DATE_KEYS = ('date', 'datetime', 'divexdate', 'divpaydt', 'timestamp',
'pr_date', 'wk52hidate', 'wk52lodate', 'xdate')
_FLOAT_KEYS = ('ask', 'bid', 'chg', 'cl', 'div', 'dollar_value', 'eps',
'hi', 'iad', 'idelta', 'igamma', 'imp_volatility', 'irho',
'itheta', 'ivega', 'last', 'lo', 'opn', 'opt_val', 'pchg',
'pcls', 'pe', 'phi', 'plo', 'popn', 'pr_adp_100', 'pr_adp_200',
'pr_adp_50', 'prbook', 'prchg', 'strikeprice', 'volatility12',
'vwap', 'wk52hi', 'wk52lo', 'yield')
_INT_KEYS = ('asksz', 'basis', 'bidsz', 'bidtick', 'days_to_expiration',
'incr_vl', 'openinterest', 'pr_openinterest', 'prem_mult', 'pvol',
'sho', 'tr_num', 'vl', 'xday', 'xmonth', 'xyear')
def _quotes_to_df(quotes):
if not isinstance(quotes, list):
quotes = [quotes]
df = pd.DataFrame.from_records(quotes, index='symbol')
for col in df.keys().intersection(_DATE_KEYS):
kwargs = {}
if col == 'timestamp':
kwargs['unit'] = 's'
try:
df[col] = pd.to_datetime(df[col], **kwargs)
except ValueError:
pass
for col in df.keys().intersection(_INT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('int', errors='ignore')
for col in df.keys().intersection(_FLOAT_KEYS):
cleaned = df[col].str.replace(r'[$,%]', '')
df[col] = cleaned.astype('float', errors='ignore')
return df
# TODO(jkoelker) Would be nice to do a proper DSL
class OptionQuery(object):
FIELDS = ('strikeprice', 'xdate', 'xmonth', 'xyear', 'put_call', 'unique')
OPS = {'<': 'lt', 'lt': 'lt',
'>': 'gt', 'gt': 'gt',
'>=': 'gte', 'gte': 'gte',
'<=': 'lte', 'lte': 'lte',
'=': 'eq', '==': 'eq', 'eq': 'eq'}
def __init__(self, query):
if isinstance(query, str):
query = [query]
self._query = []
for part in query:
field, op, value = part.split()
field = field.lower()
if field not in self.FIELDS or op not in self.OPS:
continue
if field == 'xdate':
value = pd.to_datetime(value).strftime('%Y%m%d')
self._query.append((field, self.OPS[op], value))
def __str__(self):
return ' AND '.join(['%s-%s:%s' % (field, op, value)
for field, op, value in self._query])
class API(object):
    """Thin OAuth1 HTTP wrapper around the TradeKing REST endpoints."""

    def __init__(self, consumer_key, consumer_secret,
                 oauth_token, oauth_secret):
        self._api = roauth.OAuth1Session(client_key=consumer_key,
                                         client_secret=consumer_secret,
                                         resource_owner_key=oauth_token,
                                         resource_owner_secret=oauth_secret)

    def join(self, *paths, **kwargs):
        """Join URL path components with '/'.

        Accepts either multiple string arguments or a single iterable of
        components.  Unless clean=False is passed, trailing slashes are
        stripped from each component before joining.
        """
        # BUG FIX: only unpack a single argument when it is a non-string
        # iterable.  Previously a lone string was also unpacked, which
        # split it into individual characters ('abc' -> 'a/b/c').
        if len(paths) == 1 and not isinstance(paths[0], str):
            paths = paths[0]
        if kwargs.get('clean', True):
            paths = [p.rstrip('/') for p in paths]
        return '/'.join(paths)

    def request(self, method, url, format='json', decode=True, **kwargs):
        """Issue a request, appending '.{format}' to the URL; optionally
        decode the JSON body."""
        if format:
            url = '.'.join((url, format))
        r = self._api.request(method, url, **kwargs)
        if decode:
            r = r.json()
        return r

    def get(self, url, format='json', decode=True, **kwargs):
        """GET convenience wrapper around request()."""
        return self.request('GET', url=url, format=format, decode=decode,
                            **kwargs)

    def post(self, url, format='json', decode=True, **kwargs):
        """POST convenience wrapper around request()."""
        return self.request('POST', url=url, format=format, decode=decode,
                            **kwargs)
class Account(object):
def __init__(self, api, account_id):
self._api = api
self.account_id = account_id
def _get(self, what=None, **kwargs):
params = [BASE_URL, 'accounts', self.account_id]
if what is not None:
params.append(what)
path = self._api.join(params)
return self._api.get(path, **kwargs)
def _balances(self, **kwargs):
return self._get('balances', **kwargs)
def _history(self, date_range='all', transactions='all', **kwargs):
params = {'range': date_range, 'transactions': transactions}
return self._get('history', params=params, **kwargs)
def _holdings(self, **kwargs):
return self._get('holdings', **kwargs)
def _orders(self, **kwargs):
return self._get('orders', **kwargs)
@property
def balances(self):
r = self._balances()
return r['response']['accountbalance']
def history(self, date_range='all', transactions='all'):
r = self._history(date_range=date_range, transactions=transactions)
return r['response']['transactions']['transaction']
@property
def holdings(self):
r = self._holdings()
return r['response']['accountholdings']['holding']
# TODO(jkoelker)
def order(self, order, preview=True):
pass
@property
def orders(self):
r = self._orders()
return r['response']['orderstatus']
class News(object):
    """Access to the TradeKing /market/news endpoints."""

    def __init__(self, api):
        self._api = api

    def _article(self, article_id, **kwargs):
        # Raw GET of /market/news/{article_id}.
        path = self._api.join(BASE_URL, 'market', 'news', article_id)
        return self._api.get(path, **kwargs)

    def _search(self, keywords=None, symbols=None, maxhits=None,
                startdate=None, enddate=None, **kwargs):
        """POST a news search.

        Requires keywords and/or symbols; startdate and enddate must be
        supplied together.

        Raises:
            ValueError: if neither keywords nor symbols is given, or if
                only one of startdate/enddate is given.
        """
        if not keywords and not symbols:
            raise ValueError('Either keywords or symbols are required')
        data = {}
        if keywords:
            if isinstance(keywords, str):
                keywords = [keywords]
            data['keywords'] = ','.join(keywords)
        if symbols:
            if isinstance(symbols, str):
                symbols = [symbols]
            data['symbols'] = ','.join(symbols)
        if maxhits:
            data['maxhits'] = maxhits
        # TODO(jkoelker) calculate enddate to be now()
        # Exactly-one-of check; also fixes the 'endate' typo in the message.
        if bool(startdate) != bool(enddate):
            raise ValueError('Both startdate and enddate are required if one '
                             'is specified')
        if startdate and enddate:
            data['startdate'] = startdate
            data['enddate'] = enddate
        path = self._api.join(BASE_URL, 'market', 'news', 'search')
        return self._api.post(path, data=data, **kwargs)

    def article(self, article_id):
        """Return the article payload for article_id."""
        r = self._article(article_id=article_id)
        return r['response']['article']

    def search(self, keywords=None, symbols=None, maxhits=None, startdate=None,
               enddate=None):
        """Search news and return the list of article dicts."""
        r = self._search(keywords=keywords, symbols=symbols, maxhits=maxhits,
                         startdate=startdate, enddate=enddate)
        return r['response']['articles']['article']
class Options(object):
def __init__(self, api, market):
self._api = api
self._market = market
symbol = staticmethod(utils.option_symbol)
symbols = staticmethod(utils.option_symbols)
decode = staticmethod(utils.parse_option_symbol)
def _expirations(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'expirations')
return self._api.get(path, params=params, **kwargs)
def _search(self, symbol, query, fields=None, query_is_prepared=False,
**kwargs):
if not isinstance(query, OptionQuery) and not query_is_prepared:
query = OptionQuery(query)
data = {'symbol': symbol, 'query': query}
if fields is not None:
data['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'options', 'search')
return self._api.post(path, data=data, **kwargs)
def _strikes(self, symbol, **kwargs):
params = {'symbol': symbol}
path = self._api.join(BASE_URL, 'market', 'options', 'strikes')
return self._api.get(path, params=params, **kwargs)
def expirations(self, symbol):
r = self._expirations(symbol=symbol)
expirations = r['response']['expirationdates']['date']
return pd.to_datetime(pd.Series(expirations))
def search(self, symbol, query, fields=None):
r = self._search(symbol=symbol, query=query, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def strikes(self, symbol):
r = self._strikes(symbol=symbol)
strikes = r['response']['prices']['price']
return pd.Series(strikes, dtype=float)
def quote(self, symbol, strikes=None, expirations=None, calls=True,
puts=True, fields=None):
if strikes is None:
strikes = self.strikes(symbol)
if expirations is None:
expirations = self.expirations(symbol)
symbols = utils.option_symbols(symbol, expirations, strikes, calls,
puts)
return self._market.quotes(symbols=symbols, fields=fields)
class Market(object):
def __init__(self, api):
self._api = api
self.news = News(self._api)
self.options = Options(self._api, self)
def _clock(self, **kwargs):
path = self._api.join(BASE_URL, 'market', 'clock')
return self._api.get(path, **kwargs)
def _quotes(self, symbols, fields=None, **kwargs):
if isinstance(symbols, (list, tuple)):
symbols = ','.join(symbols)
params = {'symbols': symbols}
if fields is not None:
params['fids'] = ','.join(fields)
path = self._api.join(BASE_URL, 'market', 'ext', 'quotes')
return self._api.post(path, data=params, **kwargs)
def _toplist(self, list_type='toppctgainers', **kwargs):
path = self._api.join(BASE_URL, 'market', 'toplists', list_type)
return self._api.get(path, **kwargs)
@property
def clock(self):
r = self._clock()
r = r['response']
del r['@id']
return r
def quotes(self, symbols, fields=None):
r = self._quotes(symbols=symbols, fields=fields)
return _quotes_to_df(r['response']['quotes']['quote'])
def toplist(self, list_type='toppctgainers'):
r = self._toplist(list_type=list_type)
return _quotes_to_df(r['response']['quotes']['quote'])
# TODO(jkoelker) market/timesales
# TODO(jkoelker) market/quotes (iterator)
class TradeKing(object):
    """Top-level client tying together the OAuth session and market access."""

    def __init__(self, consumer_key, consumer_secret,
                 oauth_token, oauth_secret):
        self._api = API(consumer_key=consumer_key,
                        consumer_secret=consumer_secret,
                        oauth_token=oauth_token,
                        oauth_secret=oauth_secret)
        self.market = Market(self._api)

    def _accounts(self, **kwargs):
        # BUG FIX: urllib.parse.urljoin(BASE_URL, 'accounts') resolves the
        # relative reference against the parent of '/v1', producing
        # https://api.tradeking.com/accounts (the '/v1' segment was lost).
        # Build the path the same way every other endpoint does.
        path = self._api.join(BASE_URL, 'accounts')
        return self._api.get(path, **kwargs)

    def account(self, account_id):
        """Return an Account helper bound to account_id."""
        return Account(self._api, account_id)

    # TODO(jkoelker) member/profile
    # TODO(jkoelker) utility/status
    # TODO(jkoelker) utility/version
    # TODO(jkoelker) watchlists
# TODO(jkoelker) watchlists | 0.416559 | 0.245582 |
"""Tests for meteofrance module. Helpers."""
from typing import List
import pytest
from meteofrance_api.helpers import get_phenomenon_name_from_indice
from meteofrance_api.helpers import get_warning_text_status_from_indice_color
from meteofrance_api.helpers import is_coastal_department
from meteofrance_api.helpers import is_valid_warning_department
from meteofrance_api.helpers import readeable_phenomenoms_dict
from meteofrance_api.helpers import sort_places_versus_distance_from_coordinates
from meteofrance_api.model import Place
from meteofrance_api.model.place import PlaceData
def test_text_helpers_fr() -> None:
    """Test helpers to have readable alert type and alert level in French."""
    # French is the default language for both helpers.
    assert get_warning_text_status_from_indice_color(1) == "Vert"
    assert get_phenomenon_name_from_indice(2) == "Pluie-inondation"
def test_get_warning_text_status_from_indice_color_en() -> None:
    """Test helpers to have readable alert type and alert level in English."""
    # Passing "en" switches both helpers to English labels.
    assert get_warning_text_status_from_indice_color(4, "en") == "Red"
    assert get_phenomenon_name_from_indice(4, "en") == "Flood"
@pytest.mark.parametrize("dep, res", [("03", False), ("06", True), ("2B", True)])
def test_is_coastal_department(dep: str, res: bool) -> None:
    """Test the helper checking if an additional coastal department bulletin exists."""
    assert is_coastal_department(dep) == res
@pytest.mark.parametrize(
    "dep, res", [("03", True), ("98", False), ("2B", True), ("test", False)]
)
def test_is_valid_warning_department(dep: str, res: bool) -> None:
    """Test the helper checking if a department has a weather alert bulletin."""
    assert is_valid_warning_department(dep) == res
def test_readeable_phenomenoms_dict() -> None:
    """Test the helper constructing a human readable dictionary for phenomenoms."""
    # Raw API payload: phenomenon / colour IDs as returned by the warnings
    # endpoint, deliberately unordered.
    api_list = [
        {"phenomenon_id": 4, "phenomenon_max_color_id": 1},
        {"phenomenon_id": 5, "phenomenon_max_color_id": 1},
        {"phenomenon_id": 3, "phenomenon_max_color_id": 2},
        {"phenomenon_id": 2, "phenomenon_max_color_id": 1},
        {"phenomenon_id": 1, "phenomenon_max_color_id": 3},
    ]
    # Expected mapping of French phenomenon names to French colour labels.
    expected_dictionary = {
        "Inondation": "Vert",
        "Neige-verglas": "Vert",
        "Pluie-inondation": "Vert",
        "Orages": "Jaune",
        "Vent violent": "Orange",
    }
    assert readeable_phenomenoms_dict(api_list) == expected_dictionary
def test_sort_places_versus_distance_from_coordinates() -> None:
    """Test the helper ordering the Places list returned by the search.

    The fixture contains many places named "Montréal" (and variants) in
    several countries; sorting from Auch's coordinates must rank the one
    in Gers (32) first and the one in Aude (11) second.
    """
    json_places: List[PlaceData] = [
        {
            "insee": "11254",
            "name": "Montréal",
            "lat": 43.2,
            "lon": 2.14083,
            "country": "FR",
            "admin": "Languedoc-Roussillon",
            "admin2": "11",
            "postCode": "11290",
        },
        {
            "insee": "32290",
            "name": "Montréal",
            "lat": 43.95,
            "lon": 0.20222,
            "country": "FR",
            "admin": "Midi-Pyrénées",
            "admin2": "32",
            "postCode": "32250",
        },
        {
            "insee": "07162",
            "name": "Montréal",
            "lat": 44.5284,
            "lon": 4.2938,
            "country": "FR",
            "admin": "Rhône-Alpes",
            "admin2": "07",
            "postCode": "07110",
        },
        {
            "insee": "89267",
            "name": "Montréal",
            "lat": 47.54222,
            "lon": 4.03611,
            "country": "FR",
            "admin": "Bourgogne",
            "admin2": "89",
            "postCode": "89420",
        },
        {
            "insee": "null",
            "name": "Montréal",
            "lat": 45.50884,
            "lon": -73.58781,
            "country": "CA",
            "admin": "Quebec",
            "admin2": "06",
            "postCode": "null",
        },
        {
            "insee": "01265",
            "name": "Montréal-la-Cluse",
            "lat": 46.1871,
            "lon": 5.5709,
            "country": "FR",
            "admin": "Rhône-Alpes",
            "admin2": "01",
            "postCode": "01460",
        },
        {
            "insee": "26209",
            "name": "Montréal-les-Sources",
            "lat": 44.40139,
            "lon": 5.3,
            "country": "FR",
            "admin": "Rhône-Alpes",
            "admin2": "26",
            "postCode": "26510",
        },
        {
            "insee": "null",
            "name": "Montréal-Ouest",
            "lat": 45.45286,
            "lon": -73.64918,
            "country": "CA",
            "admin": "Quebec",
            "admin2": "null",
            "postCode": "null",
        },
        {
            "insee": "null",
            "name": "Montréal-Est",
            "lat": 45.63202,
            "lon": -73.5075,
            "country": "CA",
            "admin": "Quebec",
            "admin2": "null",
            "postCode": "null",
        },
        {
            "insee": "11432",
            "name": "Villeneuve-lès-Montréal",
            "lat": 43.18,
            "lon": 2.11139,
            "country": "FR",
            "admin": "Languedoc-Roussillon",
            "admin2": "11",
            "postCode": "11290",
        },
        {
            "insee": "null",
            "name": "<NAME>",
            "lat": 46.1511,
            "lon": 12.64771,
            "country": "IT",
            "admin": "<NAME>",
            "admin2": "PN",
            "postCode": "null",
        },
        {
            "insee": "null",
            "name": "Mont-ral",
            "lat": 41.28333,
            "lon": 1.1,
            "country": "ES",
            "admin": "Catalonia",
            "admin2": "T",
            "postCode": "null",
        },
    ]
    list_places = [Place(place_data) for place_data in json_places]
    # Sort Places by distance from Auch (32) coordinates.
    list_places_ordered = sort_places_versus_distance_from_coordinates(
        list_places, (43.64528, 0.58861)
    )
    # first one should be in Gers
    assert list_places_ordered[0].admin2 == "32"
    # second in Aude
    assert list_places_ordered[1].admin2 == "11" | tests/test_helpers.py | """Tests for meteofrance module. Helpers."""
from typing import List
import pytest
from meteofrance_api.helpers import get_phenomenon_name_from_indice
from meteofrance_api.helpers import get_warning_text_status_from_indice_color
from meteofrance_api.helpers import is_coastal_department
from meteofrance_api.helpers import is_valid_warning_department
from meteofrance_api.helpers import readeable_phenomenoms_dict
from meteofrance_api.helpers import sort_places_versus_distance_from_coordinates
from meteofrance_api.model import Place
from meteofrance_api.model.place import PlaceData
def test_text_helpers_fr() -> None:
"""Test helpers to have readable alert type and alert level in French."""
assert get_warning_text_status_from_indice_color(1) == "Vert"
assert get_phenomenon_name_from_indice(2) == "Pluie-inondation"
def test_get_warning_text_status_from_indice_color_en() -> None:
"""Test helpers to have readable alert type and alert level in English."""
assert get_warning_text_status_from_indice_color(4, "en") == "Red"
assert get_phenomenon_name_from_indice(4, "en") == "Flood"
@pytest.mark.parametrize("dep, res", [("03", False), ("06", True), ("2B", True)])
def test_is_coastal_department(dep: str, res: bool) -> None:
"""Test the helper checking if an additional coastal departement bulletin exist."""
assert is_coastal_department(dep) == res
@pytest.mark.parametrize(
"dep, res", [("03", True), ("98", False), ("2B", True), ("test", False)]
)
def test_is_valid_warning_department(dep: str, res: bool) -> None:
"""Test the helper checking if departent has a weather alert bulletin."""
assert is_valid_warning_department(dep) == res
def test_readeable_phenomenoms_dict() -> None:
"""Test the helper constructing a human readable dictionary for phenomenom."""
api_list = [
{"phenomenon_id": 4, "phenomenon_max_color_id": 1},
{"phenomenon_id": 5, "phenomenon_max_color_id": 1},
{"phenomenon_id": 3, "phenomenon_max_color_id": 2},
{"phenomenon_id": 2, "phenomenon_max_color_id": 1},
{"phenomenon_id": 1, "phenomenon_max_color_id": 3},
]
expected_dictionary = {
"Inondation": "Vert",
"Neige-verglas": "Vert",
"Pluie-inondation": "Vert",
"Orages": "Jaune",
"Vent violent": "Orange",
}
assert readeable_phenomenoms_dict(api_list) == expected_dictionary
def test_sort_places_versus_distance_from_coordinates() -> None:
"""Test the helper to order the Places list return by the search."""
json_places: List[PlaceData] = [
{
"insee": "11254",
"name": "Montréal",
"lat": 43.2,
"lon": 2.14083,
"country": "FR",
"admin": "Languedoc-Roussillon",
"admin2": "11",
"postCode": "11290",
},
{
"insee": "32290",
"name": "Montréal",
"lat": 43.95,
"lon": 0.20222,
"country": "FR",
"admin": "Midi-Pyrénées",
"admin2": "32",
"postCode": "32250",
},
{
"insee": "07162",
"name": "Montréal",
"lat": 44.5284,
"lon": 4.2938,
"country": "FR",
"admin": "Rhône-Alpes",
"admin2": "07",
"postCode": "07110",
},
{
"insee": "89267",
"name": "Montréal",
"lat": 47.54222,
"lon": 4.03611,
"country": "FR",
"admin": "Bourgogne",
"admin2": "89",
"postCode": "89420",
},
{
"insee": "null",
"name": "Montréal",
"lat": 45.50884,
"lon": -73.58781,
"country": "CA",
"admin": "Quebec",
"admin2": "06",
"postCode": "null",
},
{
"insee": "01265",
"name": "Montréal-la-Cluse",
"lat": 46.1871,
"lon": 5.5709,
"country": "FR",
"admin": "Rhône-Alpes",
"admin2": "01",
"postCode": "01460",
},
{
"insee": "26209",
"name": "Montréal-les-Sources",
"lat": 44.40139,
"lon": 5.3,
"country": "FR",
"admin": "Rhône-Alpes",
"admin2": "26",
"postCode": "26510",
},
{
"insee": "null",
"name": "Montréal-Ouest",
"lat": 45.45286,
"lon": -73.64918,
"country": "CA",
"admin": "Quebec",
"admin2": "null",
"postCode": "null",
},
{
"insee": "null",
"name": "Montréal-Est",
"lat": 45.63202,
"lon": -73.5075,
"country": "CA",
"admin": "Quebec",
"admin2": "null",
"postCode": "null",
},
{
"insee": "11432",
"name": "Villeneuve-lès-Montréal",
"lat": 43.18,
"lon": 2.11139,
"country": "FR",
"admin": "Languedoc-Roussillon",
"admin2": "11",
"postCode": "11290",
},
{
"insee": "null",
"name": "<NAME>",
"lat": 46.1511,
"lon": 12.64771,
"country": "IT",
"admin": "<NAME>",
"admin2": "PN",
"postCode": "null",
},
{
"insee": "null",
"name": "Mont-ral",
"lat": 41.28333,
"lon": 1.1,
"country": "ES",
"admin": "Catalonia",
"admin2": "T",
"postCode": "null",
},
]
list_places = [Place(place_data) for place_data in json_places]
# Sort Places by distance from Auch (32) coordinates.
list_places_ordered = sort_places_versus_distance_from_coordinates(
list_places, (43.64528, 0.58861)
)
# first one should be in Gers
assert list_places_ordered[0].admin2 == "32"
# second in Aude
assert list_places_ordered[1].admin2 == "11" | 0.888976 | 0.427098 |
from blacksheep import Content, Request, Response
from blacksheep.client import ClientSession
from blacksheep.server import Application
from modules.rand import randimg
from modules import ip_todo,sql_todo,qq_todo,randimg_todo,yiyan_todo
from app import docs,service,router
from dataclass import sql,httpclient,config
import orjson
from blacksheep.plugins import json
from config import _ApiConfig
# Load the typed API configuration from the DI service provider.
Config:_ApiConfig = service.build_provider().get(config).config
# Initialise the application
app = Application(router=router,services=service)
# Bind the ReDoc documentation pages
docs.bind_app(app)
# CORS: allow all origins/methods/headers; cache preflight for 30 days
app.use_cors(
    allow_methods="*",
    allow_origins="*",
    allow_headers="*",
    max_age=2592000,
)
# Serialise with orjson (BlackSheep expects a str back)
def serialize(value) -> str:
    return orjson.dumps(value).decode("utf8")
# Pretty-printed JSON output via orjson
def pretty_json_dumps(obj):
    return orjson.dumps(obj,option=orjson.OPT_INDENT_2).decode("utf8")
# Register orjson as BlackSheep's JSON implementation
json.use(
    loads = orjson.loads,
    dumps = serialize,
    pretty_dumps=pretty_json_dumps
)
# Uniform JSON 500 response for unhandled exceptions
def handler_error(request: Request, exc: Exception) -> Response:
    # NOTE(review): echoing the raw exception text to clients may leak
    # internal details — consider logging it and returning a generic message.
    return Response(
        status = 500,
        content = Content(
            b"application/json",
            orjson.dumps({'status':500,'error':f'{exc}'})
        )
    )
app.handle_internal_server_error = handler_error
# Lifecycle: before start — build shared singletons and register them in DI
@app.on_start
async def before_start(app: Application) -> None:
    # One shared HTTP client instance reused by all modules below.
    http_client = ClientSession(follow_redirects=False)
    app.services.add_instance(http_client, declared_class=httpclient)
    app.services.add_instance(sql_todo.sqlite(), declared_class=sql)
    provider = app.services.build_provider()
    app.services.add_instance(qq_todo._qq(http_client,provider.get(sql)))
    app.services.add_instance(ip_todo._ip(http_client, Config.module['ip']['key'] ,provider.get(sql)))
    app.services.add_instance(yiyan_todo._yiyan(http_client))
    app.services.add_instance(randimg.Randimg())
# Lifecycle: after start — run async initialisation that needs the services
@app.after_start
async def after_start(app: Application) -> None:
    provider = app.services.build_provider()
    # NOTE(review): resolved by string key '_yiyan' — verify this matches
    # how the instance was registered in before_start.
    yiyan:yiyan_todo._yiyan = provider.get('_yiyan')
    await yiyan.init()
# Lifecycle: on stop — close the database and the shared HTTP client
@app.on_stop
async def on_stop(app: Application) -> None:
    await app.service_provider[sql].close()
    await app.service_provider[httpclient].close()
if __name__ == '__main__':
import uvicorn
uvicorn.run(app=app, port = Config._global['port'] ,limit_concurrency=500) | main.py | from blacksheep import Content, Request, Response
from blacksheep.client import ClientSession
from blacksheep.server import Application
from modules.rand import randimg
from modules import ip_todo,sql_todo,qq_todo,randimg_todo,yiyan_todo
from app import docs,service,router
from dataclass import sql,httpclient,config
import orjson
from blacksheep.plugins import json
from config import _ApiConfig
Config:_ApiConfig = service.build_provider().get(config).config
# 初始化
app = Application(router=router,services=service)
# redoc文档
docs.bind_app(app)
# CORS跨域问题
app.use_cors(
allow_methods="*",
allow_origins="*",
allow_headers="*",
max_age=2592000,
)
# 使用orjson
def serialize(value) -> str:
return orjson.dumps(value).decode("utf8")
# 使用orjson格式化输出
def pretty_json_dumps(obj):
return orjson.dumps(obj,option=orjson.OPT_INDENT_2).decode("utf8")
# 使Blacksheep绑定
json.use(
loads = orjson.loads,
dumps = serialize,
pretty_dumps=pretty_json_dumps
)
# 发生错误时返回
def handler_error(request: Request, exc: Exception) -> Response:
return Response(
status = 500,
content = Content(
b"application/json",
orjson.dumps({'status':500,'error':f'{exc}'})
)
)
app.handle_internal_server_error = handler_error
# 生命周期:启动前
@app.on_start
async def before_start(app: Application) -> None:
http_client = ClientSession(follow_redirects=False)
app.services.add_instance(http_client, declared_class=httpclient)
app.services.add_instance(sql_todo.sqlite(), declared_class=sql)
provider = app.services.build_provider()
app.services.add_instance(qq_todo._qq(http_client,provider.get(sql)))
app.services.add_instance(ip_todo._ip(http_client, Config.module['ip']['key'] ,provider.get(sql)))
app.services.add_instance(yiyan_todo._yiyan(http_client))
app.services.add_instance(randimg.Randimg())
# 生命周期:启动后
@app.after_start
async def after_start(app: Application) -> None:
provider = app.services.build_provider()
yiyan:yiyan_todo._yiyan = provider.get('_yiyan')
await yiyan.init()
# 生命周期:停止时
@app.on_stop
async def on_stop(app: Application) -> None:
await app.service_provider[sql].close()
await app.service_provider[httpclient].close()
if __name__ == '__main__':
import uvicorn
uvicorn.run(app=app, port = Config._global['port'] ,limit_concurrency=500) | 0.221267 | 0.069258 |
from app.api.models.LXDModule import LXDModule
from app.lib.conf import MetaConf
from app.api.utils.firebaseAuthentication import firebaseLogin
from app import __metadata__ as meta
import logging
import requests
import subprocess
import shutil
import os
import yaml
import tarfile
logging = logging.getLogger(__name__)
class LXCImage(LXDModule):
def __init__(self, input):
self.data = {}
if not input.get('remoteHost'):
self.remoteHost = '127.0.0.1'
else:
self.remoteHost = input.get('remoteHost')
if not input.get('fingerprint'):
logging.error('Image fingerprint is required for any image operation')
raise ValueError('Missing image fingerprint.')
self.setFingerprint(input.get('fingerprint'))
if input.get('image'):
self.setImage(input.get('image'))
logging.info('Connecting to LXD')
super(LXCImage, self).__init__(remoteHost=self.remoteHost)
def setAlias(self, input):
logging.debug('Setting image alias to {}'.format(input))
self.data['alias'] = input
def setFingerprint(self, input):
logging.debug('Setting image fingerprint to {}'.format(input))
self.data['fingerprint'] = input
def setImage(self, input):
logging.debug('Setting image to {}'.format(input))
self.data['image'] = input
def getImage(self):
try:
logging.info('Reading image {} details'.format(self.data.get('fingerprint')))
return self.client.api.images[self.data.get('fingerprint')].get().json()['metadata']
except Exception as e:
logging.error('Failed to retrieve information for image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
def deleteImage(self):
try:
logging.info('Deleting image {}'.format(self.data.get('fingerprint')))
image = self.client.images.get(self.data.get('fingerprint'))
image.delete()
except Exception as e:
logging.error('Failed to delete the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
#TODO Refactor this part
def exportImage(self, input, logo=None):
try:
#Check if image exists & Update the fingerprint with the full fingerprint
self.data['fingerprint'] = self.client.images.get(self.data.get('fingerprint')).fingerprint
logging.info('Exporting image {}'.format(self.data.get('fingerprint')))
p2 = subprocess.Popen(["lxc", "image", "export", self.data.get('fingerprint')], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
#Make dir for the export
shutil.rmtree('tmp/images/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
os.makedirs('tmp/images/{}'.format(self.data.get('fingerprint')), exist_ok=True)
#Move the export - Check for both extenstion .tar.gz & .tar.xz
if os.path.exists('{}.tar.gz'.format(self.data.get('fingerprint'))):
shutil.move('{}.tar.gz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.tar.gz'.format(self.data.get('fingerprint'))
if os.path.exists('{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('{}.tar.xz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.tar.xz'.format(self.data.get('fingerprint'))
if os.path.exists('{}.squashfs'.format(self.data.get('fingerprint'))):
shutil.move('{}.squashfs'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.squashfs'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.gz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.tar.gz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.tar.gz'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.tar.xz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.tar.xz'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.squashfs'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.squashfs'.format(self.data.get('fingerprint'))
#Prepare & Move the yaml file
self.prepareImageYAML(input)
shutil.move('image.yaml', 'tmp/images/{}/'.format(self.data.get('fingerprint')))
#TODO Prepare README.md
file = open('tmp/images/{}/README.md'.format(self.data.get('fingerprint')), 'a')
file.write('#README\n')
file.write(input.get('documentation'))
file.close()
#TODO Prepare Logo
if logo:
logo.save('tmp/images/{}/{}'.format(self.data.get('fingerprint'), 'logo.png'))
return MetaConf().getConfRoot() + '/tmp/images/{}'.format(self.data.get('fingerprint'))
except Exception as e:
logging.error('Failed to export the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
def prepareImageYAML(self, input):
if input.get('metadata') == None: input['metadata'] = ''
data = {
'title': input.get('imageAlias', ''),
'description': input.get('imageDescription', ''),
'author': {
'name': input.get('authorName', ''),
'alias': '',
'email': input.get('authorEmail', '')
},
'license': input.get('license', ''),
'readme': 'README.md',
'tags': input.get('imageTags').split(','),
'logo': 'logo.png',
'image': input.get('image'),
'metadata': input.get('metadata'),
'fingerprint': self.data.get('fingerprint'),
'public': True
}
data.update(self.client.api.images[self.data.get('fingerprint')].get().json()['metadata'])
with open('image.yaml', 'w') as yamlFile:
yaml.dump(data, yamlFile, default_flow_style=False)
def pushImage(self, input):
try:
#Login
result = firebaseLogin(input.get('username'), input.get('password'))
if result.ok:
token = result.json()['idToken']
else:
raise ValueError('Login failed: {}'.format(result.json()['error']['message']))
self.data['fingerprint'] = self.client.images.get(self.data.get('fingerprint')).fingerprint
if os.path.exists('tmp/images/{}'.format(self.data.get('fingerprint'))):
logging.info('Image exists. Ready for push.')
print ("Image exists. Ready for push.")
#Prepare the files for upload.
with open('tmp/images/{}/image.yaml'.format(self.data.get('fingerprint'))) as stream:
yamlData = yaml.load(stream)
files = {
'yaml': open('tmp/images/{}/image.yaml'.format(self.data.get('fingerprint'), 'rb'))
}
headers = {'Authorization': token}
response = requests.post('{}/cliAddPackage'.format(meta.IMAGE_HUB), headers=headers, files=files, data={'id': self.data.get('fingerprint')})
if response.ok == False:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
raise ValueError(
response.json()['message'])
return
print("yaml uploaded successfully.")
print("Uploading:")
for file in response.json()['filesRequired']:
for key in file:
files = {}
if file[key] != '':
if os.path.exists('tmp/images/{}/{}'.format(self.data.get('fingerprint'), file[key])):
files['file'] = open('tmp/images/{}/{}'.format(self.data.get('fingerprint'), file[key]), 'rb')
requests.post('{}/cliAddFile'.format(meta.IMAGE_HUB), headers=headers, files=files, data={'id': self.data.get('fingerprint')}).json()
print('File {} uploaded successfully'.format(file[key]))
else:
print('File {} does not exist'.format(file[key]))
else:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
logging.exception('Image is not prepared. Please prepare the image using the command lxdui image prep <fingerprint>')
raise ValueError('Image is not prepared. Please prepare the image using the command: lxdui image prep <fingerprint>')
except Exception as e:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
def importImage(self, input):
logging.info('Importing image {}'.format(self.data.get('fingerprint')))
shutil.rmtree('tmp/downloaded/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
os.makedirs('tmp/downloaded/{}'.format(self.data.get('fingerprint')), exist_ok=True)
# Download and extract the file
r = requests.get('{}/cliDownloadRepo/{}'.format(meta.IMAGE_HUB, self.data.get('fingerprint')), stream=True)
with open('tmp/downloaded/{}/package.tar.gz'.format(self.data.get('fingerprint')), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
tfile = tarfile.open('tmp/downloaded/{}/package.tar.gz'.format(self.data.get('fingerprint')), 'r:gz')
tfile.extractall('tmp/downloaded/{}/'.format(self.data.get('fingerprint')))
with open('tmp/downloaded/{}/image.yaml'.format(self.data.get('fingerprint'))) as stream:
yamlData = yaml.load(stream)
if os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.squashfs".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.squashfs".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
shutil.rmtree('tmp/downloaded/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
image = self.client.images.get(self.data.get('fingerprint'))
image.add_alias(yamlData['title'], yamlData['title'])
# self.client.images.create(image_data='tmp/images/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd.tar.xz',
# metadata='tmp/images/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd/meta-394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd.tar.xz')
def listHub(self, input):
try:
logging.info('Listing images')
output = "# | Title | Fingerprint | OS | Author\n"
result = requests.get('{}/cliListRepos'.format(meta.IMAGE_HUB))
i = 1
for r in result.json():
output += '{} | {} | {} | {} | {}\n'.format(i, r['title'], r['fingerprint'], r['properties'].get('name'), r['author']['name'])
i+=1
return output
except Exception as e:
logging.error('Failed to list images from kuti.io')
logging.exception(e)
raise ValueError(e) | app/api/models/LXCImage.py | from app.api.models.LXDModule import LXDModule
from app.lib.conf import MetaConf
from app.api.utils.firebaseAuthentication import firebaseLogin
from app import __metadata__ as meta
import logging
import requests
import subprocess
import shutil
import os
import yaml
import tarfile
logging = logging.getLogger(__name__)
class LXCImage(LXDModule):
def __init__(self, input):
self.data = {}
if not input.get('remoteHost'):
self.remoteHost = '127.0.0.1'
else:
self.remoteHost = input.get('remoteHost')
if not input.get('fingerprint'):
logging.error('Image fingerprint is required for any image operation')
raise ValueError('Missing image fingerprint.')
self.setFingerprint(input.get('fingerprint'))
if input.get('image'):
self.setImage(input.get('image'))
logging.info('Connecting to LXD')
super(LXCImage, self).__init__(remoteHost=self.remoteHost)
def setAlias(self, input):
logging.debug('Setting image alias to {}'.format(input))
self.data['alias'] = input
def setFingerprint(self, input):
logging.debug('Setting image fingerprint to {}'.format(input))
self.data['fingerprint'] = input
def setImage(self, input):
logging.debug('Setting image to {}'.format(input))
self.data['image'] = input
def getImage(self):
try:
logging.info('Reading image {} details'.format(self.data.get('fingerprint')))
return self.client.api.images[self.data.get('fingerprint')].get().json()['metadata']
except Exception as e:
logging.error('Failed to retrieve information for image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
def deleteImage(self):
try:
logging.info('Deleting image {}'.format(self.data.get('fingerprint')))
image = self.client.images.get(self.data.get('fingerprint'))
image.delete()
except Exception as e:
logging.error('Failed to delete the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
#TODO Refactor this part
def exportImage(self, input, logo=None):
try:
#Check if image exists & Update the fingerprint with the full fingerprint
self.data['fingerprint'] = self.client.images.get(self.data.get('fingerprint')).fingerprint
logging.info('Exporting image {}'.format(self.data.get('fingerprint')))
p2 = subprocess.Popen(["lxc", "image", "export", self.data.get('fingerprint')], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
#Make dir for the export
shutil.rmtree('tmp/images/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
os.makedirs('tmp/images/{}'.format(self.data.get('fingerprint')), exist_ok=True)
#Move the export - Check for both extenstion .tar.gz & .tar.xz
if os.path.exists('{}.tar.gz'.format(self.data.get('fingerprint'))):
shutil.move('{}.tar.gz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.tar.gz'.format(self.data.get('fingerprint'))
if os.path.exists('{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('{}.tar.xz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.tar.xz'.format(self.data.get('fingerprint'))
if os.path.exists('{}.squashfs'.format(self.data.get('fingerprint'))):
shutil.move('{}.squashfs'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['image'] = '{}.squashfs'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.gz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.tar.gz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.tar.gz'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.tar.xz'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.tar.xz'.format(self.data.get('fingerprint'))
if os.path.exists('meta-{}.tar.xz'.format(self.data.get('fingerprint'))):
shutil.move('meta-{}.squashfs'.format(self.data.get('fingerprint')), 'tmp/images/{}/'.format(self.data.get('fingerprint')))
input['metadata'] = 'meta-{}.squashfs'.format(self.data.get('fingerprint'))
#Prepare & Move the yaml file
self.prepareImageYAML(input)
shutil.move('image.yaml', 'tmp/images/{}/'.format(self.data.get('fingerprint')))
#TODO Prepare README.md
file = open('tmp/images/{}/README.md'.format(self.data.get('fingerprint')), 'a')
file.write('#README\n')
file.write(input.get('documentation'))
file.close()
#TODO Prepare Logo
if logo:
logo.save('tmp/images/{}/{}'.format(self.data.get('fingerprint'), 'logo.png'))
return MetaConf().getConfRoot() + '/tmp/images/{}'.format(self.data.get('fingerprint'))
except Exception as e:
logging.error('Failed to export the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
def prepareImageYAML(self, input):
if input.get('metadata') == None: input['metadata'] = ''
data = {
'title': input.get('imageAlias', ''),
'description': input.get('imageDescription', ''),
'author': {
'name': input.get('authorName', ''),
'alias': '',
'email': input.get('authorEmail', '')
},
'license': input.get('license', ''),
'readme': 'README.md',
'tags': input.get('imageTags').split(','),
'logo': 'logo.png',
'image': input.get('image'),
'metadata': input.get('metadata'),
'fingerprint': self.data.get('fingerprint'),
'public': True
}
data.update(self.client.api.images[self.data.get('fingerprint')].get().json()['metadata'])
with open('image.yaml', 'w') as yamlFile:
yaml.dump(data, yamlFile, default_flow_style=False)
def pushImage(self, input):
try:
#Login
result = firebaseLogin(input.get('username'), input.get('password'))
if result.ok:
token = result.json()['idToken']
else:
raise ValueError('Login failed: {}'.format(result.json()['error']['message']))
self.data['fingerprint'] = self.client.images.get(self.data.get('fingerprint')).fingerprint
if os.path.exists('tmp/images/{}'.format(self.data.get('fingerprint'))):
logging.info('Image exists. Ready for push.')
print ("Image exists. Ready for push.")
#Prepare the files for upload.
with open('tmp/images/{}/image.yaml'.format(self.data.get('fingerprint'))) as stream:
yamlData = yaml.load(stream)
files = {
'yaml': open('tmp/images/{}/image.yaml'.format(self.data.get('fingerprint'), 'rb'))
}
headers = {'Authorization': token}
response = requests.post('{}/cliAddPackage'.format(meta.IMAGE_HUB), headers=headers, files=files, data={'id': self.data.get('fingerprint')})
if response.ok == False:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
raise ValueError(
response.json()['message'])
return
print("yaml uploaded successfully.")
print("Uploading:")
for file in response.json()['filesRequired']:
for key in file:
files = {}
if file[key] != '':
if os.path.exists('tmp/images/{}/{}'.format(self.data.get('fingerprint'), file[key])):
files['file'] = open('tmp/images/{}/{}'.format(self.data.get('fingerprint'), file[key]), 'rb')
requests.post('{}/cliAddFile'.format(meta.IMAGE_HUB), headers=headers, files=files, data={'id': self.data.get('fingerprint')}).json()
print('File {} uploaded successfully'.format(file[key]))
else:
print('File {} does not exist'.format(file[key]))
else:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
logging.exception('Image is not prepared. Please prepare the image using the command lxdui image prep <fingerprint>')
raise ValueError('Image is not prepared. Please prepare the image using the command: lxdui image prep <fingerprint>')
except Exception as e:
logging.error('Failed to push the image {}'.format(self.data.get('fingerprint')))
logging.exception(e)
raise ValueError(e)
def importImage(self, input):
logging.info('Importing image {}'.format(self.data.get('fingerprint')))
shutil.rmtree('tmp/downloaded/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
os.makedirs('tmp/downloaded/{}'.format(self.data.get('fingerprint')), exist_ok=True)
# Download and extract the file
r = requests.get('{}/cliDownloadRepo/{}'.format(meta.IMAGE_HUB, self.data.get('fingerprint')), stream=True)
with open('tmp/downloaded/{}/package.tar.gz'.format(self.data.get('fingerprint')), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
tfile = tarfile.open('tmp/downloaded/{}/package.tar.gz'.format(self.data.get('fingerprint')), 'r:gz')
tfile.extractall('tmp/downloaded/{}/'.format(self.data.get('fingerprint')))
with open('tmp/downloaded/{}/image.yaml'.format(self.data.get('fingerprint'))) as stream:
yamlData = yaml.load(stream)
if os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.gz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.tar.gz".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.tar.xz".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) == False and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.tar.xz".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
elif os.path.exists("tmp/downloaded/{0}/meta-{0}.squashfs".format(self.data.get('fingerprint'))) and os.path.exists("tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))):
p2 = subprocess.Popen(["lxc", "image", "import",
"tmp/downloaded/{0}/meta-{0}.squashfs".format(self.data.get('fingerprint')),
"tmp/downloaded/{0}/{0}.squashfs".format(self.data.get('fingerprint'))], stdout=subprocess.PIPE)
output_rez = p2.stdout.read()
shutil.rmtree('tmp/downloaded/{}/'.format(self.data.get('fingerprint')), ignore_errors=True)
image = self.client.images.get(self.data.get('fingerprint'))
image.add_alias(yamlData['title'], yamlData['title'])
# self.client.images.create(image_data='tmp/images/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd.tar.xz',
# metadata='tmp/images/394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd/meta-394986c986a778f64903fa043a3e280bda41e4793580b22c5d991ec948ced6dd.tar.xz')
def listHub(self, input):
try:
logging.info('Listing images')
output = "# | Title | Fingerprint | OS | Author\n"
result = requests.get('{}/cliListRepos'.format(meta.IMAGE_HUB))
i = 1
for r in result.json():
output += '{} | {} | {} | {} | {}\n'.format(i, r['title'], r['fingerprint'], r['properties'].get('name'), r['author']['name'])
i+=1
return output
except Exception as e:
logging.error('Failed to list images from kuti.io')
logging.exception(e)
raise ValueError(e) | 0.193452 | 0.05151 |
import csv
import random
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial
def loadCsv(filename):
'''
load data.
https://stackoverflow.com/questions/4315506/load-csv-into-2d-matrix-with-numpy-for-plotting
https://machinelearningmastery.com/index-slice-reshape-numpy-arrays-machine-learning-python/
'''
lines = csv.reader(open(filename, 'rt', encoding = 'utf-8'))
next(lines, None) # skip the headers
dataset = list(lines)
result = np.array(dataset).astype("float")
np.random.shuffle(result) # randomly re-arrange the rows of the data samples
X = result[:, 0:12]
mean_X = np.mean(X, axis = 0) # normalize the features
X -= mean_X
y = result[:, -1]
y[y>0] = 1
return [X, y]
trainfile = 'train.csv'
[X_train, y_train]= loadCsv(trainfile)
testfile = 'test.csv'
[X_test, y_test]= loadCsv(testfile)
def train(X, y):
X_train = X
y_train = y
def compute_distances(X_test): # l2 norm, eucleadian distance
num_test = X_test.shape[0]
num_train = X_train.shape[0]
dists = np.zeros((num_test, num_train))
dists = (np.sum(X_test**2,axis=1)[:,np.newaxis] -2 * np.dot(X_test,X_train.T) + np.sum(X_train**2,axis=1))**0.5
return dists
def compute_distances_v2(X_test): # hamming distance
num_test = X_test.shape[0]
num_train = X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
dists[i][j] = scipy.spatial.distance.hamming(X_test[i], X_train[j])
return dists
def predict_labels(dists, k=1):
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
closest_y = []
sort_dists_row = np.argsort(dists[i])
k_nearest = sort_dists_row[0:k]
for j in range(k):
closest_y.append(y_train[k_nearest[j]])
label_num = {}
most_common = 0
for l in range(k):
if closest_y[l] in label_num:
label_num[closest_y[l]] = label_num[closest_y[l]] + 1
else:
label_num[closest_y[l]] = 1
for key in label_num:
if (label_num[key] > most_common):
most_common = label_num[key]
y_pred[i] = key
return y_pred
train(X_train, y_train)
#dists = compute_distances(X_test)
dists = compute_distances_v2(X_test)
y_test_pred = predict_labels(dists, k=8)
num_correct = np.sum(y_test_pred == y_test)
num_test = dists.shape[0]
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
'''
Cross validation
'''
num_folds = 5
k_choices = [3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
X_train_folds = np.array_split(X_train, 5)
y_train_folds = np.array_split(y_train, 5)
k_to_accuracies = {}
for k in k_choices:
k_to_accuracies[k] = []
for fold in range(num_folds):
includes = [x for x in range(num_folds) if x is not fold]
ls_X_train = []
ls_y_train = []
for i in includes:
ls_X_train.append(X_train_folds[i])
ls_y_train.append(y_train_folds[i])
X_train_v = np.concatenate(ls_X_train, axis=0)
y_train_v = np.concatenate(ls_y_train, axis=0)
X_test_v = X_train_folds[fold]
y_test_v = y_train_folds[fold]
train(X_train_v, y_train_v)
dists = compute_distances(X_test_v)
y_valid_pred = predict_labels(dists, k)
num_correct = np.sum(y_valid_pred == y_test_v)
num_valid = len(y_test_v)
accuracy = float(num_correct) / num_valid
k_to_accuracies[k].append(accuracy)
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show() | knn.py | import csv
import random
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial
def loadCsv(filename):
'''
load data.
https://stackoverflow.com/questions/4315506/load-csv-into-2d-matrix-with-numpy-for-plotting
https://machinelearningmastery.com/index-slice-reshape-numpy-arrays-machine-learning-python/
'''
lines = csv.reader(open(filename, 'rt', encoding = 'utf-8'))
next(lines, None) # skip the headers
dataset = list(lines)
result = np.array(dataset).astype("float")
np.random.shuffle(result) # randomly re-arrange the rows of the data samples
X = result[:, 0:12]
mean_X = np.mean(X, axis = 0) # normalize the features
X -= mean_X
y = result[:, -1]
y[y>0] = 1
return [X, y]
trainfile = 'train.csv'
[X_train, y_train]= loadCsv(trainfile)
testfile = 'test.csv'
[X_test, y_test]= loadCsv(testfile)
def train(X, y):
X_train = X
y_train = y
def compute_distances(X_test): # l2 norm, eucleadian distance
num_test = X_test.shape[0]
num_train = X_train.shape[0]
dists = np.zeros((num_test, num_train))
dists = (np.sum(X_test**2,axis=1)[:,np.newaxis] -2 * np.dot(X_test,X_train.T) + np.sum(X_train**2,axis=1))**0.5
return dists
def compute_distances_v2(X_test): # hamming distance
num_test = X_test.shape[0]
num_train = X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
dists[i][j] = scipy.spatial.distance.hamming(X_test[i], X_train[j])
return dists
def predict_labels(dists, k=1):
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
closest_y = []
sort_dists_row = np.argsort(dists[i])
k_nearest = sort_dists_row[0:k]
for j in range(k):
closest_y.append(y_train[k_nearest[j]])
label_num = {}
most_common = 0
for l in range(k):
if closest_y[l] in label_num:
label_num[closest_y[l]] = label_num[closest_y[l]] + 1
else:
label_num[closest_y[l]] = 1
for key in label_num:
if (label_num[key] > most_common):
most_common = label_num[key]
y_pred[i] = key
return y_pred
train(X_train, y_train)
#dists = compute_distances(X_test)
dists = compute_distances_v2(X_test)
y_test_pred = predict_labels(dists, k=8)
num_correct = np.sum(y_test_pred == y_test)
num_test = dists.shape[0]
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
'''
Cross validation
'''
num_folds = 5
k_choices = [3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
X_train_folds = np.array_split(X_train, 5)
y_train_folds = np.array_split(y_train, 5)
k_to_accuracies = {}
for k in k_choices:
k_to_accuracies[k] = []
for fold in range(num_folds):
includes = [x for x in range(num_folds) if x is not fold]
ls_X_train = []
ls_y_train = []
for i in includes:
ls_X_train.append(X_train_folds[i])
ls_y_train.append(y_train_folds[i])
X_train_v = np.concatenate(ls_X_train, axis=0)
y_train_v = np.concatenate(ls_y_train, axis=0)
X_test_v = X_train_folds[fold]
y_test_v = y_train_folds[fold]
train(X_train_v, y_train_v)
dists = compute_distances(X_test_v)
y_valid_pred = predict_labels(dists, k)
num_correct = np.sum(y_valid_pred == y_test_v)
num_valid = len(y_test_v)
accuracy = float(num_correct) / num_valid
k_to_accuracies[k].append(accuracy)
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show() | 0.538983 | 0.651604 |
from odoo import api
from odoo.addons.mail.tests.common import TestMail
class TestTracking(TestMail):
def test_message_track(self):
""" Testing auto tracking of fields. Warning, it has not be cleaned and
should probably be. """
Subtype = self.env['mail.message.subtype']
Data = self.env['ir.model.data']
note_subtype = self.env.ref('mail.mt_note')
group_system = self.env.ref('base.group_system')
group_user = self.env.ref('base.group_user')
self.group_pigs.write({'channel_partner_ids': [(4, self.user_employee.partner_id.id)]})
# mt_private: public field (tracked as onchange) set to 'private' (selection)
mt_private = Subtype.create({
'name': 'private',
'description': 'Public field set to private'
})
Data.create({
'name': 'mt_private',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_private.id
})
# mt_name_supername: name field (tracked as always) set to 'supername' (char)
mt_name_supername = Subtype.create({
'name': 'name_supername',
'description': 'Name field set to supername'
})
Data.create({
'name': 'mt_name_supername',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_name_supername.id
})
# mt_group_public_set: group_public field (tracked as onchange) set to something (m2o)
mt_group_public_set = Subtype.create({
'name': 'group_public_set',
'description': 'Group_public field set'
})
Data.create({
'name': 'mt_group_public_set',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_group_public_set.id
})
# mt_group_public_set: group_public field (tracked as onchange) set to nothing (m2o)
mt_group_public_unset = Subtype.create({
'name': 'group_public_unset',
'description': 'Group_public field unset'
})
Data.create({
'name': 'mt_group_public_unset',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_group_public_unset.id
})
@api.multi
def _track_subtype(self, init_values):
if 'public' in init_values and self.public == 'private':
return 'mail.mt_private'
elif 'name' in init_values and self.name == 'supername':
return 'mail.mt_name_supername'
elif 'group_public_id' in init_values and self.group_public_id:
return 'mail.mt_group_public_set'
elif 'group_public_id' in init_values and not self.group_public_id:
return 'mail.mt_group_public_unset'
return False
self.registry('mail.channel')._patch_method('_track_subtype', _track_subtype)
visibility = {
'public': 'onchange',
'name': 'always',
'group_public_id': 'onchange'
}
cls = type(self.env['mail.channel'])
for key in visibility:
self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
getattr(cls, key).track_visibility = visibility[key]
@self.addCleanup
def cleanup():
for key in visibility:
del getattr(cls, key).track_visibility
# Test: change name -> always tracked, not related to a subtype
self.group_pigs.sudo(self.user_employee).write({'name': 'my_name'})
self.assertEqual(len(self.group_pigs.message_ids), 1)
last_msg = self.group_pigs.message_ids[-1]
self.assertEqual(last_msg.subtype_id, note_subtype)
self.assertEqual(len(last_msg.tracking_value_ids), 1)
self.assertEqual(last_msg.tracking_value_ids.field, 'name')
self.assertEqual(last_msg.tracking_value_ids.field_desc, 'Name')
self.assertEqual(last_msg.tracking_value_ids.old_value_char, 'Pigs')
self.assertEqual(last_msg.tracking_value_ids.new_value_char, 'my_name')
# Test: change name as supername, public as private -> 1 subtype, private
self.group_pigs.sudo(self.user_employee).write({'name': 'supername', 'public': 'private'})
self.group_pigs.invalidate_cache()
self.assertEqual(len(self.group_pigs.message_ids.ids), 2)
last_msg = self.group_pigs.message_ids[0]
self.assertEqual(last_msg.subtype_id, mt_private)
self.assertEqual(len(last_msg.tracking_value_ids), 2)
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field')), set(['name', 'public']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field_desc')), set(['Name', 'Privacy']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_char')), set(['my_name', 'Selected group of users']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_char')), set(['supername', 'Invited people only']))
# Test: change public as public, group_public_id -> 1 subtype, group public set
self.group_pigs.sudo(self.user_employee).write({'public': 'public', 'group_public_id': group_system.id})
self.group_pigs.invalidate_cache()
self.assertEqual(len(self.group_pigs.message_ids), 3)
last_msg = self.group_pigs.message_ids[0]
self.assertEqual(last_msg.subtype_id, mt_group_public_set)
self.assertEqual(len(last_msg.tracking_value_ids), 3)
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field')), set(['group_public_id', 'public', 'name']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field_desc')), set(['Authorized Group', 'Privacy', 'Name']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_char')), set([group_user.name_get()[0][1], 'Invited people only', 'supername']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_char')), set([group_system.name_get()[0][1], 'Everyone', 'supername']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_integer')), set([0, group_user.id]))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_integer')), set([0, group_system.id])) | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mail/tests/test_message_track.py |
from odoo import api
from odoo.addons.mail.tests.common import TestMail
class TestTracking(TestMail):
def test_message_track(self):
""" Testing auto tracking of fields. Warning, it has not be cleaned and
should probably be. """
Subtype = self.env['mail.message.subtype']
Data = self.env['ir.model.data']
note_subtype = self.env.ref('mail.mt_note')
group_system = self.env.ref('base.group_system')
group_user = self.env.ref('base.group_user')
self.group_pigs.write({'channel_partner_ids': [(4, self.user_employee.partner_id.id)]})
# mt_private: public field (tracked as onchange) set to 'private' (selection)
mt_private = Subtype.create({
'name': 'private',
'description': 'Public field set to private'
})
Data.create({
'name': 'mt_private',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_private.id
})
# mt_name_supername: name field (tracked as always) set to 'supername' (char)
mt_name_supername = Subtype.create({
'name': 'name_supername',
'description': 'Name field set to supername'
})
Data.create({
'name': 'mt_name_supername',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_name_supername.id
})
# mt_group_public_set: group_public field (tracked as onchange) set to something (m2o)
mt_group_public_set = Subtype.create({
'name': 'group_public_set',
'description': 'Group_public field set'
})
Data.create({
'name': 'mt_group_public_set',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_group_public_set.id
})
# mt_group_public_set: group_public field (tracked as onchange) set to nothing (m2o)
mt_group_public_unset = Subtype.create({
'name': 'group_public_unset',
'description': 'Group_public field unset'
})
Data.create({
'name': 'mt_group_public_unset',
'model': 'mail.message.subtype',
'module': 'mail',
'res_id': mt_group_public_unset.id
})
@api.multi
def _track_subtype(self, init_values):
if 'public' in init_values and self.public == 'private':
return 'mail.mt_private'
elif 'name' in init_values and self.name == 'supername':
return 'mail.mt_name_supername'
elif 'group_public_id' in init_values and self.group_public_id:
return 'mail.mt_group_public_set'
elif 'group_public_id' in init_values and not self.group_public_id:
return 'mail.mt_group_public_unset'
return False
self.registry('mail.channel')._patch_method('_track_subtype', _track_subtype)
visibility = {
'public': 'onchange',
'name': 'always',
'group_public_id': 'onchange'
}
cls = type(self.env['mail.channel'])
for key in visibility:
self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
getattr(cls, key).track_visibility = visibility[key]
@self.addCleanup
def cleanup():
for key in visibility:
del getattr(cls, key).track_visibility
# Test: change name -> always tracked, not related to a subtype
self.group_pigs.sudo(self.user_employee).write({'name': 'my_name'})
self.assertEqual(len(self.group_pigs.message_ids), 1)
last_msg = self.group_pigs.message_ids[-1]
self.assertEqual(last_msg.subtype_id, note_subtype)
self.assertEqual(len(last_msg.tracking_value_ids), 1)
self.assertEqual(last_msg.tracking_value_ids.field, 'name')
self.assertEqual(last_msg.tracking_value_ids.field_desc, 'Name')
self.assertEqual(last_msg.tracking_value_ids.old_value_char, 'Pigs')
self.assertEqual(last_msg.tracking_value_ids.new_value_char, 'my_name')
# Test: change name as supername, public as private -> 1 subtype, private
self.group_pigs.sudo(self.user_employee).write({'name': 'supername', 'public': 'private'})
self.group_pigs.invalidate_cache()
self.assertEqual(len(self.group_pigs.message_ids.ids), 2)
last_msg = self.group_pigs.message_ids[0]
self.assertEqual(last_msg.subtype_id, mt_private)
self.assertEqual(len(last_msg.tracking_value_ids), 2)
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field')), set(['name', 'public']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field_desc')), set(['Name', 'Privacy']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_char')), set(['my_name', 'Selected group of users']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_char')), set(['supername', 'Invited people only']))
# Test: change public as public, group_public_id -> 1 subtype, group public set
self.group_pigs.sudo(self.user_employee).write({'public': 'public', 'group_public_id': group_system.id})
self.group_pigs.invalidate_cache()
self.assertEqual(len(self.group_pigs.message_ids), 3)
last_msg = self.group_pigs.message_ids[0]
self.assertEqual(last_msg.subtype_id, mt_group_public_set)
self.assertEqual(len(last_msg.tracking_value_ids), 3)
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field')), set(['group_public_id', 'public', 'name']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('field_desc')), set(['Authorized Group', 'Privacy', 'Name']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_char')), set([group_user.name_get()[0][1], 'Invited people only', 'supername']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_char')), set([group_system.name_get()[0][1], 'Everyone', 'supername']))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('old_value_integer')), set([0, group_user.id]))
self.assertEqual(set(last_msg.tracking_value_ids.mapped('new_value_integer')), set([0, group_system.id])) | 0.395951 | 0.207857 |
errors_semantic = []
def append_error_semantic(fila, column, error):
errors_semantic.append(f'({fila}, {column}) - {error}')
class Method:
def __init__(self, id, parametros, returned_type):
self.returnedType = returned_type
self.id = id
self.args = parametros
class Attribute:
def __init__(self, id, atributoT, expression=None):
self.id = id
self.attrType = atributoT
self.expression = expression
class Tipos:
def __init__(self, nombre, tipo_padre, inherit=True):
self.attributes = {}
self.methods = {}
self.childs = set()
self.name = nombre
self.order = 0
self.min_order = 0
self.parent = tipo_padre
self.inherit = inherit
def __str__(self):
return f'{self.name}'
def __repr__(self):
return f'{self.name}'
def __repr__(self):
return f'{self.id}{self.args}:{self.returnedType}'
def my_attribute(self):
return self.attributes.values()
def my_method(self):
return self.methods
def add_method(self, id, typo_argumento, returned_type):
type = self
while type:
try:
method_type = type.methods[id]
if len(method_type.args) != len(typo_argumento):
return False, f'SemanticError: invalid redefinition of {id}'
for i in range(0, len(typo_argumento)):
type = TypesByName[typo_argumento[i]]
if type != method_type.args[i]:
return False, f'SemanticError: In redefined method {id}, parameter type {type} is different from original type {method_type.args[i]}.'
respuesta = get_type(returned_type)
if method_type.returnedType != respuesta:
return False, f'SemanticError: In redefined method {id}, return type {respuesta} is different from original return type {method_type.returnedType}.'
break
except KeyError:
type = type.parent
definido, error_msg = self.definelo(id)
if definido:
list_arg = []
for arg in typo_argumento:
arg_type = get_type(arg)
if arg_type is None:
return False, f'TypeError: Class {arg} of formal parameter is undefined.'
list_arg.append(arg_type)
respuesta = get_type(returned_type)
if respuesta is None:
return False, f'TypeError: Undefined return type {returned_type} in method {id}.'
self.methods[id] = Method(id, list_arg, respuesta)
return True, None
else:
return False, error_msg
def add_attr(self, id, atributoT, expression):
attribute, _ = get_attribute(self, id)
if attribute is not None:
return False, f'SemanticError: Attribute {id} is an attribute of an inherited class.'
try:
x = self.attributes[id]
return False, f'SemanticError: Attribute {id} is an attribute of an inherited class.'
except KeyError:
atributo = get_type(atributoT)
if atributo is None:
return False, f'TypeError: Class {atributoT} of attribute {id} is undefined.'
self.attributes[id] = Attribute(id, atributo, expression)
return True, None
def get_method(self, id, parametros):
try:
return self.metodos_no_heredados(id, parametros)
except Exception:
if self.parent:
return self.parent.get_method(id, parametros)
else:
return None, None, f'AttributeError: Dispatch to undefined method {id}.'
def atributos(self):
type = self
result = []
while type:
atributo = []
for attr in type.attributes.values():
atributo.append(attr)
result.append(atributo)
type = type.parent
return [elem for sublist in result[::-1] for elem in sublist]
def definelo(self, id):
if id in self.methods.keys():
return False, f'SemanticError: Method {id} is multiply defined.'
return True, None
def metodos_heredados(self):
type = self.parent
result = []
while type:
metodo = []
for method in type.methods.values():
method.owner = type.name
metodo.append(method)
result.append(metodo)
type = type.parent
return [elem for sublist in result[::-1] for elem in sublist]
def metodos_no_heredados(self, id, parametros):
try:
method = self.methods[id]
if len(parametros) != len(method.args):
return None, None, f'SemanticError: Method {id} called with wrong number of arguments.'
for i, a in enumerate(parametros):
if not check_herencia(a, method.args[i]):
return None, None, f'TypeError: In call of method {id}, type {a} does not conform to declared type {method.args[i]}.'
return method, self, None
except KeyError:
raise Exception(f'type {self.name} don\'t have a method {id}')
def join_set(a, b):
h = jerarquia(b)
while a is not None:
if a in h:
break
a = a.parent
return a
def get_type(type_name):
try:
return TypesByName[type_name]
except KeyError:
return None
def get_attribute(c: Tipos, id: str):
while c is not None:
try:
return c.attributes[id], c
except KeyError:
c = c.parent
return None, None
def ordenado(type: Tipos, lugar: int) -> int:
type.min_order = lugar
for t in type.childs:
lugar = ordenado(t, lugar)
type.order = lugar
return lugar + 1
def jerarquia(x):
h = []
while x is not None:
h.append(x)
x = x.parent
return h
def check_herencia(a: Tipos, b: Tipos):
index = a
while index != b:
if index is None:
return False
index = index.parent
return True
def check_jerarquia(node):
for c in node.classes:
classT = TypesByName[c.type]
if c.parent_type:
try:
padreT = TypesByName[c.parent_type]
if padreT.inherit:
classT.parent = padreT
padreT.childs.add(classT)
x = padreT
while x:
if x == classT:
append_error_semantic(
c.lineno, c.colno, f'SemanticError: Class {classT.name}, or an ancestor of {classT.name}, is involved in an inheritance cycle.')
return False
x = x.parent
else:
append_error_semantic(c.lineno, c.colno, f'SemanticError: Class {classT} cannot inherit class {padreT.name}.')
return False
except KeyError:
append_error_semantic(c.lineno, c.colno, f'TypeError: Class {classT} inherits from an undefined class {c.parent_type}.')
return False
else:
classT.parent = ObjectType
ObjectType.childs.add(classT)
ordenado(ObjectType, 1)
return True
def check_type(node):
for c in node.classes:
try:
x = TypesByName[c.type]
append_error_semantic(c.lineno, c.colno,
f'SemanticError: Redefinition of basic class {c.type}.')
return False
except KeyError:
TypesByName[c.type] = Tipos(c.type, None)
return True
SelfType = Tipos('SELF_TYPE', None, False)
ObjectType = Tipos('Object', None)
ObjectType.order = 0
IOType = Tipos('IO', ObjectType)
IOType.order = -1
IntType = Tipos('Int', ObjectType, False)
IntType.order = -2
StringType = Tipos('String', ObjectType, False)
StringType.order = -2
BoolType = Tipos('Bool', ObjectType, False)
BoolType.order = -2
TypesByName = {
'SELF_TYPE': SelfType,
'Object': ObjectType,
'IO': IOType,
'Int': IntType,
'String': StringType,
'Bool': BoolType
}
ObjectType.childs = set([IOType, IntType, StringType, BoolType])
ObjectType.add_method('abort', [], 'Object')
ObjectType.add_method('type_name', [], 'String')
ObjectType.add_method('copy', [], 'SELF_TYPE')
IOType.add_method('out_string', ['String'], 'SELF_TYPE')
IOType.add_method('out_int', ['Int'], 'SELF_TYPE')
IOType.add_method('in_string', [], 'String')
IOType.add_method('in_int', [], 'Int')
StringType.add_method('length', [], 'Int')
StringType.add_method('concat', ['String'], 'String')
StringType.add_method('substr', ['Int', 'Int'], 'String')
IntType.add_method('abort', [], 'Object')
BoolType.add_method('abort', [], 'Object')
StringType.add_method('abort', [], 'Object')
StringType.add_method('type_name', [], 'String')
IntType.add_method('type_name', [], 'String')
BoolType.add_method('type_name', [], 'String') | src/semantic/types.py | errors_semantic = []
def append_error_semantic(fila, column, error):
errors_semantic.append(f'({fila}, {column}) - {error}')
class Method:
def __init__(self, id, parametros, returned_type):
self.returnedType = returned_type
self.id = id
self.args = parametros
class Attribute:
def __init__(self, id, atributoT, expression=None):
self.id = id
self.attrType = atributoT
self.expression = expression
class Tipos:
def __init__(self, nombre, tipo_padre, inherit=True):
self.attributes = {}
self.methods = {}
self.childs = set()
self.name = nombre
self.order = 0
self.min_order = 0
self.parent = tipo_padre
self.inherit = inherit
def __str__(self):
return f'{self.name}'
def __repr__(self):
return f'{self.name}'
def __repr__(self):
return f'{self.id}{self.args}:{self.returnedType}'
def my_attribute(self):
return self.attributes.values()
def my_method(self):
return self.methods
def add_method(self, id, typo_argumento, returned_type):
type = self
while type:
try:
method_type = type.methods[id]
if len(method_type.args) != len(typo_argumento):
return False, f'SemanticError: invalid redefinition of {id}'
for i in range(0, len(typo_argumento)):
type = TypesByName[typo_argumento[i]]
if type != method_type.args[i]:
return False, f'SemanticError: In redefined method {id}, parameter type {type} is different from original type {method_type.args[i]}.'
respuesta = get_type(returned_type)
if method_type.returnedType != respuesta:
return False, f'SemanticError: In redefined method {id}, return type {respuesta} is different from original return type {method_type.returnedType}.'
break
except KeyError:
type = type.parent
definido, error_msg = self.definelo(id)
if definido:
list_arg = []
for arg in typo_argumento:
arg_type = get_type(arg)
if arg_type is None:
return False, f'TypeError: Class {arg} of formal parameter is undefined.'
list_arg.append(arg_type)
respuesta = get_type(returned_type)
if respuesta is None:
return False, f'TypeError: Undefined return type {returned_type} in method {id}.'
self.methods[id] = Method(id, list_arg, respuesta)
return True, None
else:
return False, error_msg
def add_attr(self, id, atributoT, expression):
attribute, _ = get_attribute(self, id)
if attribute is not None:
return False, f'SemanticError: Attribute {id} is an attribute of an inherited class.'
try:
x = self.attributes[id]
return False, f'SemanticError: Attribute {id} is an attribute of an inherited class.'
except KeyError:
atributo = get_type(atributoT)
if atributo is None:
return False, f'TypeError: Class {atributoT} of attribute {id} is undefined.'
self.attributes[id] = Attribute(id, atributo, expression)
return True, None
def get_method(self, id, parametros):
try:
return self.metodos_no_heredados(id, parametros)
except Exception:
if self.parent:
return self.parent.get_method(id, parametros)
else:
return None, None, f'AttributeError: Dispatch to undefined method {id}.'
def atributos(self):
type = self
result = []
while type:
atributo = []
for attr in type.attributes.values():
atributo.append(attr)
result.append(atributo)
type = type.parent
return [elem for sublist in result[::-1] for elem in sublist]
def definelo(self, id):
if id in self.methods.keys():
return False, f'SemanticError: Method {id} is multiply defined.'
return True, None
def metodos_heredados(self):
type = self.parent
result = []
while type:
metodo = []
for method in type.methods.values():
method.owner = type.name
metodo.append(method)
result.append(metodo)
type = type.parent
return [elem for sublist in result[::-1] for elem in sublist]
def metodos_no_heredados(self, id, parametros):
try:
method = self.methods[id]
if len(parametros) != len(method.args):
return None, None, f'SemanticError: Method {id} called with wrong number of arguments.'
for i, a in enumerate(parametros):
if not check_herencia(a, method.args[i]):
return None, None, f'TypeError: In call of method {id}, type {a} does not conform to declared type {method.args[i]}.'
return method, self, None
except KeyError:
raise Exception(f'type {self.name} don\'t have a method {id}')
def join_set(a, b):
h = jerarquia(b)
while a is not None:
if a in h:
break
a = a.parent
return a
def get_type(type_name):
try:
return TypesByName[type_name]
except KeyError:
return None
def get_attribute(c: Tipos, id: str):
while c is not None:
try:
return c.attributes[id], c
except KeyError:
c = c.parent
return None, None
def ordenado(type: Tipos, lugar: int) -> int:
type.min_order = lugar
for t in type.childs:
lugar = ordenado(t, lugar)
type.order = lugar
return lugar + 1
def jerarquia(x):
h = []
while x is not None:
h.append(x)
x = x.parent
return h
def check_herencia(a: Tipos, b: Tipos):
index = a
while index != b:
if index is None:
return False
index = index.parent
return True
def check_jerarquia(node):
for c in node.classes:
classT = TypesByName[c.type]
if c.parent_type:
try:
padreT = TypesByName[c.parent_type]
if padreT.inherit:
classT.parent = padreT
padreT.childs.add(classT)
x = padreT
while x:
if x == classT:
append_error_semantic(
c.lineno, c.colno, f'SemanticError: Class {classT.name}, or an ancestor of {classT.name}, is involved in an inheritance cycle.')
return False
x = x.parent
else:
append_error_semantic(c.lineno, c.colno, f'SemanticError: Class {classT} cannot inherit class {padreT.name}.')
return False
except KeyError:
append_error_semantic(c.lineno, c.colno, f'TypeError: Class {classT} inherits from an undefined class {c.parent_type}.')
return False
else:
classT.parent = ObjectType
ObjectType.childs.add(classT)
ordenado(ObjectType, 1)
return True
def check_type(node):
for c in node.classes:
try:
x = TypesByName[c.type]
append_error_semantic(c.lineno, c.colno,
f'SemanticError: Redefinition of basic class {c.type}.')
return False
except KeyError:
TypesByName[c.type] = Tipos(c.type, None)
return True
SelfType = Tipos('SELF_TYPE', None, False)
ObjectType = Tipos('Object', None)
ObjectType.order = 0
IOType = Tipos('IO', ObjectType)
IOType.order = -1
IntType = Tipos('Int', ObjectType, False)
IntType.order = -2
StringType = Tipos('String', ObjectType, False)
StringType.order = -2
BoolType = Tipos('Bool', ObjectType, False)
BoolType.order = -2
TypesByName = {
'SELF_TYPE': SelfType,
'Object': ObjectType,
'IO': IOType,
'Int': IntType,
'String': StringType,
'Bool': BoolType
}
ObjectType.childs = set([IOType, IntType, StringType, BoolType])
ObjectType.add_method('abort', [], 'Object')
ObjectType.add_method('type_name', [], 'String')
ObjectType.add_method('copy', [], 'SELF_TYPE')
IOType.add_method('out_string', ['String'], 'SELF_TYPE')
IOType.add_method('out_int', ['Int'], 'SELF_TYPE')
IOType.add_method('in_string', [], 'String')
IOType.add_method('in_int', [], 'Int')
StringType.add_method('length', [], 'Int')
StringType.add_method('concat', ['String'], 'String')
StringType.add_method('substr', ['Int', 'Int'], 'String')
IntType.add_method('abort', [], 'Object')
BoolType.add_method('abort', [], 'Object')
StringType.add_method('abort', [], 'Object')
StringType.add_method('type_name', [], 'String')
IntType.add_method('type_name', [], 'String')
BoolType.add_method('type_name', [], 'String') | 0.431105 | 0.163212 |
class Node(object):
def __init__(self, val=None, next=None):
self.val = val
self.next = next
class SolutionTwoPointersIter(object):
def insert(self, head, insertVal):
"""
:type head: Node
:type insertVal: int
:rtype: Node
Time complexity: O(n).
Space complexity: O(1s).
"""
# Edge case for empty head with circular link.
if not head:
head = Node(val=insertVal)
head.next = head
return head
# Iterate to move two pointers: previous & current, until
# insert is in the middle of previous & current, or
# insert is out of range of previous & current.
previous = head
current = head.next
while (not (previous.val <= insertVal and insertVal <= current.val) and
not (previous.val > current.val and insertVal < current.val) and
not (previous.val > current.val and insertVal > previous.val)):
previous = previous.next
current = current.next
if current is head:
break
# Connect previous to new node to current.
previous.next = Node(val=insertVal, next=current)
return head
def main():
# Input: head = [3,4,1]
# Output: [3,4,1,2]
head = Node(val=3)
head.next = Node(val=4)
head.next.next = Node(val=1)
head.next.next.next = head
insertVal = 2
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val,
new_head.next.next.val, new_head.next.next.next.val,
new_head.next.next.next.next.val)
# Input: head = []
# Output: [1]
head = None
insertVal = 1
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val)
# Input: head = [1]
# Output: [1,0]
head = Node(1)
head.next = head
insertVal = 0
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val, new_head.next.next.val)
# Input: head = [3,5,1]
# Output: [3,5,0,1]
head = Node(val=3)
head.next = Node(val=5)
head.next.next = Node(val=1)
head.next.next.next = head
insertVal = 0
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val,
new_head.next.next.val, new_head.next.next.next.val,
new_head.next.next.next.next.val)
if __name__ == '__main__':
main() | lc0708_insert_into_a_sorted_circular_linked_list.py | class Node(object):
def __init__(self, val=None, next=None):
self.val = val
self.next = next
class SolutionTwoPointersIter(object):
def insert(self, head, insertVal):
"""
:type head: Node
:type insertVal: int
:rtype: Node
Time complexity: O(n).
Space complexity: O(1s).
"""
# Edge case for empty head with circular link.
if not head:
head = Node(val=insertVal)
head.next = head
return head
# Iterate to move two pointers: previous & current, until
# insert is in the middle of previous & current, or
# insert is out of range of previous & current.
previous = head
current = head.next
while (not (previous.val <= insertVal and insertVal <= current.val) and
not (previous.val > current.val and insertVal < current.val) and
not (previous.val > current.val and insertVal > previous.val)):
previous = previous.next
current = current.next
if current is head:
break
# Connect previous to new node to current.
previous.next = Node(val=insertVal, next=current)
return head
def main():
# Input: head = [3,4,1]
# Output: [3,4,1,2]
head = Node(val=3)
head.next = Node(val=4)
head.next.next = Node(val=1)
head.next.next.next = head
insertVal = 2
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val,
new_head.next.next.val, new_head.next.next.next.val,
new_head.next.next.next.next.val)
# Input: head = []
# Output: [1]
head = None
insertVal = 1
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val)
# Input: head = [1]
# Output: [1,0]
head = Node(1)
head.next = head
insertVal = 0
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val, new_head.next.next.val)
# Input: head = [3,5,1]
# Output: [3,5,0,1]
head = Node(val=3)
head.next = Node(val=5)
head.next.next = Node(val=1)
head.next.next.next = head
insertVal = 0
new_head = SolutionTwoPointersIter().insert(head, insertVal)
print (new_head.val, new_head.next.val,
new_head.next.next.val, new_head.next.next.next.val,
new_head.next.next.next.next.val)
if __name__ == '__main__':
main() | 0.766031 | 0.28582 |
import os
import subprocess
from contextlib import contextmanager
from pathlib import Path
from pydockenv import definitions
BIN_PATH = str(Path(definitions.ROOT_DIR, 'bin', 'pydockenv'))
class Commander:
_instance = None
def __init__(self, env=None):
self._bin_path = BIN_PATH
self._env = env or {}
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = Commander()
return cls._instance
def add_env_var(self, k, v):
self._env[k] = v
def run(self, cmd, env=None):
args = cmd.split(' ')
env = self._prepare_env(env)
return subprocess.run(
[self._bin_path, *args],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env
)
@contextmanager
def active_env(self, env_name):
env_diff = self.activate_env(env_name)
env = os.environ.copy()
env.update({k: v[1] for k, v in env_diff.items()})
try:
yield env
finally:
self.deactivate_env(env=env)
def activate_env(self, env_name, env=None):
return self.source(f'activate {env_name}', env=env)
def deactivate_env(self, env=None):
return self.source('deactivate', env=env)
def source(self, cmd, env=None):
env = self._prepare_env(env)
proc = subprocess.Popen('env', stdout=subprocess.PIPE, shell=True,
env=env)
initial_env = self._get_env(proc.stdout)
proc.communicate()
command = f"bash -c 'PYDOCKENV_DEBUG=1 source {self._bin_path} {cmd}'"
proc = subprocess.Popen(command, stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE, shell=True, env=env)
post_env = self._get_env(proc.stderr)
proc.communicate()
env_diff = {}
for k in set().union(initial_env.keys(), post_env.keys()):
initial_value, post_value = initial_env.get(k), post_env.get(k)
if initial_value != post_value:
env_diff[k] = (initial_value, post_value)
return env_diff
def _get_env(self, stdout):
env = {}
for line in stdout:
(key, _, value) = line.decode('utf8').strip().partition("=")
env[key] = value
return env
def _prepare_env(self, env):
env = {**self._env, **(env or {})}
if env:
env = {k: v for k, v in {**os.environ, **env}.items()
if v is not None}
else:
env = None
return env | tests/commander.py | import os
import subprocess
from contextlib import contextmanager
from pathlib import Path
from pydockenv import definitions
BIN_PATH = str(Path(definitions.ROOT_DIR, 'bin', 'pydockenv'))
class Commander:
_instance = None
def __init__(self, env=None):
self._bin_path = BIN_PATH
self._env = env or {}
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = Commander()
return cls._instance
def add_env_var(self, k, v):
self._env[k] = v
def run(self, cmd, env=None):
args = cmd.split(' ')
env = self._prepare_env(env)
return subprocess.run(
[self._bin_path, *args],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env
)
@contextmanager
def active_env(self, env_name):
env_diff = self.activate_env(env_name)
env = os.environ.copy()
env.update({k: v[1] for k, v in env_diff.items()})
try:
yield env
finally:
self.deactivate_env(env=env)
def activate_env(self, env_name, env=None):
return self.source(f'activate {env_name}', env=env)
def deactivate_env(self, env=None):
return self.source('deactivate', env=env)
def source(self, cmd, env=None):
env = self._prepare_env(env)
proc = subprocess.Popen('env', stdout=subprocess.PIPE, shell=True,
env=env)
initial_env = self._get_env(proc.stdout)
proc.communicate()
command = f"bash -c 'PYDOCKENV_DEBUG=1 source {self._bin_path} {cmd}'"
proc = subprocess.Popen(command, stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE, shell=True, env=env)
post_env = self._get_env(proc.stderr)
proc.communicate()
env_diff = {}
for k in set().union(initial_env.keys(), post_env.keys()):
initial_value, post_value = initial_env.get(k), post_env.get(k)
if initial_value != post_value:
env_diff[k] = (initial_value, post_value)
return env_diff
def _get_env(self, stdout):
env = {}
for line in stdout:
(key, _, value) = line.decode('utf8').strip().partition("=")
env[key] = value
return env
def _prepare_env(self, env):
env = {**self._env, **(env or {})}
if env:
env = {k: v for k, v in {**os.environ, **env}.items()
if v is not None}
else:
env = None
return env | 0.371593 | 0.084682 |
# License for THIS FILE ONLY: CC0 Public Domain Dedication
# http://creativecommons.org/publicdomain/zero/1.0/
from __future__ import absolute_import, division, with_statement
from textwrap import dedent
from pyflyby._format import FormatParams, fill, pyfill
def test_fill_1():
result = fill(["'hello world'", "'hello two'"],
prefix=("print ", " "), suffix=(" \\", ""),
max_line_length=25)
expected = "print 'hello world', \\\n 'hello two'\n"
assert result == expected
def test_pyfill_1():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"])
expected = 'print foo.bar, baz, quux, quuuuux\n'
assert result == expected
def test_pyfill_2():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=15))
expected = dedent("""
print (foo.bar,
baz,
quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_3():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='always'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_4():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='always'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_5():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='auto'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_never_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='never')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (x23456789a123456789b123456789c123456789,
z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_always_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='always')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (
x23456789a123456789b123456789c123456789, z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_auto_yes_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='auto')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (
x23456789a123456789b123456789c123456789, z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_auto_no_1():
prefix = 'from foo import '
# <---------------38 chars-------------->
tokens = ['x23456789a123456789b123456789c12345678','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='auto')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (x23456789a123456789b123456789c12345678,
z1, z2)
""").lstrip()
assert result == expected | tests/test_format.py |
# License for THIS FILE ONLY: CC0 Public Domain Dedication
# http://creativecommons.org/publicdomain/zero/1.0/
from __future__ import absolute_import, division, with_statement
from textwrap import dedent
from pyflyby._format import FormatParams, fill, pyfill
def test_fill_1():
result = fill(["'hello world'", "'hello two'"],
prefix=("print ", " "), suffix=(" \\", ""),
max_line_length=25)
expected = "print 'hello world', \\\n 'hello two'\n"
assert result == expected
def test_pyfill_1():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"])
expected = 'print foo.bar, baz, quux, quuuuux\n'
assert result == expected
def test_pyfill_2():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=15))
expected = dedent("""
print (foo.bar,
baz,
quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_3():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='always'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_4():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='always'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_5():
result = pyfill('print ', ["foo.bar", "baz", "quux", "quuuuux"],
FormatParams(max_line_length=14, hanging_indent='auto'))
expected = dedent("""
print (
foo.bar,
baz, quux,
quuuuux)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_never_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='never')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (x23456789a123456789b123456789c123456789,
z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_always_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='always')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (
x23456789a123456789b123456789c123456789, z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_auto_yes_1():
prefix = 'from foo import '
# <---------------39 chars-------------->
tokens = ['x23456789a123456789b123456789c123456789','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='auto')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (
x23456789a123456789b123456789c123456789, z1, z2)
""").lstrip()
assert result == expected
def test_pyfill_hanging_indent_auto_no_1():
prefix = 'from foo import '
# <---------------38 chars-------------->
tokens = ['x23456789a123456789b123456789c12345678','z1','z2']
params = FormatParams(max_line_length=79, hanging_indent='auto')
result = pyfill(prefix, tokens, params)
expected = dedent("""
from foo import (x23456789a123456789b123456789c12345678,
z1, z2)
""").lstrip()
assert result == expected | 0.712232 | 0.474266 |
from ffi_navigator import langserver
from ffi_navigator.util import join_path, normalize_path
import logging
import os
curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
def run_find_definition(server, path, line, character):
uri = langserver.path2uri(path)
res = server.m_text_document__definition(
textDocument={"uri": uri},
position={"line": line, "character": character })
return res
def run_find_references(server, path, line, character):
uri = langserver.path2uri(path)
res = server.m_text_document__references(
textDocument={"uri": uri},
position={"line": line, "character": character })
return res
def test_tvm_dialect():
def test_dummy_repo():
# test and verify against dummy repo
tvm_path = os.path.join(curr_path, "..", "dummy_repo", "tvm")
server = langserver.BaseServer()
server.m_initialize(rootUri=langserver.path2uri(tvm_path))
# Constant
res = run_find_definition(server,
join_path(tvm_path, "python/tvm/relay/expr.py"),
15, 14)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("expr.h"))
assert(res[0]['range']['start']['line'] == 33)
# _make.ProducerConsumer
res = run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
26, 30)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("api_ir.cc"))
assert(res[0]['range']['start']['line'] == 14)
# _make.LetStmt
res = run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
46, 20)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("api_ir.cc"))
assert(res[0]['range']['start']['line'] == 15)
# Get("relay.backend.lower") from c++ to python
res = run_find_definition(server,
join_path(tvm_path, "src/relay/backend/compile_engine.cc"),
74, 59)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("_backend.py"))
assert(res[0]['range']['start']['line'] == 8)
# Variable
res = run_find_references(server,
join_path(tvm_path, "include/tvm/expr.h"),
15, 49)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("expr.py"))
assert(res[1]['range']['start']['line'] == 15)
# TVM_REGISTER_GLOBAL("_min_value")
res = run_find_references(server,
join_path(tvm_path, "src/api/api_lang.cc"),
15, 33)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("api.py"))
assert(res[1]['range']['start']['line'] == 24)
# _make.Constant
res = run_find_references(server,
join_path(tvm_path, "src/relay/ir/expr.cc"),
16, 33)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("expr.py"))
assert(res[1]['range']['start']['line'] == 24)
# REGISTER_MAKE(ProducerConsumer)
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
14, 25)
assert(len(res) == 3)
assert(res[1]['uri'].endswith("stmt.py"))
assert(res[1]['range']['start']['line'] == 26)
# REGISTER_MAKE(LetStmt)
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
15, 18)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("stmt.py"))
assert(res[1]['range']['start']['line'] == 46)
# @register_func("relay.backend.build")
res = run_find_references(server,
join_path(tvm_path, "python/tvm/relay/backend/_backend.py"),
26, 30)
assert(len(res) == 3)
assert(res[1]['uri'].endswith("compile_engine.cc"))
assert(res[1]['range']['start']['line'] == 90)
assert(res[2]['uri'].endswith("interpreter.cc"))
assert(res[2]['range']['start']['line'] == 115)
# _pass.Simplify(end - begin)
res = run_find_references(server,
join_path(tvm_path, "python/tvm/ir_builder.py"),
20, 48)
assert(len(res) == 6)
assert(res[0]['uri'].endswith("api_pass.cc"))
assert(res[0]['range']['start']['line'] == 10)
assert(res[1]['uri'].endswith(normalize_path("autotvm/util.py")))
assert(res[1]['range']['start']['line'] == 26)
assert(res[2]['uri'].endswith(normalize_path("autotvm/util.py")))
assert(res[2]['range']['start']['line'] == 50)
assert(res[3]['uri'].endswith("build_module.py"))
assert(res[3]['range']['start']['line'] == 98)
assert(res[4]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[4]['range']['start']['line'] == 43)
# REGISTER_MAKE(Provide);
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
16, 15)
assert(len(res) == 6)
assert(res[1]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[1]['range']['start']['line'] == 75)
assert(res[2]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[2]['range']['start']['line'] == 81)
assert(res[3]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[3]['range']['start']['line'] == 97)
assert(res[4]['uri'].endswith(normalize_path("hybrid/util.py")))
assert(res[4]['range']['start']['line'] == 20)
assert(res[5]['uri'].endswith("stmt.py"))
assert(res[5]['range']['start']['line'] == 68)
def test_real_repo():
# tested on tvm git tag e69bd1284b50630df570b3a5779a801982203756
tvm_path = os.path.join(curr_path, "..", "..", "..", "tvm")
if not os.path.exists(tvm_path):
logging.info("Skip tvm tests")
return
server = langserver.BaseServer()
server.m_initialize(rootUri=langserver.path2uri(tvm_path))
run_find_references(server,
join_path(tvm_path, "src/runtime/module.cc"),
198, 34)
run_find_references(server,
join_path(tvm_path, "python/tvm/api.py"),
58, 33)
run_find_definition(server,
join_path(tvm_path, "python/tvm/relay/expr.py"),
177, 14)
run_find_references(server,
join_path(tvm_path, "src/relay/ir/expr.cc"),
39, 33)
run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
96, 34)
run_find_references(server,
join_path(tvm_path, "python/tvm/stmt.py"),
96, 34)
run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
56, 18)
run_find_references(server,
join_path(tvm_path, "python/tvm/stmt.py"),
56, 18)
run_find_definition(server,
join_path(tvm_path, "src/relay/backend/compile_engine.cc"),
730, 59)
run_find_references(server,
join_path(tvm_path, "src/relay/backend/compile_engine.cc"),
730, 59)
# TVM_REGISTER_API("ir_pass.Simplify")
res = run_find_references(server,
join_path(tvm_path, "src/api/api_pass.cc"),
33, 30)
assert(len(res) == 6)
# _pass.Simplify(end - begin)
res = run_find_references(server,
join_path(tvm_path, "python/tvm/ir_builder.py"),
214, 48)
assert(len(res) == 6)
# REGISTER_MAKE(Provide);
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
156, 15)
assert(len(res) == 6)
#test_real_repo()
test_dummy_repo()
def test_torch_dialect():
pytorch_path = os.path.join(curr_path, "..", "dummy_repo", "pytorch")
server = langserver.BaseServer()
uri = langserver.path2uri(pytorch_path)
server.m_initialize(rootUri=uri)
# ops.quantized.conv2d
res = run_find_definition(server,
join_path(pytorch_path, "torch/nn/quantized/modules/conv.py"),
38, 28)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("qconv.cpp"))
assert(res[0]['range']['start']['line'] == 2)
# torch._C._jit_script_class_compile
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
20, 50)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 126)
# torch._C.CompilationUnit()
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
25, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 1)
assert(res[0]['range']['end']['character'] == 27)
# torch.conv2d
res = run_find_definition(server,
join_path(pytorch_path, "torch/nn/functional.py"),
16, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_torch_functions.cpp"))
assert(res[0]['range']['start']['line'] == 2)
# module._c._create_method_from_trace
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
61, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 105)
# self._c._get_method(attr)
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
106, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 21)
# self._c._define(self._concrete_type, src, rcb)
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
98, 18)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 94)
# Variable._execution_engine.run_backward
res = run_find_definition(server,
join_path(pytorch_path, "torch/autograd/__init__.py"),
24, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_engine.cpp"))
assert(res[0]['range']['start']['line'] == 13)
# _C._FunctionBase._do_forward
res = run_find_definition(server,
join_path(pytorch_path, "torch/autograd/function.py"),
5, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_function.cpp"))
assert(res[0]['range']['start']['line'] == 11)
# torch._C._get_qengine()
res = run_find_definition(server,
join_path(pytorch_path, "torch/backends/quantized/__init__.py"),
6, 45)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("Module.cpp"))
assert(res[0]['range']['start']['line'] == 46)
def test_mxnet_dialect():
mx_path = os.path.join(curr_path, "..", "dummy_repo", "mxnet")
server = langserver.BaseServer()
uri = langserver.path2uri(mx_path)
server.m_initialize(rootUri=uri)
res = run_find_definition(server,
join_path(mx_path, "python/mxnet/executor.py"),
55, 35)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("c_api_executor.cc"))
assert(res[0]['range']['start']['line'] == 25)
def test_dgl_dialect():
dgl_path = os.path.join(curr_path, "..", "dummy_repo", "dgl")
server = langserver.BaseServer()
uri = langserver.path2uri(dgl_path)
server.m_initialize(rootUri=uri)
res = run_find_definition(server,
join_path(dgl_path, "python/dgl/nodeflow.py"),
16, 20)
# assert(len(res) > 0)
def test_taichi_dialect():
ti_path = os.path.join(curr_path, "..", "dummy_repo", "taichi")
server = langserver.BaseServer()
uri = langserver.path2uri(ti_path)
server.m_initialize(rootUri=uri)
# ti.core.global_var_expr_from_snode
res = run_find_definition(server,
join_path(ti_path, "python/taichi/lang/snode.py"),
4, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_bindings.cpp"))
assert(res[0]['range']['start']['line'] == 8)
# taichi_lang_core.create_kernel
res = run_find_definition(server,
join_path(ti_path, "python/taichi/lang/kernel.py"),
74, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_bindings.cpp"))
assert(res[0]['range']['start']['line'] == 11)
# tc_core.Array2DVector4
res = run_find_definition(server,
join_path(ti_path, "python/taichi/misc/util.py"),
34, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("export_math.cpp"))
assert(res[0]['range']['start']['line'] == 12)
# core.get_current_program()
res = run_find_definition(server,
join_path(ti_path, "python/taichi/lang/__init__.py"),
10, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_bindings.cpp"))
assert(res[0]['range']['start']['line'] == 15)
if __name__ == "__main__":
# eyeballing test script
logging.basicConfig(level=logging.INFO, format="[%(asctime)-15s] %(message)s")
test_tvm_dialect()
test_torch_dialect()
test_mxnet_dialect()
test_dgl_dialect()
test_taichi_dialect() | tests/python/test_langserver.py | from ffi_navigator import langserver
from ffi_navigator.util import join_path, normalize_path
import logging
import os
curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
def run_find_definition(server, path, line, character):
uri = langserver.path2uri(path)
res = server.m_text_document__definition(
textDocument={"uri": uri},
position={"line": line, "character": character })
return res
def run_find_references(server, path, line, character):
uri = langserver.path2uri(path)
res = server.m_text_document__references(
textDocument={"uri": uri},
position={"line": line, "character": character })
return res
def test_tvm_dialect():
def test_dummy_repo():
# test and verify against dummy repo
tvm_path = os.path.join(curr_path, "..", "dummy_repo", "tvm")
server = langserver.BaseServer()
server.m_initialize(rootUri=langserver.path2uri(tvm_path))
# Constant
res = run_find_definition(server,
join_path(tvm_path, "python/tvm/relay/expr.py"),
15, 14)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("expr.h"))
assert(res[0]['range']['start']['line'] == 33)
# _make.ProducerConsumer
res = run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
26, 30)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("api_ir.cc"))
assert(res[0]['range']['start']['line'] == 14)
# _make.LetStmt
res = run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
46, 20)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("api_ir.cc"))
assert(res[0]['range']['start']['line'] == 15)
# Get("relay.backend.lower") from c++ to python
res = run_find_definition(server,
join_path(tvm_path, "src/relay/backend/compile_engine.cc"),
74, 59)
assert(len(res) == 1)
assert(res[0]['uri'].endswith("_backend.py"))
assert(res[0]['range']['start']['line'] == 8)
# Variable
res = run_find_references(server,
join_path(tvm_path, "include/tvm/expr.h"),
15, 49)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("expr.py"))
assert(res[1]['range']['start']['line'] == 15)
# TVM_REGISTER_GLOBAL("_min_value")
res = run_find_references(server,
join_path(tvm_path, "src/api/api_lang.cc"),
15, 33)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("api.py"))
assert(res[1]['range']['start']['line'] == 24)
# _make.Constant
res = run_find_references(server,
join_path(tvm_path, "src/relay/ir/expr.cc"),
16, 33)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("expr.py"))
assert(res[1]['range']['start']['line'] == 24)
# REGISTER_MAKE(ProducerConsumer)
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
14, 25)
assert(len(res) == 3)
assert(res[1]['uri'].endswith("stmt.py"))
assert(res[1]['range']['start']['line'] == 26)
# REGISTER_MAKE(LetStmt)
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
15, 18)
assert(len(res) == 2)
assert(res[1]['uri'].endswith("stmt.py"))
assert(res[1]['range']['start']['line'] == 46)
# @register_func("relay.backend.build")
res = run_find_references(server,
join_path(tvm_path, "python/tvm/relay/backend/_backend.py"),
26, 30)
assert(len(res) == 3)
assert(res[1]['uri'].endswith("compile_engine.cc"))
assert(res[1]['range']['start']['line'] == 90)
assert(res[2]['uri'].endswith("interpreter.cc"))
assert(res[2]['range']['start']['line'] == 115)
# _pass.Simplify(end - begin)
res = run_find_references(server,
join_path(tvm_path, "python/tvm/ir_builder.py"),
20, 48)
assert(len(res) == 6)
assert(res[0]['uri'].endswith("api_pass.cc"))
assert(res[0]['range']['start']['line'] == 10)
assert(res[1]['uri'].endswith(normalize_path("autotvm/util.py")))
assert(res[1]['range']['start']['line'] == 26)
assert(res[2]['uri'].endswith(normalize_path("autotvm/util.py")))
assert(res[2]['range']['start']['line'] == 50)
assert(res[3]['uri'].endswith("build_module.py"))
assert(res[3]['range']['start']['line'] == 98)
assert(res[4]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[4]['range']['start']['line'] == 43)
# REGISTER_MAKE(Provide);
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
16, 15)
assert(len(res) == 6)
assert(res[1]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[1]['range']['start']['line'] == 75)
assert(res[2]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[2]['range']['start']['line'] == 81)
assert(res[3]['uri'].endswith(normalize_path("hybrid/parser.py")))
assert(res[3]['range']['start']['line'] == 97)
assert(res[4]['uri'].endswith(normalize_path("hybrid/util.py")))
assert(res[4]['range']['start']['line'] == 20)
assert(res[5]['uri'].endswith("stmt.py"))
assert(res[5]['range']['start']['line'] == 68)
def test_real_repo():
# tested on tvm git tag e69bd1284b50630df570b3a5779a801982203756
tvm_path = os.path.join(curr_path, "..", "..", "..", "tvm")
if not os.path.exists(tvm_path):
logging.info("Skip tvm tests")
return
server = langserver.BaseServer()
server.m_initialize(rootUri=langserver.path2uri(tvm_path))
run_find_references(server,
join_path(tvm_path, "src/runtime/module.cc"),
198, 34)
run_find_references(server,
join_path(tvm_path, "python/tvm/api.py"),
58, 33)
run_find_definition(server,
join_path(tvm_path, "python/tvm/relay/expr.py"),
177, 14)
run_find_references(server,
join_path(tvm_path, "src/relay/ir/expr.cc"),
39, 33)
run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
96, 34)
run_find_references(server,
join_path(tvm_path, "python/tvm/stmt.py"),
96, 34)
run_find_definition(server,
join_path(tvm_path, "python/tvm/stmt.py"),
56, 18)
run_find_references(server,
join_path(tvm_path, "python/tvm/stmt.py"),
56, 18)
run_find_definition(server,
join_path(tvm_path, "src/relay/backend/compile_engine.cc"),
730, 59)
run_find_references(server,
join_path(tvm_path, "src/relay/backend/compile_engine.cc"),
730, 59)
# TVM_REGISTER_API("ir_pass.Simplify")
res = run_find_references(server,
join_path(tvm_path, "src/api/api_pass.cc"),
33, 30)
assert(len(res) == 6)
# _pass.Simplify(end - begin)
res = run_find_references(server,
join_path(tvm_path, "python/tvm/ir_builder.py"),
214, 48)
assert(len(res) == 6)
# REGISTER_MAKE(Provide);
res = run_find_references(server,
join_path(tvm_path, "src/api/api_ir.cc"),
156, 15)
assert(len(res) == 6)
#test_real_repo()
test_dummy_repo()
def test_torch_dialect():
pytorch_path = os.path.join(curr_path, "..", "dummy_repo", "pytorch")
server = langserver.BaseServer()
uri = langserver.path2uri(pytorch_path)
server.m_initialize(rootUri=uri)
# ops.quantized.conv2d
res = run_find_definition(server,
join_path(pytorch_path, "torch/nn/quantized/modules/conv.py"),
38, 28)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("qconv.cpp"))
assert(res[0]['range']['start']['line'] == 2)
# torch._C._jit_script_class_compile
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
20, 50)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 126)
# torch._C.CompilationUnit()
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
25, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 1)
assert(res[0]['range']['end']['character'] == 27)
# torch.conv2d
res = run_find_definition(server,
join_path(pytorch_path, "torch/nn/functional.py"),
16, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_torch_functions.cpp"))
assert(res[0]['range']['start']['line'] == 2)
# module._c._create_method_from_trace
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
61, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 105)
# self._c._get_method(attr)
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
106, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 21)
# self._c._define(self._concrete_type, src, rcb)
res = run_find_definition(server,
join_path(pytorch_path, "torch/jit/__init__.py"),
98, 18)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("init.cpp"))
assert(res[0]['range']['start']['line'] == 94)
# Variable._execution_engine.run_backward
res = run_find_definition(server,
join_path(pytorch_path, "torch/autograd/__init__.py"),
24, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_engine.cpp"))
assert(res[0]['range']['start']['line'] == 13)
# _C._FunctionBase._do_forward
res = run_find_definition(server,
join_path(pytorch_path, "torch/autograd/function.py"),
5, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_function.cpp"))
assert(res[0]['range']['start']['line'] == 11)
# torch._C._get_qengine()
res = run_find_definition(server,
join_path(pytorch_path, "torch/backends/quantized/__init__.py"),
6, 45)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("Module.cpp"))
assert(res[0]['range']['start']['line'] == 46)
def test_mxnet_dialect():
mx_path = os.path.join(curr_path, "..", "dummy_repo", "mxnet")
server = langserver.BaseServer()
uri = langserver.path2uri(mx_path)
server.m_initialize(rootUri=uri)
res = run_find_definition(server,
join_path(mx_path, "python/mxnet/executor.py"),
55, 35)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("c_api_executor.cc"))
assert(res[0]['range']['start']['line'] == 25)
def test_dgl_dialect():
dgl_path = os.path.join(curr_path, "..", "dummy_repo", "dgl")
server = langserver.BaseServer()
uri = langserver.path2uri(dgl_path)
server.m_initialize(rootUri=uri)
res = run_find_definition(server,
join_path(dgl_path, "python/dgl/nodeflow.py"),
16, 20)
# assert(len(res) > 0)
def test_taichi_dialect():
ti_path = os.path.join(curr_path, "..", "dummy_repo", "taichi")
server = langserver.BaseServer()
uri = langserver.path2uri(ti_path)
server.m_initialize(rootUri=uri)
# ti.core.global_var_expr_from_snode
res = run_find_definition(server,
join_path(ti_path, "python/taichi/lang/snode.py"),
4, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_bindings.cpp"))
assert(res[0]['range']['start']['line'] == 8)
# taichi_lang_core.create_kernel
res = run_find_definition(server,
join_path(ti_path, "python/taichi/lang/kernel.py"),
74, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_bindings.cpp"))
assert(res[0]['range']['start']['line'] == 11)
# tc_core.Array2DVector4
res = run_find_definition(server,
join_path(ti_path, "python/taichi/misc/util.py"),
34, 40)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("export_math.cpp"))
assert(res[0]['range']['start']['line'] == 12)
# core.get_current_program()
res = run_find_definition(server,
join_path(ti_path, "python/taichi/lang/__init__.py"),
10, 30)
assert(len(res) > 0)
assert(res[0]['uri'].endswith("python_bindings.cpp"))
assert(res[0]['range']['start']['line'] == 15)
if __name__ == "__main__":
# eyeballing test script
logging.basicConfig(level=logging.INFO, format="[%(asctime)-15s] %(message)s")
test_tvm_dialect()
test_torch_dialect()
test_mxnet_dialect()
test_dgl_dialect()
test_taichi_dialect() | 0.393851 | 0.316184 |
from __future__ import absolute_import
from central.decoders import Decoder
from central.exceptions import DecoderError
from datetime import date, datetime, time
from unittest import TestCase
class TestDecoder(TestCase):
    """Unit tests for :class:`central.decoders.Decoder`.

    Covers the singleton accessor, the converter registry, and decoding to
    every supported target type (bool, float, int, str, list, dict, date,
    datetime), including invalid-input paths that must raise
    :class:`central.exceptions.DecoderError`.
    """

    def test_get_instance(self):
        """``instance()`` returns a shared Decoder singleton."""
        self.assertEqual(Decoder, type(Decoder.instance()))
        self.assertEqual(Decoder.instance(), Decoder.instance())

    def test_get_converters(self):
        """The converter registry is exposed as a dict."""
        self.assertEqual(dict, type(Decoder().converters))

    def test_decode_with_none_as_value(self):
        """``None`` as either the value or the cast raises ValueError."""
        decoder = Decoder()
        self.assertRaises(ValueError, decoder.decode, None, str)
        self.assertRaises(ValueError, decoder.decode, 'aa', None)

    def test_decode_with_invalid_cast(self):
        """A cast that is not a registered type raises DecoderError."""
        decoder = Decoder()
        self.assertRaises(DecoderError, decoder.decode, 'aa', 'invalid_cast')

    def test_decode_with_non_possible_cast(self):
        """A value that cannot be converted to the cast raises DecoderError."""
        decoder = Decoder()
        with self.assertRaises(DecoderError):
            decoder.decode('aa', int)

    def test_decode_bool_with_bool_value(self):
        """A bool value is passed through unchanged."""
        self.assertEqual(True, Decoder().decode(True, bool))

    def test_decode_bool_with_true_values(self):
        """Every registered truthy token decodes to True."""
        for value in Decoder.true_values:
            self.assertEqual(True, Decoder().decode(value, bool))

    def test_decode_bool_with_false_values(self):
        """Every registered falsy token decodes to False."""
        for value in Decoder.false_values:
            self.assertEqual(False, Decoder().decode(value, bool))

    def test_decode_bool_with_dict_value(self):
        """A dict cannot be decoded to bool."""
        with self.assertRaises(DecoderError):
            Decoder().decode({}, bool)

    def test_decode_float_with_str_value(self):
        self.assertEqual(1.0, Decoder().decode('1', float))

    def test_decode_float_with_int_value(self):
        self.assertEqual(1.0, Decoder().decode(1, float))

    def test_decode_float_with_dict_value(self):
        with self.assertRaises(DecoderError):
            Decoder().decode({}, float)

    def test_decode_int_with_str_value(self):
        self.assertEqual(1, Decoder().decode('1', int))

    def test_decode_int_with_float_value(self):
        # BUGFIX: the cast was `float`, which duplicated the float test and
        # never exercised float -> int decoding. Decode to `int` as the
        # method name states.
        self.assertEqual(1, Decoder().decode(1.0, int))

    def test_decode_int_with_dict_value(self):
        with self.assertRaises(DecoderError):
            Decoder().decode(1, int)

    def test_decode_str_with_int_value(self):
        self.assertEqual('1', Decoder().decode(1, str))

    def test_decode_str_with_float_value(self):
        self.assertEqual('1.0', Decoder().decode(1.0, str))

    def test_decode_str_with_dict_value(self):
        self.assertEqual('{}', Decoder().decode({}, str))

    def test_decode_list_with_list_value(self):
        self.assertEqual([1], Decoder().decode([1], list))

    def test_decode_list_with_tuple_value(self):
        self.assertEqual([1], Decoder().decode((1,), list))

    def test_decode_list_with_str_value(self):
        """Comma-separated strings split into a list of strings."""
        self.assertEqual(['1', '2', '3'], Decoder().decode('1,2,3', list))

    def test_decode_list_with_int_value(self):
        with self.assertRaises(DecoderError):
            Decoder().decode(1, list)

    def test_decode_dict_with_dict_value(self):
        self.assertEqual({'key': 1}, Decoder().decode({'key': 1}, dict))

    def test_decode_dict_with_str_value(self):
        """``key=value`` pairs separated by ``;`` decode to a dict."""
        self.assertEqual({'key1': '1', 'key2': '2'}, Decoder().decode('key1=1;key2=2', dict))

    def test_decode_dict_with_int_value(self):
        with self.assertRaises(DecoderError):
            Decoder().decode(1, dict)

    def test_decode_date_with_date_value(self):
        """A date value is passed through unchanged."""
        d = date(2017, 3, 8)
        self.assertEqual(d, Decoder().decode(d, date))

    def test_decode_date_with_datetime_value(self):
        """A datetime is truncated to its date component."""
        d = datetime(2017, 3, 8, 14, 42, 30)
        self.assertEqual(d.date(), Decoder().decode(d, date))

    def test_decode_date_with_int_value(self):
        with self.assertRaises(DecoderError):
            Decoder().decode(1, date)

    def test_decode_date_with_str_value(self):
        """A non-ISO string cannot be decoded to a date."""
        with self.assertRaises(DecoderError):
            Decoder().decode('some value', date)

    def test_decode_date_with_iso_str_value(self):
        """An ISO-8601 date string round-trips to the same date."""
        expected = date(2017, 3, 8)
        s = expected.isoformat()
        self.assertEqual(expected, Decoder().decode(s, date))

    def test_decode_datetime_with_date_value(self):
        """A date is promoted to a midnight datetime."""
        d = datetime.now().date()
        expected = datetime(d.year, d.month, d.day)
        self.assertEqual(expected, Decoder().decode(d, datetime))

    def test_decode_datetime_with_datetime_value(self):
        """A datetime value is passed through unchanged."""
        d = datetime.now()
        self.assertEqual(d, Decoder().decode(d, datetime))

    def test_decode_datetime_with_int_value(self):
        with self.assertRaises(DecoderError):
            Decoder().decode(1, datetime)

    def test_decode_datetime_with_str_value(self):
        """A non-ISO string cannot be decoded to a datetime."""
        with self.assertRaises(DecoderError):
            Decoder().decode('some value', datetime)

    def test_decode_datetime_with_iso_str_value(self):
        """An ISO-8601 datetime string round-trips to the same datetime."""
        expected = datetime(2017, 3, 8, 12, 40, 30)
        s = expected.isoformat()
        self.assertEqual(expected, Decoder().decode(s, datetime))
def test_decode_time_with_time_value(self):
expected = time(12, 40, 30)
self.assertEqual(expected, Decoder().decode(expected, time))
def test_decode_time_with_datetime_value(self):
d = datetime(2017, 3, 8, 12, 40, 30)
expected = time(12, 40, 30)
self.assertEqual(expected, Decoder().decode(d, time))
def test_decode_time_with_int_value(self):
with self.assertRaises(DecoderError):
Decoder().decode(1, time)
def test_decode_time_with_str_value(self):
with self.assertRaises(DecoderError):
Decoder().decode('some value', time)
def test_decode_time_with_iso_str_value(self):
expected = time(12, 40, 30)
s = expected.isoformat()
self.assertEqual(expected, Decoder().decode(s, time))
expected = time(12, 40, 30, 800)
s = expected.isoformat()
self.assertEqual(expected, Decoder().decode(s, time)) | tests/test_decoders.py | from __future__ import absolute_import
from central.decoders import Decoder
from central.exceptions import DecoderError
from datetime import date, datetime, time
from unittest import TestCase
class TestDecoder(TestCase):
def test_get_instance(self):
self.assertEqual(Decoder, type(Decoder.instance()))
self.assertEqual(Decoder.instance(), Decoder.instance())
def test_get_converters(self):
self.assertEqual(dict, type(Decoder().converters))
def test_decode_with_none_as_value(self):
decoder = Decoder()
self.assertRaises(ValueError, decoder.decode, None, str)
self.assertRaises(ValueError, decoder.decode, 'aa', None)
def test_decode_with_invalid_cast(self):
decoder = Decoder()
self.assertRaises(DecoderError, decoder.decode, 'aa', 'invalid_cast')
def test_decode_with_non_possible_cast(self):
decoder = Decoder()
with self.assertRaises(DecoderError):
decoder.decode('aa', int)
def test_decode_bool_with_bool_value(self):
self.assertEqual(True, Decoder().decode(True, bool))
def test_decode_bool_with_true_values(self):
for value in Decoder.true_values:
self.assertEqual(True, Decoder().decode(value, bool))
def test_decode_bool_with_false_values(self):
for value in Decoder.false_values:
self.assertEqual(False, Decoder().decode(value, bool))
def test_decode_bool_with_dict_value(self):
with self.assertRaises(DecoderError):
Decoder().decode({}, bool)
def test_decode_float_with_str_value(self):
self.assertEqual(1.0, Decoder().decode('1', float))
def test_decode_float_with_int_value(self):
self.assertEqual(1.0, Decoder().decode(1, float))
def test_decode_float_with_dict_value(self):
with self.assertRaises(DecoderError):
Decoder().decode({}, float)
def test_decode_int_with_str_value(self):
self.assertEqual(1, Decoder().decode('1', int))
def test_decode_int_with_float_value(self):
self.assertEqual(1, Decoder().decode(1.0, float))
def test_decode_int_with_dict_value(self):
with self.assertRaises(DecoderError):
Decoder().decode({}, int)
def test_decode_str_with_int_value(self):
self.assertEqual('1', Decoder().decode(1, str))
def test_decode_str_with_float_value(self):
self.assertEqual('1.0', Decoder().decode(1.0, str))
def test_decode_str_with_dict_value(self):
self.assertEqual('{}', Decoder().decode({}, str))
def test_decode_list_with_list_value(self):
self.assertEqual([1], Decoder().decode([1], list))
def test_decode_list_with_tuple_value(self):
self.assertEqual([1], Decoder().decode((1,), list))
def test_decode_list_with_str_value(self):
self.assertEqual(['1', '2', '3'], Decoder().decode('1,2,3', list))
def test_decode_list_with_int_value(self):
with self.assertRaises(DecoderError):
Decoder().decode(1, list)
def test_decode_dict_with_dict_value(self):
self.assertEqual({'key': 1}, Decoder().decode({'key': 1}, dict))
def test_decode_dict_with_str_value(self):
self.assertEqual({'key1': '1', 'key2': '2'}, Decoder().decode('key1=1;key2=2', dict))
def test_decode_dict_with_int_value(self):
with self.assertRaises(DecoderError):
Decoder().decode(1, dict)
def test_decode_date_with_date_value(self):
d = date(2017, 3, 8)
self.assertEqual(d, Decoder().decode(d, date))
def test_decode_date_with_datetime_value(self):
d = datetime(2017, 3, 8, 14, 42, 30)
self.assertEqual(d.date(), Decoder().decode(d, date))
def test_decode_date_with_int_value(self):
with self.assertRaises(DecoderError):
Decoder().decode(1, date)
def test_decode_date_with_str_value(self):
with self.assertRaises(DecoderError):
Decoder().decode('some value', date)
def test_decode_date_with_iso_str_value(self):
expected = date(2017, 3, 8)
s = expected.isoformat()
self.assertEqual(expected, Decoder().decode(s, date))
def test_decode_datetime_with_date_value(self):
d = datetime.now().date()
expected = datetime(d.year, d.month, d.day)
self.assertEqual(expected, Decoder().decode(d, datetime))
def test_decode_datetime_with_datetime_value(self):
d = datetime.now()
self.assertEqual(d, Decoder().decode(d, datetime))
def test_decode_datetime_with_int_value(self):
with self.assertRaises(DecoderError):
Decoder().decode(1, datetime)
def test_decode_datetime_with_str_value(self):
with self.assertRaises(DecoderError):
Decoder().decode('some value', datetime)
def test_decode_datetime_with_iso_str_value(self):
expected = datetime(2017, 3, 8, 12, 40, 30)
s = expected.isoformat()
self.assertEqual(expected, Decoder().decode(s, datetime))
def test_decode_time_with_time_value(self):
expected = time(12, 40, 30)
self.assertEqual(expected, Decoder().decode(expected, time))
def test_decode_time_with_datetime_value(self):
d = datetime(2017, 3, 8, 12, 40, 30)
expected = time(12, 40, 30)
self.assertEqual(expected, Decoder().decode(d, time))
def test_decode_time_with_int_value(self):
with self.assertRaises(DecoderError):
Decoder().decode(1, time)
def test_decode_time_with_str_value(self):
with self.assertRaises(DecoderError):
Decoder().decode('some value', time)
def test_decode_time_with_iso_str_value(self):
expected = time(12, 40, 30)
s = expected.isoformat()
self.assertEqual(expected, Decoder().decode(s, time))
expected = time(12, 40, 30, 800)
s = expected.isoformat()
self.assertEqual(expected, Decoder().decode(s, time)) | 0.779867 | 0.604778 |
import os
import sys
import environs
import re
import datetime
import numpy as np
import math
import locale
import pandas as pd
from pandas import DataFrame
import click
import lib.utils as utils
from config import get_configs_by_filename
from zephir_db_utils import createZephirItemDetailsFileFromDB
from zephir_db_utils import find_marcxml_records_by_htid
from output_zephir_records import output_xmlrecords_by_htid
def test_zephir_search(db_connect_str):
id = "pur1.32754075735872"
results = find_marcxml_records_by_htid(db_connect_str, id)
for result in results:
print (result)
@click.command()
@click.option('-e', '--env', default="dev")
@click.option('-H', '--input-htid-file')
@click.option('-S', '--search-zephir-database', is_flag=True, help="Get Zephir items data from database.")
@click.option('-Z', '--zephir-items-file', default="./data/zephir_items.csv")
@click.option('-C', '--oclc-concordance-file', default="./data/zephir_concordance.csv")
@click.option('-o', '--output-marc-file', default="./output/marc_records_for_reload.xml")
@click.option('-c', '--output-cid-file', default="./output/cids_for_auto_split.txt")
def main(env, input_htid_file, search_zephir_database,
zephir_items_file, oclc_concordance_file, output_marc_file, output_cid_file):
print(env)
print(input_htid_file)
print(search_zephir_database)
print(zephir_items_file)
print(oclc_concordance_file)
print(output_marc_file)
print(output_cid_file)
configs= get_configs_by_filename('config', 'zephir_db')
db_connect_str = str(utils.db_connect_url(configs[env]))
#test_zephir_search(db_connect_str)
#exit()
if input_htid_file:
print("Output marc records from HTIDs defined in: {}".format(input_htid_file))
output_xmlrecords_by_htid(input_htid_file, output_marc_file, db_connect_str)
print("The marcxml records are save in file: {}".format(output_marc_file))
print("Finished processing.")
exit()
if search_zephir_database:
print("Get Zephir item details from the database")
print("Data will be saved in file {}".format(zephir_items_file))
createZephirItemDetailsFileFromDB(db_connect_str, zephir_items_file)
else:
print("Get Zephir item details from prepared file: {}".format(zephir_items_file))
print("Zephir item data contains fields: cid, oclc, contribsys_id, htid, z_record_autoid")
print("The data file does not contain a header line.")
f_output_split_clusters(zephir_items_file, oclc_concordance_file, output_cid_file)
print("CIDs for auto-split are saved in file: {}".format(output_cid_file))
def f_output_split_clusters(zephir_items_file, oclc_concordance_file, output_cid_file):
# memory use: 543 MB
print("")
print("Read in data to DF: cid, oclc, contribsys_id")
raw_zephir_item_detail = pd.read_csv(zephir_items_file, header=0, usecols=[0, 1, 2], names=["cid", "oclc", "contribsys_id"], dtype={"cid":int, "oclc":object, "contribsys_id":object}, error_bad_lines=False)
print("raw_zephir_item_detail: before drop duplicates")
print(raw_zephir_item_detail.info())
print(raw_zephir_item_detail.head())
raw_zephir_item_detail.drop_duplicates(inplace=True)
print("raw_zephir_item_detail: after drop duplicates")
print(raw_zephir_item_detail.info())
print(raw_zephir_item_detail.head())
# memory use: 724 MB
print("")
print("Cleanup Data")
zephir_item_detail = cleanupData(raw_zephir_item_detail)
del raw_zephir_item_detail
zephir_item_detail.drop_duplicates(inplace=True)
print("zephir_item_detail: after drop duplicates")
print(zephir_item_detail.info())
print(zephir_item_detail.head())
# 150 MB
print("")
print("Get Concordance data")
zephir_concordance_df = readCsvFileToDataFrame(oclc_concordance_file)
print(zephir_concordance_df.loc[zephir_concordance_df['primary'] == 569])
print(zephir_concordance_df.loc[zephir_concordance_df['oclc'] == 569])
# 980 MB
print("")
print("Join data frames")
analysis_df = createAnalysisDataframe(zephir_concordance_df, zephir_item_detail)
del zephir_item_detail
del zephir_concordance_df
analysis_df.drop_duplicates(inplace=True)
print("analysis_df: after drop duplicates")
print(analysis_df.info())
print(analysis_df.head(10))
print(analysis_df.loc[analysis_df['cid'] == 11])
print("")
print("Find CIDs with multiple OCLC primary numbers - Full Collection")
cids_with_multi_primary_fc_df = findCIDsWithMultipleOCNs(analysis_df)
print("")
print("Find Contributor system IDs with multiple OCLC primary numbers")
contribsys_ids_with_multi_primary_fc_df = findContribIDsWithWithMultipleOCNs(analysis_df)
del analysis_df
print("")
print("CIDs with multiple OCLCs, but no overlapping contribsys records")
auto_splitable_cids = subsetCIDsWithMultipleOCNs(cids_with_multi_primary_fc_df, contribsys_ids_with_multi_primary_fc_df)
del cids_with_multi_primary_fc_df
del contribsys_ids_with_multi_primary_fc_df
with open(output_cid_file, "w") as output_f:
for ind in auto_splitable_cids.index :
output_f.write("cid=" + ("000000000" + str(auto_splitable_cids["cid"][ind]))[-9:] + "\n")
def readCsvFileToDataFrame(file_path):
zephir_concordance_df = pd.read_csv(file_path, names=["oclc", "primary"], header=0)
print(zephir_concordance_df.info())
print(zephir_concordance_df.head())
return zephir_concordance_df
def cleanupData(zephir_item_detail):
#Step 5 - CLEANUP DATA
# coerce identifier data from objects, to numeric
zephir_item_detail["oclc"] = zephir_item_detail["oclc"].apply(lambda x: int(x) if str(x).isdigit() else None)
zephir_item_detail["oclc"] = zephir_item_detail["oclc"].apply(pd.to_numeric, errors='coerce')
# drop null rows
zephir_item_detail = zephir_item_detail.dropna()
# cast data as integers (the "to_numberic" causes change in type) - drops leading zeros
zephir_item_detail["oclc"] = zephir_item_detail["oclc"].astype(int)
print(zephir_item_detail.info())
print(zephir_item_detail.head())
return zephir_item_detail
def createAnalysisDataframe(zephir_concordance_df, zephir_item_detail):
print("## Step 6 - Analysis - join DFs by oclc")
# Create analysis table by joining with zephir concordance
# this results in a zephir table with both the original oclc and the primary oclc
analysis_df = zephir_item_detail.merge(zephir_concordance_df, on='oclc', how='left')
analysis_df["oclc"] = analysis_df["oclc"].astype('Int64')
analysis_df["primary"] = analysis_df["primary"].astype('Int64')
print(analysis_df.info())
print(analysis_df.head())
return analysis_df
def findCIDsWithMultipleOCNs(analysis_df):
print("## Step 7 - find CIDs with multiple OCNs")
df = analysis_df.copy()
df = df[['cid','primary']]
df = df.dropna()
df = df[~df.duplicated(subset=['cid','primary'],keep='first')]
fc_cids = len(df["cid"].unique())
df = df.groupby(["cid"]).size().reset_index().rename(columns={0:'primary_count'})
df = df[df['primary_count'] > 1]
print(df.info())
print(df.head())
return df
def findContribIDsWithWithMultipleOCNs(analysis_df):
print("## Step 8 - Contributor system IDs with multiple OCLC primary numbers")
df = analysis_df.copy()
df = df[['cid','primary','contribsys_id']]
df = df.dropna()
df = df[~df.duplicated(subset=['cid','primary','contribsys_id'],keep='first')]
df = df.groupby(["contribsys_id","cid"]).size().reset_index().rename(columns={0:'primary_count'})
df = df[df['primary_count'] > 1]
print(df.info())
print(df.head())
return df
def subsetCIDsWithMultipleOCNs(cid_df, contrib_df):
df = cid_df[~cid_df["cid"].isin(contrib_df["cid"].unique())]
print(df.info())
print(df.head())
return df
if __name__ == '__main__':
main()
#temp_run_output_xml_only() | marctools/output_zephir_records_for_auto_split.py | import os
import sys
import environs
import re
import datetime
import numpy as np
import math
import locale
import pandas as pd
from pandas import DataFrame
import click
import lib.utils as utils
from config import get_configs_by_filename
from zephir_db_utils import createZephirItemDetailsFileFromDB
from zephir_db_utils import find_marcxml_records_by_htid
from output_zephir_records import output_xmlrecords_by_htid
def test_zephir_search(db_connect_str):
id = "pur1.32754075735872"
results = find_marcxml_records_by_htid(db_connect_str, id)
for result in results:
print (result)
@click.command()
@click.option('-e', '--env', default="dev")
@click.option('-H', '--input-htid-file')
@click.option('-S', '--search-zephir-database', is_flag=True, help="Get Zephir items data from database.")
@click.option('-Z', '--zephir-items-file', default="./data/zephir_items.csv")
@click.option('-C', '--oclc-concordance-file', default="./data/zephir_concordance.csv")
@click.option('-o', '--output-marc-file', default="./output/marc_records_for_reload.xml")
@click.option('-c', '--output-cid-file', default="./output/cids_for_auto_split.txt")
def main(env, input_htid_file, search_zephir_database,
zephir_items_file, oclc_concordance_file, output_marc_file, output_cid_file):
print(env)
print(input_htid_file)
print(search_zephir_database)
print(zephir_items_file)
print(oclc_concordance_file)
print(output_marc_file)
print(output_cid_file)
configs= get_configs_by_filename('config', 'zephir_db')
db_connect_str = str(utils.db_connect_url(configs[env]))
#test_zephir_search(db_connect_str)
#exit()
if input_htid_file:
print("Output marc records from HTIDs defined in: {}".format(input_htid_file))
output_xmlrecords_by_htid(input_htid_file, output_marc_file, db_connect_str)
print("The marcxml records are save in file: {}".format(output_marc_file))
print("Finished processing.")
exit()
if search_zephir_database:
print("Get Zephir item details from the database")
print("Data will be saved in file {}".format(zephir_items_file))
createZephirItemDetailsFileFromDB(db_connect_str, zephir_items_file)
else:
print("Get Zephir item details from prepared file: {}".format(zephir_items_file))
print("Zephir item data contains fields: cid, oclc, contribsys_id, htid, z_record_autoid")
print("The data file does not contain a header line.")
f_output_split_clusters(zephir_items_file, oclc_concordance_file, output_cid_file)
print("CIDs for auto-split are saved in file: {}".format(output_cid_file))
def f_output_split_clusters(zephir_items_file, oclc_concordance_file, output_cid_file):
# memory use: 543 MB
print("")
print("Read in data to DF: cid, oclc, contribsys_id")
raw_zephir_item_detail = pd.read_csv(zephir_items_file, header=0, usecols=[0, 1, 2], names=["cid", "oclc", "contribsys_id"], dtype={"cid":int, "oclc":object, "contribsys_id":object}, error_bad_lines=False)
print("raw_zephir_item_detail: before drop duplicates")
print(raw_zephir_item_detail.info())
print(raw_zephir_item_detail.head())
raw_zephir_item_detail.drop_duplicates(inplace=True)
print("raw_zephir_item_detail: after drop duplicates")
print(raw_zephir_item_detail.info())
print(raw_zephir_item_detail.head())
# memory use: 724 MB
print("")
print("Cleanup Data")
zephir_item_detail = cleanupData(raw_zephir_item_detail)
del raw_zephir_item_detail
zephir_item_detail.drop_duplicates(inplace=True)
print("zephir_item_detail: after drop duplicates")
print(zephir_item_detail.info())
print(zephir_item_detail.head())
# 150 MB
print("")
print("Get Concordance data")
zephir_concordance_df = readCsvFileToDataFrame(oclc_concordance_file)
print(zephir_concordance_df.loc[zephir_concordance_df['primary'] == 569])
print(zephir_concordance_df.loc[zephir_concordance_df['oclc'] == 569])
# 980 MB
print("")
print("Join data frames")
analysis_df = createAnalysisDataframe(zephir_concordance_df, zephir_item_detail)
del zephir_item_detail
del zephir_concordance_df
analysis_df.drop_duplicates(inplace=True)
print("analysis_df: after drop duplicates")
print(analysis_df.info())
print(analysis_df.head(10))
print(analysis_df.loc[analysis_df['cid'] == 11])
print("")
print("Find CIDs with multiple OCLC primary numbers - Full Collection")
cids_with_multi_primary_fc_df = findCIDsWithMultipleOCNs(analysis_df)
print("")
print("Find Contributor system IDs with multiple OCLC primary numbers")
contribsys_ids_with_multi_primary_fc_df = findContribIDsWithWithMultipleOCNs(analysis_df)
del analysis_df
print("")
print("CIDs with multiple OCLCs, but no overlapping contribsys records")
auto_splitable_cids = subsetCIDsWithMultipleOCNs(cids_with_multi_primary_fc_df, contribsys_ids_with_multi_primary_fc_df)
del cids_with_multi_primary_fc_df
del contribsys_ids_with_multi_primary_fc_df
with open(output_cid_file, "w") as output_f:
for ind in auto_splitable_cids.index :
output_f.write("cid=" + ("000000000" + str(auto_splitable_cids["cid"][ind]))[-9:] + "\n")
def readCsvFileToDataFrame(file_path):
zephir_concordance_df = pd.read_csv(file_path, names=["oclc", "primary"], header=0)
print(zephir_concordance_df.info())
print(zephir_concordance_df.head())
return zephir_concordance_df
def cleanupData(zephir_item_detail):
#Step 5 - CLEANUP DATA
# coerce identifier data from objects, to numeric
zephir_item_detail["oclc"] = zephir_item_detail["oclc"].apply(lambda x: int(x) if str(x).isdigit() else None)
zephir_item_detail["oclc"] = zephir_item_detail["oclc"].apply(pd.to_numeric, errors='coerce')
# drop null rows
zephir_item_detail = zephir_item_detail.dropna()
# cast data as integers (the "to_numberic" causes change in type) - drops leading zeros
zephir_item_detail["oclc"] = zephir_item_detail["oclc"].astype(int)
print(zephir_item_detail.info())
print(zephir_item_detail.head())
return zephir_item_detail
def createAnalysisDataframe(zephir_concordance_df, zephir_item_detail):
print("## Step 6 - Analysis - join DFs by oclc")
# Create analysis table by joining with zephir concordance
# this results in a zephir table with both the original oclc and the primary oclc
analysis_df = zephir_item_detail.merge(zephir_concordance_df, on='oclc', how='left')
analysis_df["oclc"] = analysis_df["oclc"].astype('Int64')
analysis_df["primary"] = analysis_df["primary"].astype('Int64')
print(analysis_df.info())
print(analysis_df.head())
return analysis_df
def findCIDsWithMultipleOCNs(analysis_df):
print("## Step 7 - find CIDs with multiple OCNs")
df = analysis_df.copy()
df = df[['cid','primary']]
df = df.dropna()
df = df[~df.duplicated(subset=['cid','primary'],keep='first')]
fc_cids = len(df["cid"].unique())
df = df.groupby(["cid"]).size().reset_index().rename(columns={0:'primary_count'})
df = df[df['primary_count'] > 1]
print(df.info())
print(df.head())
return df
def findContribIDsWithWithMultipleOCNs(analysis_df):
print("## Step 8 - Contributor system IDs with multiple OCLC primary numbers")
df = analysis_df.copy()
df = df[['cid','primary','contribsys_id']]
df = df.dropna()
df = df[~df.duplicated(subset=['cid','primary','contribsys_id'],keep='first')]
df = df.groupby(["contribsys_id","cid"]).size().reset_index().rename(columns={0:'primary_count'})
df = df[df['primary_count'] > 1]
print(df.info())
print(df.head())
return df
def subsetCIDsWithMultipleOCNs(cid_df, contrib_df):
df = cid_df[~cid_df["cid"].isin(contrib_df["cid"].unique())]
print(df.info())
print(df.head())
return df
if __name__ == '__main__':
main()
#temp_run_output_xml_only() | 0.108732 | 0.066539 |
from flask import Module, request, abort, make_response, Response, jsonify, g, send_from_directory, current_app
from werkzeug import secure_filename
from p2ptracker import bencode
import logging
import hashlib
import os
from datetime import datetime
torrents = Module(__name__, url_prefix='/torrents')
log = logging.getLogger('hyves.p2ptracker.torrent')
@torrents.route('/<filename>.<ext>', methods=['DELETE'])
def delete(filename, ext):
torrentfile = "%s.%s" % (filename, ext)
if torrentfile not in get_all_torrents():
log.debug('torrentfile: %s not in %s' % (torrentfile, get_all_torrents()))
return abort(404, 'no such torrent')
if os.path.exists(os.path.join(current_app.config['UPLOAD_PATH'], torrentfile)):
log.debug('Deleting file: %s' % torrentfile)
os.remove(os.path.join(current_app.config['UPLOAD_PATH'], torrentfile))
return make_response(Response('', status=200))
return abort(500, 'something borked horribly')
@torrents.route('/<filename>.<ext>', methods=['POST'])
def post(filename, ext):
"""
Pull filename for torrent and actually uploaded torrentfile from request and register in redis
We add the torrent file to the all torrents set in redis and
register the torrentfile name along with the associated hash
"""
log.debug("Entering post method")
assert isinstance(filename, basestring)
assert isinstance(ext, basestring)
torrentfile = filename + '.' + ext
log.debug("torrentfile: %s" % torrentfile)
if ext != 'torrent':
return abort(501, "Invalid filename specified, needs to end in .torrent")
log.debug('request: %s' % request)
log.debug("%s" % request.files)
if len(request.files.keys()) > 1:
abort(400, 'Bad Request, multiple files uploaded')
save_torrent(torrentfile, request.files.values()[0])
data = request.files.values()[0]
data.seek(0)
torrentdata = bencode.bdecode(data.read())
ihash = get_infohash_from_torrent(torrentdata)
name = get_name_from_torrent(torrentdata)
length = get_length_from_torrent(torrentdata)
activate(ihash, name, length )
log.debug("file: %s" % torrentdata.keys())
return make_response(Response('', status=200))
@torrents.route('/', methods=['GET'])
@torrents.route('/<filename>.<ext>', methods=['GET'])
def get(filename=None, ext=None):
if not filename and not ext:
return jsonify(get_all_torrents())
torrentfile = "%s.%s" % (filename, ext)
if torrentfile not in get_all_torrents():
return abort(404, 'torrentfile not found')
return send_from_directory(current_app.config['UPLOAD_PATH'],
torrentfile, as_attachment=True)
def get_infohash_from_torrent(data):
"""Return the sha1 hash for the torrent"""
return hashlib.sha1(bencode.bencode(data['info'])).hexdigest()
def get_length_from_torrent(data):
"""Return torrent length in bytes"""
length = 0
if 'files' in data['info']:
length = sum(file['length'] for file in data['info']['files'])
else:
length += data['info']['length']
return length
def get_name_from_torrent(data):
return data['info']['name']
def save_torrent(torrentname, file):
assert os.path.exists(current_app.config['UPLOAD_PATH'])
filename = secure_filename(torrentname)
log.debug('filenames: %s, %s' % (file.filename, filename))
if os.path.basename(file.filename) != torrentname:
abort(500, '''filenames don't match''')
g.redis.sadd('torrents', torrentname)
file.save(os.path.join(current_app.config['UPLOAD_PATH'], filename))
log.info('saved file as %s' % os.path.join(current_app.config['UPLOAD_PATH'], filename))
file.seek(0)
ihash = get_infohash_from_torrent(bencode.bdecode(file.read()))
g.redis.set(torrentname, ihash)
def get_torrent(torrentname):
return g.redis.get(torrentname)
def get_all_torrents():
torrentfiles = list(g.redis.smembers('torrents'))
if torrentfiles:
return dict(zip(torrentfiles, list(g.redis.mget(torrentfiles))))
return dict()
def activate(ihash, name, length):
if not g.redis.sismember('transfers', ihash):
log.warning("Transfer: %s hasn't started yet" % ihash)
g.redis.sadd('transfers', ihash)
now = "%s" % datetime.now()
g.redis.sadd('active_transfers', ihash)
args = {
'%s:registered' % ihash: now,
'%s:name' % ihash: name,
'%s:length' % ihash: length
}
g.redis.mset(args ) | p2ptracker/torrents.py | from flask import Module, request, abort, make_response, Response, jsonify, g, send_from_directory, current_app
from werkzeug import secure_filename
from p2ptracker import bencode
import logging
import hashlib
import os
from datetime import datetime
torrents = Module(__name__, url_prefix='/torrents')
log = logging.getLogger('hyves.p2ptracker.torrent')
@torrents.route('/<filename>.<ext>', methods=['DELETE'])
def delete(filename, ext):
torrentfile = "%s.%s" % (filename, ext)
if torrentfile not in get_all_torrents():
log.debug('torrentfile: %s not in %s' % (torrentfile, get_all_torrents()))
return abort(404, 'no such torrent')
if os.path.exists(os.path.join(current_app.config['UPLOAD_PATH'], torrentfile)):
log.debug('Deleting file: %s' % torrentfile)
os.remove(os.path.join(current_app.config['UPLOAD_PATH'], torrentfile))
return make_response(Response('', status=200))
return abort(500, 'something borked horribly')
@torrents.route('/<filename>.<ext>', methods=['POST'])
def post(filename, ext):
"""
Pull filename for torrent and actually uploaded torrentfile from request and register in redis
We add the torrent file to the all torrents set in redis and
register the torrentfile name along with the associated hash
"""
log.debug("Entering post method")
assert isinstance(filename, basestring)
assert isinstance(ext, basestring)
torrentfile = filename + '.' + ext
log.debug("torrentfile: %s" % torrentfile)
if ext != 'torrent':
return abort(501, "Invalid filename specified, needs to end in .torrent")
log.debug('request: %s' % request)
log.debug("%s" % request.files)
if len(request.files.keys()) > 1:
abort(400, 'Bad Request, multiple files uploaded')
save_torrent(torrentfile, request.files.values()[0])
data = request.files.values()[0]
data.seek(0)
torrentdata = bencode.bdecode(data.read())
ihash = get_infohash_from_torrent(torrentdata)
name = get_name_from_torrent(torrentdata)
length = get_length_from_torrent(torrentdata)
activate(ihash, name, length )
log.debug("file: %s" % torrentdata.keys())
return make_response(Response('', status=200))
@torrents.route('/', methods=['GET'])
@torrents.route('/<filename>.<ext>', methods=['GET'])
def get(filename=None, ext=None):
if not filename and not ext:
return jsonify(get_all_torrents())
torrentfile = "%s.%s" % (filename, ext)
if torrentfile not in get_all_torrents():
return abort(404, 'torrentfile not found')
return send_from_directory(current_app.config['UPLOAD_PATH'],
torrentfile, as_attachment=True)
def get_infohash_from_torrent(data):
"""Return the sha1 hash for the torrent"""
return hashlib.sha1(bencode.bencode(data['info'])).hexdigest()
def get_length_from_torrent(data):
"""Return torrent length in bytes"""
length = 0
if 'files' in data['info']:
length = sum(file['length'] for file in data['info']['files'])
else:
length += data['info']['length']
return length
def get_name_from_torrent(data):
return data['info']['name']
def save_torrent(torrentname, file):
assert os.path.exists(current_app.config['UPLOAD_PATH'])
filename = secure_filename(torrentname)
log.debug('filenames: %s, %s' % (file.filename, filename))
if os.path.basename(file.filename) != torrentname:
abort(500, '''filenames don't match''')
g.redis.sadd('torrents', torrentname)
file.save(os.path.join(current_app.config['UPLOAD_PATH'], filename))
log.info('saved file as %s' % os.path.join(current_app.config['UPLOAD_PATH'], filename))
file.seek(0)
ihash = get_infohash_from_torrent(bencode.bdecode(file.read()))
g.redis.set(torrentname, ihash)
def get_torrent(torrentname):
return g.redis.get(torrentname)
def get_all_torrents():
torrentfiles = list(g.redis.smembers('torrents'))
if torrentfiles:
return dict(zip(torrentfiles, list(g.redis.mget(torrentfiles))))
return dict()
def activate(ihash, name, length):
if not g.redis.sismember('transfers', ihash):
log.warning("Transfer: %s hasn't started yet" % ihash)
g.redis.sadd('transfers', ihash)
now = "%s" % datetime.now()
g.redis.sadd('active_transfers', ihash)
args = {
'%s:registered' % ihash: now,
'%s:name' % ihash: name,
'%s:length' % ihash: length
}
g.redis.mset(args ) | 0.29747 | 0.100128 |
from pthat.pthat import Axis
import time
# Divisor for deriving the per-step ramp frequency from the start frequency.
# NOTE(review): only referenced in the commented-out "ramp_up_freq" line
# below; the script currently hard-codes ramp_up_freq = 30 -- confirm intent.
ramp_up_speed = 200
def wait_for_responses(axis, responses_to_check, msg):
    """Block until every string in responses_to_check has been received.

    Polls axis.get_all_responses() repeatedly, accumulating everything the
    controller sends back.  Once all expected responses are present, prints
    msg as a section header and hands the accumulated responses to
    axis.parse_responses() for display.
    """
    collected = axis.get_all_responses()
    # Keep polling while at least one expected response is still missing.
    while any(expected not in collected for expected in responses_to_check):
        collected += axis.get_all_responses()
    print(msg)
    axis.parse_responses(collected)
def change_speed(axis, old_rpm, new_rpm, ramp_up):
time.sleep(3)
old_frequency = axis.rpm_to_frequency(rpm=old_rpm, steps_per_rev=steps_per_rev, round_digits=3)
new_frequency = axis.rpm_to_frequency(rpm=new_rpm, steps_per_rev=steps_per_rev, round_digits=3)
resps = None
for x in range(int(old_frequency), int(new_frequency), int(ramp_up)):
axis.send_command(axis.change_speed(x))
# Check for both reply and complete responses to be returned
wait_for_responses(xaxis, ["RI01QX*", "CI01QX*"], "")
# Print the responses
print(f"------- Speed changed to {new_rpm} - command responses -------")
xaxis.parse_responses(resps)
steps_per_rev = int(input("How many steps per revolution [1600]? ") or "1600")
direction = 0 # Forward
rpm = 300 # Start RPM
pulse_count = 4294967295 # Set to max so we can start and stop it when desired
xaxis = Axis("X", command_id=1, serial_device="/dev/ttyS0")
xaxis.debug = True
# Setup the axis with values to start the motor
frequency = xaxis.rpm_to_frequency(rpm=rpm, steps_per_rev=steps_per_rev, round_digits=3)
set_axis_cmd = xaxis.set_axis(frequency=frequency, pulse_count=pulse_count, direction=direction,
start_ramp=1, finish_ramp=1, ramp_divide=100, ramp_pause=10, enable_line_polarity=1)
xaxis.send_command(set_axis_cmd)
# Get the responses - look for both responses to be returned before continuing
wait_for_responses(xaxis, ["RI01CX*", "CI01CX*"], "------- Set axis command responses -------")
# Start the motor
xaxis.send_command(xaxis.start())
# Check for both reply and complete responses to be returned
wait_for_responses(xaxis, ["RI01SX*"], "------- Start command responses -------")
# Change the speed
# First calculate the ramp up frequency for the original speed
frequency = xaxis.rpm_to_frequency(rpm=rpm, steps_per_rev=steps_per_rev, round_digits=3)
# ramp_up_freq = frequency / ramp_up_speed
ramp_up_freq = 30
# Speed up 100 RPM's
new_speed_rpm_100 = rpm + 100
change_speed(xaxis, rpm, new_speed_rpm_100, ramp_up_freq)
# Speed up 200 more RPM's
new_speed_rpm_200 = new_speed_rpm_100 + 200
change_speed(xaxis, new_speed_rpm_100, new_speed_rpm_200, ramp_up_freq)
time.sleep(3)
# Shut it all down
xaxis.send_command(xaxis.stop())
# Check for both reply and complete responses to be returned
wait_for_responses(xaxis, ["RI01TX*", "CI01SX*"], "------- Stop command responses -------") | examples/ChangeSpeed.py | from pthat.pthat import Axis
import time
ramp_up_speed = 200
def wait_for_responses(axis, responses_to_check, msg):
responses = axis.get_all_responses()
while not all(x in responses for x in responses_to_check):
responses = responses + axis.get_all_responses()
# Print the responses
print(msg)
axis.parse_responses(responses)
def change_speed(axis, old_rpm, new_rpm, ramp_up):
time.sleep(3)
old_frequency = axis.rpm_to_frequency(rpm=old_rpm, steps_per_rev=steps_per_rev, round_digits=3)
new_frequency = axis.rpm_to_frequency(rpm=new_rpm, steps_per_rev=steps_per_rev, round_digits=3)
resps = None
for x in range(int(old_frequency), int(new_frequency), int(ramp_up)):
axis.send_command(axis.change_speed(x))
# Check for both reply and complete responses to be returned
wait_for_responses(xaxis, ["RI01QX*", "CI01QX*"], "")
# Print the responses
print(f"------- Speed changed to {new_rpm} - command responses -------")
xaxis.parse_responses(resps)
steps_per_rev = int(input("How many steps per revolution [1600]? ") or "1600")
direction = 0 # Forward
rpm = 300 # Start RPM
pulse_count = 4294967295 # Set to max so we can start and stop it when desired
xaxis = Axis("X", command_id=1, serial_device="/dev/ttyS0")
xaxis.debug = True
# Setup the axis with values to start the motor
frequency = xaxis.rpm_to_frequency(rpm=rpm, steps_per_rev=steps_per_rev, round_digits=3)
set_axis_cmd = xaxis.set_axis(frequency=frequency, pulse_count=pulse_count, direction=direction,
start_ramp=1, finish_ramp=1, ramp_divide=100, ramp_pause=10, enable_line_polarity=1)
xaxis.send_command(set_axis_cmd)
# Get the responses - look for both responses to be returned before continuing
wait_for_responses(xaxis, ["RI01CX*", "CI01CX*"], "------- Set axis command responses -------")
# Start the motor
xaxis.send_command(xaxis.start())
# Check for both reply and complete responses to be returned
wait_for_responses(xaxis, ["RI01SX*"], "------- Start command responses -------")
# Change the speed
# First calculate the ramp up frequency for the original speed
frequency = xaxis.rpm_to_frequency(rpm=rpm, steps_per_rev=steps_per_rev, round_digits=3)
# ramp_up_freq = frequency / ramp_up_speed
ramp_up_freq = 30
# Speed up 100 RPM's
new_speed_rpm_100 = rpm + 100
change_speed(xaxis, rpm, new_speed_rpm_100, ramp_up_freq)
# Speed up 200 more RPM's
new_speed_rpm_200 = new_speed_rpm_100 + 200
change_speed(xaxis, new_speed_rpm_100, new_speed_rpm_200, ramp_up_freq)
time.sleep(3)
# Shut it all down
xaxis.send_command(xaxis.stop())
# Check for both reply and complete responses to be returned
wait_for_responses(xaxis, ["RI01TX*", "CI01SX*"], "------- Stop command responses -------") | 0.595728 | 0.372277 |
import logging
import os
import re
from urllib.parse import urlparse
import click
from .config import parse
from .db import Adapter
from .socket import SocketServer
from .util import wait_for
LOGGER = logging.getLogger(__name__)
# Create thread pool, each worker consumes from a queue
# Each worker is configured for sql; queue passes socket/address tuples
@click.command()
@click.option('--config', '-c', default='/etc/logrdis/logrdis.yml', help='logrdis configuration directives; this file defaults to /etc/logrdis/logrdis.yml')
def run_log_server(config):
"""Entry point function.
:param config_path: str. a filepath to the YAML configuration directive
:raises: OSError, KeyError
"""
if not os.path.exists(config):
raise OSError('{} does not exist'.format(config))
config_directives = parse(config)
if not 'engine' in config_directives:
raise KeyError('engine not defined in configuration')
# Wait for db server if one is configured
parsed = urlparse(config_directives['engine'])
ip_addr = parsed.hostname
port = parsed.port
LOGGER.info('Waiting for {}:{} to be ready'.format(ip_addr, port))
wait_for(ip_addr, port, config_directives['db_timeout'])
sql = Adapter(config_directives['engine'])
for process, directives in config_directives['process'].items():
if directives['action'] == 'store':
if 'pk' not in directives:
directives['pk'] = "_id"
if 'tablename' not in directives:
raise KeyError('No tablename field declared in process config')
if 'schema' not in directives:
raise KeyError('No schema field declared in process config')
raise KeyError('No pk field declared in process config')
sql.declare(directives['tablename'], directives['pk'], directives['schema'])
LOGGER.info('Stored directive {}'.format(directives['tablename']))
socket_server = SocketServer()
socket_server.run_forever(config_directives, sql)
LOGGER.info('Exiting') | logrdis/core.py | import logging
import os
import re
from urllib.parse import urlparse
import click
from .config import parse
from .db import Adapter
from .socket import SocketServer
from .util import wait_for
LOGGER = logging.getLogger(__name__)
# Create thread pool, each worker consumes from a queue
# Each worker is configured for sql; queue passes socket/address tuples
@click.command()
@click.option('--config', '-c', default='/etc/logrdis/logrdis.yml', help='logrdis configuration directives; this file defaults to /etc/logrdis/logrdis.yml')
def run_log_server(config):
"""Entry point function.
:param config_path: str. a filepath to the YAML configuration directive
:raises: OSError, KeyError
"""
if not os.path.exists(config):
raise OSError('{} does not exist'.format(config))
config_directives = parse(config)
if not 'engine' in config_directives:
raise KeyError('engine not defined in configuration')
# Wait for db server if one is configured
parsed = urlparse(config_directives['engine'])
ip_addr = parsed.hostname
port = parsed.port
LOGGER.info('Waiting for {}:{} to be ready'.format(ip_addr, port))
wait_for(ip_addr, port, config_directives['db_timeout'])
sql = Adapter(config_directives['engine'])
for process, directives in config_directives['process'].items():
if directives['action'] == 'store':
if 'pk' not in directives:
directives['pk'] = "_id"
if 'tablename' not in directives:
raise KeyError('No tablename field declared in process config')
if 'schema' not in directives:
raise KeyError('No schema field declared in process config')
raise KeyError('No pk field declared in process config')
sql.declare(directives['tablename'], directives['pk'], directives['schema'])
LOGGER.info('Stored directive {}'.format(directives['tablename']))
socket_server = SocketServer()
socket_server.run_forever(config_directives, sql)
LOGGER.info('Exiting') | 0.393851 | 0.055234 |
import requests
from sport_activities_features.tcx_manipulation import TCXFile
from datetime import datetime, timedelta
from sport_activities_features.weather_objects.AverageWeather import AverageWeather
from sport_activities_features.weather_objects.Weather import Weather
class WeatherIdentification(object):
r"""Identification of Weather data from TCX file.
Attributes:
altitudes: An array of altitude values extracted from TCX file
ascent_threshold (float): Parameter that defines the hill (hill >= ascent_threshold)
"""
def __init__(self, locations, timestamps, vc_api_key, unit_group="metric"):
"""
Args:
locations: list of locations from TCXFile.read_one_file() method
timestamps: list of timestamps from TCXFile.read_one_file() method
vc_api_key: VisualCrossing API key (https://www.visualcrossing.com/)
unit_group: format of measurements. Possible values: 'us', 'metric', 'uk', 'base' (From: https://www.visualcrossing.com/resources/documentation/weather-api/unit-groups-and-measurement-units/)
"""
self.locations = locations
self.timestamps = timestamps
self.vc_api_key = vc_api_key
self.unit_group = unit_group
def get_weather(self, time_delta=30) -> [Weather]:
"""
Args:
time_delta: time between two measurements, default 30 mins
Returns: list of objects Weather from the nearest meteorological station for every 1 hour of training.
"""
time = datetime(1980, 1, 1)
weather_list: [Weather] = []
index = 0
for index in range(len(self.locations)):
if time == datetime(1980, 1, 1):
time = self.timestamps[index]
location = (self.locations[index][0], self.locations[index][1])
weather_response = self.weather_api_call(time, location, index)
weather_list.append(weather_response)
elif time + timedelta(minutes=time_delta) < self.timestamps[index]:
time = self.timestamps[index]
location = (self.locations[index][0], self.locations[index][1])
weather_response = self.weather_api_call(time, location, index)
weather_list.append(weather_response)
if index == len(self.locations) - 1:
time = self.timestamps[index] + timedelta(minutes=60)
location = (self.locations[index][0], self.locations[index][1])
weather_response = self.weather_api_call(time, location, index)
weather_list.append(weather_response)
return weather_list
def weather_api_call(self, time: datetime, location: (float, float), index):
URL = "https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/weatherdata/history?"
time_start = time.strftime('%Y-%m-%dT%H:%M:%S')
time_end = (time + timedelta(hours=1, seconds=0)).strftime('%Y-%m-%dT%H:%M:%S')
location0_str = "{:.5f}".format(location[0])
location1_str = "{:.5f}".format(location[1])
PARAMS = {'aggregateHours': 1, 'combinationMethod': 'aggregate', 'startDateTime': time_start,
'endDateTime': time_start, 'maxStations': -1, 'maxDistance': -1, 'contentType': 'json',
'unitGroup': self.unit_group, 'locationMode': 'single', 'key': self.vc_api_key,
'dataElements': 'all', 'locations': f'{location0_str}, {location1_str}'}
# sending get request and saving the response as response object
r = requests.get(url=URL, params=PARAMS)
# extracting data in json format
data = r.json()
data_values = data['location']['values'][0]
return Weather(temperature=data_values['temp'], maximum_temperature=data_values['maxt'],
minimum_temperature=data_values['mint'], wind_chill=data_values['windchill'],
heat_index=data_values['heatindex'], precipitation=data_values['precip'],
snow_depth=data_values['snowdepth'], wind_speed=data_values['wspd'],
wind_direction=data_values['wdir'], sea_level_pressure=data_values['sealevelpressure'],
visibility=data_values['visibility'], cloud_cover=data_values['cloudcover'],
dew_point=data_values['dew'],
solar_radiation=data_values['solarradiation'],
relative_humidity=data_values['humidity'], weather_type=data_values['weathertype'],
conditions=data_values['conditions'], date=time, location=location, index=index)
def __find_nearest_weathers(self, timestamp, weather_list):
beforeWeathers = list(filter(lambda x: timestamp >= x.date - timedelta(minutes=1), weather_list))
afterWeathers = list(filter(lambda x: timestamp < x.date, weather_list))
before = None
beforeSeconds = 999999999999999999999999999
after = None
afterSeconds = 999999999999999999999999999
for bw in beforeWeathers:
t = timestamp - bw.date if timestamp > bw.date else bw.date - timestamp
if beforeSeconds > t.seconds:
before = bw
beforeSeconds = t.seconds
for aw in afterWeathers:
t = timestamp - aw.date if timestamp > aw.date else aw.date - timestamp
if afterSeconds > t.seconds:
after = aw
afterSeconds = t.seconds
return {'before': {'weather': before, 'seconds': beforeSeconds},
'after': {'weather': after, 'seconds': afterSeconds}}
def get_average_weather_data(self, timestamps, weather:[Weather]):
"""
:param timestamps: Timestamps from read TCX file method
:return: AverageWeather[], averaged out objects of weather
"""
weather_list = weather
extended_weather_list = []
for timestamp in timestamps:
before_after = self.__find_nearest_weathers(timestamp, weather_list)
before = before_after['before']
after = before_after['after']
weight_a = 1 - (before['seconds'] / (after['seconds'] + before['seconds']))
average_weather_at_timestamp = AverageWeather(weather_a=before['weather'],
weather_b=after['weather'],
weight_a=weight_a)
extended_weather_list.append(average_weather_at_timestamp)
return extended_weather_list | sport_activities_features/weather_identification.py | import requests
from sport_activities_features.tcx_manipulation import TCXFile
from datetime import datetime, timedelta
from sport_activities_features.weather_objects.AverageWeather import AverageWeather
from sport_activities_features.weather_objects.Weather import Weather
class WeatherIdentification(object):
r"""Identification of Weather data from TCX file.
Attributes:
altitudes: An array of altitude values extracted from TCX file
ascent_threshold (float): Parameter that defines the hill (hill >= ascent_threshold)
"""
def __init__(self, locations, timestamps, vc_api_key, unit_group="metric"):
"""
Args:
locations: list of locations from TCXFile.read_one_file() method
timestamps: list of timestamps from TCXFile.read_one_file() method
vc_api_key: VisualCrossing API key (https://www.visualcrossing.com/)
unit_group: format of measurements. Possible values: 'us', 'metric', 'uk', 'base' (From: https://www.visualcrossing.com/resources/documentation/weather-api/unit-groups-and-measurement-units/)
"""
self.locations = locations
self.timestamps = timestamps
self.vc_api_key = vc_api_key
self.unit_group = unit_group
def get_weather(self, time_delta=30) -> [Weather]:
"""
Args:
time_delta: time between two measurements, default 30 mins
Returns: list of objects Weather from the nearest meteorological station for every 1 hour of training.
"""
time = datetime(1980, 1, 1)
weather_list: [Weather] = []
index = 0
for index in range(len(self.locations)):
if time == datetime(1980, 1, 1):
time = self.timestamps[index]
location = (self.locations[index][0], self.locations[index][1])
weather_response = self.weather_api_call(time, location, index)
weather_list.append(weather_response)
elif time + timedelta(minutes=time_delta) < self.timestamps[index]:
time = self.timestamps[index]
location = (self.locations[index][0], self.locations[index][1])
weather_response = self.weather_api_call(time, location, index)
weather_list.append(weather_response)
if index == len(self.locations) - 1:
time = self.timestamps[index] + timedelta(minutes=60)
location = (self.locations[index][0], self.locations[index][1])
weather_response = self.weather_api_call(time, location, index)
weather_list.append(weather_response)
return weather_list
def weather_api_call(self, time: datetime, location: (float, float), index):
URL = "https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/weatherdata/history?"
time_start = time.strftime('%Y-%m-%dT%H:%M:%S')
time_end = (time + timedelta(hours=1, seconds=0)).strftime('%Y-%m-%dT%H:%M:%S')
location0_str = "{:.5f}".format(location[0])
location1_str = "{:.5f}".format(location[1])
PARAMS = {'aggregateHours': 1, 'combinationMethod': 'aggregate', 'startDateTime': time_start,
'endDateTime': time_start, 'maxStations': -1, 'maxDistance': -1, 'contentType': 'json',
'unitGroup': self.unit_group, 'locationMode': 'single', 'key': self.vc_api_key,
'dataElements': 'all', 'locations': f'{location0_str}, {location1_str}'}
# sending get request and saving the response as response object
r = requests.get(url=URL, params=PARAMS)
# extracting data in json format
data = r.json()
data_values = data['location']['values'][0]
return Weather(temperature=data_values['temp'], maximum_temperature=data_values['maxt'],
minimum_temperature=data_values['mint'], wind_chill=data_values['windchill'],
heat_index=data_values['heatindex'], precipitation=data_values['precip'],
snow_depth=data_values['snowdepth'], wind_speed=data_values['wspd'],
wind_direction=data_values['wdir'], sea_level_pressure=data_values['sealevelpressure'],
visibility=data_values['visibility'], cloud_cover=data_values['cloudcover'],
dew_point=data_values['dew'],
solar_radiation=data_values['solarradiation'],
relative_humidity=data_values['humidity'], weather_type=data_values['weathertype'],
conditions=data_values['conditions'], date=time, location=location, index=index)
def __find_nearest_weathers(self, timestamp, weather_list):
beforeWeathers = list(filter(lambda x: timestamp >= x.date - timedelta(minutes=1), weather_list))
afterWeathers = list(filter(lambda x: timestamp < x.date, weather_list))
before = None
beforeSeconds = 999999999999999999999999999
after = None
afterSeconds = 999999999999999999999999999
for bw in beforeWeathers:
t = timestamp - bw.date if timestamp > bw.date else bw.date - timestamp
if beforeSeconds > t.seconds:
before = bw
beforeSeconds = t.seconds
for aw in afterWeathers:
t = timestamp - aw.date if timestamp > aw.date else aw.date - timestamp
if afterSeconds > t.seconds:
after = aw
afterSeconds = t.seconds
return {'before': {'weather': before, 'seconds': beforeSeconds},
'after': {'weather': after, 'seconds': afterSeconds}}
def get_average_weather_data(self, timestamps, weather:[Weather]):
"""
:param timestamps: Timestamps from read TCX file method
:return: AverageWeather[], averaged out objects of weather
"""
weather_list = weather
extended_weather_list = []
for timestamp in timestamps:
before_after = self.__find_nearest_weathers(timestamp, weather_list)
before = before_after['before']
after = before_after['after']
weight_a = 1 - (before['seconds'] / (after['seconds'] + before['seconds']))
average_weather_at_timestamp = AverageWeather(weather_a=before['weather'],
weather_b=after['weather'],
weight_a=weight_a)
extended_weather_list.append(average_weather_at_timestamp)
return extended_weather_list | 0.648578 | 0.430506 |
from django.db import models
from datetime import datetime
class Language(models.Model):
name = models.CharField(max_length=100, help_text="Name of the language.")
audio_url = models.URLField(help_text="URL of audios.")
published = models.BooleanField(default=True, help_text="Decide whether this language is ready for users to see.")
class Track(models.Model):
title = models.CharField(max_length=200, help_text="Title of the track.")
index = models.IntegerField(help_text="The position of the track within a playlist.")
audio_url = models.URLField(help_text="URL to the audio file that goes with this track.")
transcript = models.TextField(help_text="A string/text transcript that goes along with the audio.")
duration = models.IntegerField(help_text="Duration in seconds.") # Duration in seconds
created_at = models.DateTimeField(help_text="When the track was created.", default=datetime.now, blank=True)
updated_at = models.DateTimeField(help_text="When the track was last updated.", default=datetime.now, blank=True)
active = models.BooleanField(help_text="Inactivate to temporarily delete track and reactivate to recover.", default=True)
published = models.BooleanField(help_text="Decide whether this track is ready for users to see.", default=True)
language = models.ForeignKey("Language", on_delete=models.CASCADE, help_text="The language of the track.")
class Playlist(models.Model):
title = models.CharField(help_text="The title of the playlist.", max_length=200)
index = models.IntegerField(help_text="The position of the playlist within a topic.")
audio_url = models.URLField(help_text="URL to the audio directory associated with the playlist.")
active = models.BooleanField(help_text="Inactivate to temporarily delete playlist and reactivate to recover.", default=True)
published = models.BooleanField(help_text="Decide to show or hide the playlist from the users.", default=True)
tracks = models.ManyToManyField(Track, help_text="A list of all the tracks this playlist contains.")
language = models.ForeignKey("Language", on_delete=models.CASCADE, help_text="The language of the track.")
class Topic(models.Model):
title = models.CharField(help_text="The name of the topic.", max_length=200)
index = models.IntegerField(help_text="The order/position of the topic within the interface.")
audio_url = models.URLField(help_text="URL to the audio directory associated with the topic.")
active = models.BooleanField(help_text="Inactivate to temporarily delete topic and reactivate to recover.", default=True)
published = models.BooleanField(help_text="Decide to show or hide the topic from the users.", default=True)
playlists = models.ManyToManyField(Playlist, help_text="A list of all the playlists this topic contains.")
language = models.ForeignKey("Language", on_delete=models.CASCADE, help_text="The language of the track.") | audios/models.py | from django.db import models
from datetime import datetime
class Language(models.Model):
name = models.CharField(max_length=100, help_text="Name of the language.")
audio_url = models.URLField(help_text="URL of audios.")
published = models.BooleanField(default=True, help_text="Decide whether this language is ready for users to see.")
class Track(models.Model):
title = models.CharField(max_length=200, help_text="Title of the track.")
index = models.IntegerField(help_text="The position of the track within a playlist.")
audio_url = models.URLField(help_text="URL to the audio file that goes with this track.")
transcript = models.TextField(help_text="A string/text transcript that goes along with the audio.")
duration = models.IntegerField(help_text="Duration in seconds.") # Duration in seconds
created_at = models.DateTimeField(help_text="When the track was created.", default=datetime.now, blank=True)
updated_at = models.DateTimeField(help_text="When the track was last updated.", default=datetime.now, blank=True)
active = models.BooleanField(help_text="Inactivate to temporarily delete track and reactivate to recover.", default=True)
published = models.BooleanField(help_text="Decide whether this track is ready for users to see.", default=True)
language = models.ForeignKey("Language", on_delete=models.CASCADE, help_text="The language of the track.")
class Playlist(models.Model):
title = models.CharField(help_text="The title of the playlist.", max_length=200)
index = models.IntegerField(help_text="The position of the playlist within a topic.")
audio_url = models.URLField(help_text="URL to the audio directory associated with the playlist.")
active = models.BooleanField(help_text="Inactivate to temporarily delete playlist and reactivate to recover.", default=True)
published = models.BooleanField(help_text="Decide to show or hide the playlist from the users.", default=True)
tracks = models.ManyToManyField(Track, help_text="A list of all the tracks this playlist contains.")
language = models.ForeignKey("Language", on_delete=models.CASCADE, help_text="The language of the track.")
class Topic(models.Model):
title = models.CharField(help_text="The name of the topic.", max_length=200)
index = models.IntegerField(help_text="The order/position of the topic within the interface.")
audio_url = models.URLField(help_text="URL to the audio directory associated with the topic.")
active = models.BooleanField(help_text="Inactivate to temporarily delete topic and reactivate to recover.", default=True)
published = models.BooleanField(help_text="Decide to show or hide the topic from the users.", default=True)
playlists = models.ManyToManyField(Playlist, help_text="A list of all the playlists this topic contains.")
language = models.ForeignKey("Language", on_delete=models.CASCADE, help_text="The language of the track.") | 0.580709 | 0.163379 |
from django.shortcuts import render, redirect
from django.views import View
from django.utils.crypto import get_random_string
from django.http import JsonResponse
from django.contrib.staticfiles.utils import get_files
from models.models import *
from API.serializers import PlaylistSerializer
import json
"""
Since we are using django default template with Vue 3.0
we will render all templates with python.
written by @lyonkvalid - https://github.com/lyonkvalid/
"""
class HomeView(View):
    """Render the main home activity page."""

    def __init__(self):
        super().__init__()
        self.template_name = "activities/home.html"

    def get(self, request, *args, **kwargs):
        """Serve the home template as-is; all dynamic content is Vue-driven."""
        return render(request, self.template_name)
class HomeIntentView(View):
    """Detail page for one collection, plus JSON endpoints for toggling
    library membership and managing playlists (dispatched on ``?type=``)."""

    def __init__(self):
        super().__init__()
        self.template_name = "intents/home.intent.html"

    def get(self, request, *args, **kwargs):
        """Render the intent page for the collection identified by the URL id."""
        collection_id = kwargs.get("id")
        collection = Collection.objects.get(id=collection_id)
        context = {"id": collection_id, "collection": collection}
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        """Handle one of three JSON actions selected by the ``type`` query param."""
        action = request.GET.get("type")
        if action == "add-library":
            # Toggle: remove the collection's playlist from the user's library
            # if present, otherwise create one and add it.
            collection_id = json.loads(request.body.decode("utf-8")).get("id")
            library = MyLibrary.objects.get(user=request.user)
            if library.playlists.filter(collection__id=collection_id).exists():
                playlist = library.playlists.get(collection__id=collection_id)
                library.playlists.remove(playlist)
                playlist.delete()
                return JsonResponse({"status": True, "type": "remove"})
            collection = Collection.objects.get(id=collection_id)
            playlist = Playlist.objects.create(user=request.user, collection=collection)
            library.playlists.add(playlist)
            return JsonResponse({"status": True, "type": "add"})
        if action == "add-to-playlist":
            # Attach an existing song to an existing collection.
            song = Song.objects.get(id=request.GET.get("song_id"))
            collection = Collection.objects.get(id=request.GET.get("id"))
            collection.songs.add(song)
            return JsonResponse({"status": True, "type": "add"})
        if action == "delete-playlist":
            # Delete the backing collection first, then the playlist wrapper.
            playlist = Playlist.objects.get(collection__id=request.GET.get("id"))
            playlist.collection.delete()
            playlist.delete()
            return JsonResponse({"status": True, "type": "remove"})
        return JsonResponse({"status": False, "msg": "provide a type in get query"})
class LibraryView(View):
    """User library page and the playlist-creation endpoint."""

    def __init__(self):
        super().__init__()
        self.template_name = "activities/library.html"

    def get(self, request, *args, **kwargs):
        """Render the library template."""
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Create a new playlist for the requesting user (``?type=create-playlist``)."""
        action = request.GET.get("type")
        if action != "create-playlist":
            return JsonResponse({"status": False, "type": action})
        title = request.POST.get("title")
        if not title:
            # No title supplied: generate a random placeholder name.
            title = get_random_string()
        meta_data = MetaData(title=title, artist=request.user.username)
        meta_data.save()
        collection = Collection(user=request.user, meta_data=meta_data, type="playlist")
        collection.save()
        playlist = Playlist.objects.create(user=request.user, collection=collection)
        my_library = MyLibrary.objects.get(user=request.user, type="online")
        my_library.playlists.add(playlist)
        my_library.save()
        return JsonResponse({"status": True, "playlist": PlaylistSerializer(playlist).data})
class SearchView(View):
    """Render the search activity page."""

    def __init__(self):
        super().__init__()
        self.template_name = "activities/search.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class SearchIntentView(View):
    """Render the search intent (results) fragment."""

    def __init__(self):
        super().__init__()
        self.template_name = "intents/search.intent.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class ExploreView(View):
    """Render the explore activity page."""

    def __init__(self):
        super().__init__()
        self.template_name = "activities/explore.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class ProfileView(View):
    """Public profile page plus a follow/unfollow toggle endpoint."""

    def __init__(self):
        self.template_name = "fragments/profile.html"

    def get(self, request, *args, **kwargs):
        """Render the profile of the user identified by the URL ``id``."""
        id = kwargs.get("id")
        if id is not None:
            user = User.objects.get(id=id)
            my_library = MyLibrary.objects.get(user=user)
            return render(request, self.template_name, {"my_library": my_library, "user": user})
        # NOTE(review): when no id is supplied this falls through and returns
        # None, which Django rejects — an explicit 404 response is probably
        # intended here (the original "#error 404 callback" comment agrees).

    def post(self, request, *args, **kwargs):
        """Toggle the follow relationship between request.user and the target user.

        Returns JSON with the action taken ("add"/"remove") and the target's
        updated follower count; any failure yields ``{"status": False}``.
        """
        query = json.loads(request.body.decode("utf-8"))
        # The original code had this try/except mangled into an `if True:` with
        # the `except:` commented out, leaving the error response unreachable.
        # Restored here so unexpected failures return JSON instead of a 500.
        try:
            following = User.objects.get(id=query.get("id"))
            # NOTE(review): this condition inspects following.following and
            # request.user.followers, while the mutations below act on
            # following.followers and request.user.following — verify the
            # related_name wiring on the User model; the check may be inverted.
            if following.following.filter(id=request.user.id).exists() and request.user.followers.filter(id=following.id).exists():
                following.followers.remove(request.user)
                request.user.following.remove(following)
                return JsonResponse({"status": True, "type": "remove", "followers_count": following.followers.count()})
            else:
                following.followers.add(request.user)
                request.user.following.add(following)
                return JsonResponse({"status": True, "type": "add", "followers_count": following.followers.count()})
        except Exception:
            return JsonResponse({"status": False, "msg": "Unknown error"})
class ProfileIntentView(View):
    """Edit-profile form and the endpoint that applies the submitted edits."""

    def __init__(self):
        super().__init__()
        self.template_name = "forms/profile/edit_profile.html"

    def get(self, request, *args, **kwargs):
        """Render the edit-profile form."""
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Apply avatar and/or display-name changes (``?type=edit-profile``)."""
        user = User.objects.get(id=request.user.id)
        action = request.GET.get("type")
        if action != "edit-profile":
            return JsonResponse({"status": False, "msg": "provide a type in get query"})
        avatar = request.FILES.get("avatar")
        name = request.POST.get("name")
        if avatar is not None:
            # Store the upload under a stable, username-based filename.
            user.kyc.avatar.save(request.user.username + ".png", avatar)
        if name is not None:
            # Split "First Last" into capitalized first/last name fields.
            parts = name.strip().split(" ")
            user.first_name = parts[0].capitalize()
            if len(parts) > 1:
                user.last_name = parts[1].capitalize()
        user.save()
        return JsonResponse({"status": True, "msg": "Profile updated"})
class SettingsView(View):
    """Settings page and JSON endpoints for per-user preferences."""

    def __init__(self):
        self.template_name = "fragments/settings.html"

    def get(self, request, *args, **kwargs):
        """Render the settings fragment."""
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Dispatch on the ``type`` query parameter: toggle data-saver or
        auto-download, or set the user's plan. Returns the new state as JSON."""
        user = User.objects.get(id=request.user.id)
        profile = user.profile
        type = request.GET.get("type")
        if type == "data-saver":
            profile.settings.data_saver = not profile.settings.data_saver
            profile.settings.save()
            # BUG FIX: previously reported user.settings.data_saver, a different
            # attribute path from the profile.settings object just updated.
            return JsonResponse({"status": True, "checked": profile.settings.data_saver})
        if type == "auto-download":
            profile.settings.auto_download = not profile.settings.auto_download
            profile.settings.save()
            # BUG FIX: same mismatch as above — report the value we just saved.
            return JsonResponse({"status": True, "checked": profile.settings.auto_download})
        if type == "plan":
            plan = request.GET.get("plan")
            kyc = request.user.kyc
            kyc.plans = plan
            kyc.save()
            return JsonResponse({"status": True, "plan": plan})
        return JsonResponse({"status": False, "msg": "provide a type in get query"})
class Dialog(View):
    """Render an arbitrary dialog template named by the ``template_name`` query param."""

    # SECURITY NOTE(review): the template name comes straight from the query
    # string, so a client can render any template the loader can resolve.
    # Restrict this to a whitelist of known dialog templates.
    def get(self, request, *args, **kwargs):
        template_name = request.GET.get("template_name")
        return render(request, template_name)
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
from whitenoise.storage import CompressedManifestStaticFilesStorage
class StaticListView(View):
    """Return a JSON manifest of static assets (css/images/js) for the
    service worker to precache."""

    def __init__(self):
        super().__init__()
        self.template_name = "service-worker/sw.js"

    def get(self, request, *args, **kwargs):
        """Collect hashed static file names and return them as /static/ URLs."""
        storage = ManifestStaticFilesStorage()
        assets = []
        for folder in ("css", "images", "js"):
            assets += list(get_files(storage, location=folder))
        # Drop the gzip companions; clients only need the canonical names.
        final_assets = ["/static/{}".format(name) for name in assets if not name.endswith(".gz")]
        return JsonResponse({"assets": final_assets})
class LandingPageView(View):
    """Serve the public landing page."""

    # Class attribute (Django CBV convention); the original overrode
    # __init__(self) without **kwargs, which breaks as_view(template_name=...).
    template_name = "landing.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class OfflineView(View):
    # Placeholder page shown when content is unavailable / client is offline.
    def __init__(self):
        self.template_name = "native-ui/no_item.html"
    def get(self, request, *args, **kwargs):
        # NOTE(review): the trailing "| ... |" text below is dataset-export
        # residue fused onto this line; it is not valid Python and should be
        # removed at the data source.
        return render(request, self.template_name) | activity/views.py | from django.shortcuts import render, redirect
from django.views import View
from django.utils.crypto import get_random_string
from django.http import JsonResponse
from django.contrib.staticfiles.utils import get_files
from models.models import *
from API.serializers import PlaylistSerializer
import json
"""
Since we are using django default template with Vue 3.0
we will render all templates with python.
written by @lyonkvalid - https://github.com/lyonkvalid/
"""
class HomeView(View):
    """Serve the main home activity page."""

    # Class attribute (Django CBV convention); the original overrode
    # __init__(self) without **kwargs, which breaks as_view(**initkwargs).
    template_name = "activities/home.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class HomeIntentView(View):
    """Render a collection detail "intent" and handle playlist actions."""
    def __init__(self):
        self.template_name = "intents/home.intent.html"
    def get(self, request, *args, **kwargs):
        # Show the collection identified by the URL kwarg ``id``.
        _id = kwargs.get("id")
        collection = Collection.objects.get(id=_id)
        return render(request, self.template_name, {"id":_id, "collection":collection})
    def post(self, request, *args, **kwargs):
        # Dispatch on the ?type= query parameter (shadows the builtin).
        type = request.GET.get("type")
        if type == "add-library":
            # Toggle membership of a collection in the user's library.
            id = json.loads(request.body.decode("utf-8")).get("id")
            my_library = MyLibrary.objects.get(user=request.user)
            if my_library.playlists.filter(collection__id=id).exists():
                # Already saved: detach the playlist wrapper, then delete it.
                playlist = my_library.playlists.get(collection__id=id)
                my_library.playlists.remove(playlist)
                playlist.delete()
                return JsonResponse({"status":True, "type":"remove"})
            else:
                collection = Collection.objects.get(id=id)
                playlist = Playlist.objects.create(user=request.user, collection=collection)
                my_library.playlists.add(playlist)
                return JsonResponse({"status":True, "type":"add"})
        elif type == "add-to-playlist":
            # Append one song (by ?song_id=) to an existing collection (?id=).
            id = request.GET.get("id")
            song_id = request.GET.get("song_id")
            song = Song.objects.get(id=song_id)
            collection = Collection.objects.get(id=id)
            collection.songs.add(song)
            return JsonResponse({"status":True, "type":"add"})
        elif type == "delete-playlist":
            # Delete both the backing collection and the playlist row.
            # NOTE(review): no ownership check — any authenticated caller can
            # delete by id; confirm this is guarded upstream.
            id = request.GET.get("id")
            playlist = Playlist.objects.get(collection__id=id)
            playlist.collection.delete()
            playlist.delete()
            return JsonResponse({"status":True, "type":"remove"})
        else:
            return JsonResponse({"status":False, "msg":"provide a type in get query"})
class LibraryView(View):
    """Library activity: page shell plus playlist creation."""

    # Class attribute (avoids the original no-kwargs __init__ override, which
    # breaks as_view(**initkwargs)).
    template_name = "activities/library.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Create a playlist for the current user (?type=create-playlist)."""
        action = request.GET.get("type")
        if action != "create-playlist":
            return JsonResponse({"status": False, "type": action})
        title = request.POST.get("title")
        # BUG FIX: was ``title == None or title == ""`` — compare with ``is``
        # or, simpler, use falsiness which covers both cases.
        if not title:
            # Explicit length: get_random_string() with no argument is removed
            # in Django 4 (12 was its historical default).
            title = get_random_string(12)
        meta_data = MetaData(title=title, artist=request.user.username)
        meta_data.save()
        collection = Collection(user=request.user, meta_data=meta_data, type="playlist")
        collection.save()
        playlist = Playlist.objects.create(user=request.user, collection=collection)
        my_library = MyLibrary.objects.get(user=request.user, type="online")
        my_library.playlists.add(playlist)
        my_library.save()
        return JsonResponse({"status": True, "playlist": PlaylistSerializer(playlist).data})
class SearchView(View):
    """Serve the search activity page."""

    # Class attribute (Django CBV convention); the original no-kwargs
    # __init__(self) override breaks as_view(**initkwargs).
    template_name = "activities/search.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class SearchIntentView(View):
    """Serve the search "intent" fragment."""

    # Class attribute (Django CBV convention); the original no-kwargs
    # __init__(self) override breaks as_view(**initkwargs).
    template_name = "intents/search.intent.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class ExploreView(View):
    """Serve the explore activity page."""

    # Class attribute (Django CBV convention); the original no-kwargs
    # __init__(self) override breaks as_view(**initkwargs).
    template_name = "activities/explore.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class ProfileView(View):
    """Public profile page plus follow/unfollow toggling."""

    # Class attribute (avoids the original no-kwargs __init__ override, which
    # breaks as_view(**initkwargs)).
    template_name = "fragments/profile.html"

    def get(self, request, *args, **kwargs):
        user_id = kwargs.get("id")
        if user_id is not None:
            user = User.objects.get(id=user_id)
            my_library = MyLibrary.objects.get(user=user)
            return render(request, self.template_name, {"my_library": my_library, "user": user})
        # TODO(review): falls through to an implicit None (a Django 500) when
        # no id kwarg is supplied — wire up the intended "error 404 callback".

    def post(self, request, *args, **kwargs):
        """Toggle the follow relationship between request.user and the target."""
        query = json.loads(request.body.decode("utf-8"))
        try:
            following = User.objects.get(id=query.get("id"))
            # NOTE(review): the check reads ``following.following`` but the
            # mutations target ``following.followers`` — confirm the relation
            # direction is intentional.
            if following.following.filter(id=request.user.id).exists() and request.user.followers.filter(id=following.id).exists():
                following.followers.remove(request.user)
                request.user.following.remove(following)
                return JsonResponse({"status": True, "type": "remove", "followers_count": following.followers.count()})
            following.followers.add(request.user)
            request.user.following.add(following)
            return JsonResponse({"status": True, "type": "add", "followers_count": following.followers.count()})
        except Exception:
            # BUG FIX: the original had ``if True:`` with the except commented
            # out, leaving this error response unreachable dead code.
            return JsonResponse({"status": False, "msg": "Unknown error"})
class ProfileIntentView(View):
    """Edit-profile form plus the handler that applies the edits."""

    # Class attribute (avoids the original no-kwargs __init__ override, which
    # breaks as_view(**initkwargs)).
    template_name = "forms/profile/edit_profile.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Apply avatar/name edits when ?type=edit-profile."""
        user = User.objects.get(id=request.user.id)
        action = request.GET.get("type")
        if action != "edit-profile":
            return JsonResponse({"status": False, "msg": "provide a type in get query"})
        avatar = request.FILES.get("avatar")
        name = request.POST.get("name")
        if avatar is not None:
            # Store the upload as "<username>.png" on the user's KYC record.
            user.kyc.avatar.save(request.user.username + ".png", avatar)
        if name is not None:
            # BUG FIX: split() without an argument collapses repeated
            # whitespace; the old split(" ") produced empty tokens (blanking
            # first_name/last_name) on double spaces or empty input.
            parts = name.split()
            if parts:
                user.first_name = parts[0].capitalize()
            if len(parts) > 1:
                user.last_name = parts[1].capitalize()
        user.save()
        return JsonResponse({"status": True, "msg": "Profile updated"})
class SettingsView(View):
    """Settings fragment plus POST toggles for per-user settings flags."""

    # Class attribute (Django CBV convention); the original no-kwargs
    # __init__(self) override breaks as_view(**initkwargs).
    template_name = "fragments/settings.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Dispatch on ?type=: toggle data-saver / auto-download, or set plan."""
        user = User.objects.get(id=request.user.id)
        profile = user.profile
        action = request.GET.get("type")
        if action == "data-saver":
            settings = profile.settings
            settings.data_saver = not settings.data_saver
            settings.save()
            # BUG FIX: the response previously read ``user.settings.data_saver``
            # — a different attribute path than the object just toggled.
            return JsonResponse({"status": True, "checked": settings.data_saver})
        if action == "auto-download":
            settings = profile.settings
            settings.auto_download = not settings.auto_download
            settings.save()
            # BUG FIX: same wrong attribute path (``user.settings``) as above.
            return JsonResponse({"status": True, "checked": settings.auto_download})
        if action == "plan":
            plan = request.GET.get("plan")
            kyc = request.user.kyc
            kyc.plans = plan
            kyc.save()
            return JsonResponse({"status": True, "plan": plan})
        return JsonResponse({"status": False, "msg": "provide a type in get query"})
class Dialog(View):
    # Renders whatever template name the client supplies via ?template_name=.
    # SECURITY(review): letting the caller choose the template path allows
    # probing/rendering arbitrary templates — whitelist the expected names.
    def get(self, request, *args, **kwargs):
        template_name = request.GET.get("template_name")
        return render(request, template_name)
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
from whitenoise.storage import CompressedManifestStaticFilesStorage
class StaticListView(View):
    """Return hashed static asset URLs for the service worker's precache list."""

    # Class attribute (avoids the original no-kwargs __init__ override, which
    # breaks as_view(**initkwargs)). NOTE: get() never renders this template.
    template_name = "service-worker/sw.js"

    def get(self, request, *args, **kwargs):
        storage = ManifestStaticFilesStorage()
        # Collect css/images/js assets, skipping gzip-compressed duplicates.
        final_assets = [
            "/static/{}".format(asset)
            for folder in ("css", "images", "js")
            for asset in get_files(storage, location=folder)
            if not asset.endswith(".gz")
        ]
        return JsonResponse({"assets": final_assets})
class LandingPageView(View):
    """Serve the public landing page."""

    # Class attribute (Django CBV convention); the original overrode
    # __init__(self) without **kwargs, which breaks as_view(template_name=...).
    template_name = "landing.html"

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class OfflineView(View):
    # Placeholder page shown when content is unavailable / client is offline.
    def __init__(self):
        self.template_name = "native-ui/no_item.html"
    def get(self, request, *args, **kwargs):
        # NOTE(review): the trailing "| ... |" text below is dataset-export
        # residue fused onto this line; it is not valid Python and should be
        # removed at the data source.
        return render(request, self.template_name) | 0.480479 | 0.068257 |
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
@dataclass
class Publication():
    """A single scraped publication (post or article)."""
    title : str                   # headline, or first-sentence/10-word fallback
    content: str                  # body text
    publishedDate : datetime      # parsed publication timestamp
    url : str                     # link to the publication
    media : list                  # attached media items (not populated yet)
class Parser(ABC):
    """Abstract interface for site-specific publication parsers."""

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def Prepare_url(self, url : str, **kwargs):
        """
        Checks if url belongs to the supported domain
        Parameters
        ----------
        url : str
            Url of source
        Returns
        -------
        tuple
            url, url for parsing
        """
        return str(), str()

    @abstractmethod
    def GetTitle(self, element : object, **kwargs):
        """
        Extracts title from represented object if it exist. Else gets first sentence from text or first 10 word of text.
        Parameters
        ----------
        element : object
            Etree object
        Returns
        -------
        str
            text of title
        """
        return str()

    @abstractmethod
    def GetContent(self, element : object, **kwargs):
        """
        Extracts content from represented object if it exist.
        Parameters
        ----------
        element : object
            Etree object
        Returns
        -------
        str
            text of content
        """
        return str()

    @abstractmethod
    def GetPublishedDate(self, element : object, **kwargs):
        """
        Extracts datetime object from time string in represented object if it exist.
        Parameters
        ----------
        element : object
            Etree object
        Returns
        -------
        datetime.datetime
        """
        # BUG FIX: bare ``datetime()`` raises TypeError (missing arguments);
        # the abstract stub now returns a harmless sentinel instead.
        return datetime.min

    @abstractmethod
    def GetURL(self, element : object, **kwargs):
        """
        Extracts url from time string in represented object if it exist.
        Parameters
        ----------
        element : object
            Etree list of objects
        Returns
        -------
        str
            extracted url
        """
        return str()

    @abstractmethod
    def GetMedia(self, element : object, **kwargs):
        """
        Not implemented yet
        """
        # BUG FIX: ``list(str)`` raises TypeError ('type' is not iterable);
        # return an empty list.
        return []

    @abstractmethod
    def ParseOne(self, url : str, **kwargs):
        """
        Parses one element from list of publications
        Parameters
        ----------
        one_publication : object
            Etree list of objects
        Returns
        -------
        str
            id of post
        Publication
            Publication dataclass
        """
        # BUG FIX: ``Publication()`` raises TypeError (missing required
        # fields); the abstract stub returns None.
        return None

    @abstractmethod
    def Parse(self, url, **kwargs):
        """
        Extracts content from represented object if it exist.
        Parameters
        ----------
        url : str
            url of resource
        Returns
        -------
        list
            list of publication dataclasses
        """
        # BUG FIX: ``list(Publication)`` raises TypeError; return [].
        return []
class BadUrlException(Exception):
pass | sus/engines/base_engine.py | from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
@dataclass
class Publication():
    """A single scraped publication (post or article)."""
    title : str                   # headline, or first-sentence/10-word fallback
    content: str                  # body text
    publishedDate : datetime      # parsed publication timestamp
    url : str                     # link to the publication
    media : list                  # attached media items (not populated yet)
class Parser(ABC):
    """Abstract interface for site-specific publication parsers."""

    @abstractmethod
    def __init__(self):
        pass

    @abstractmethod
    def Prepare_url(self, url : str, **kwargs):
        """
        Checks if url belongs to the supported domain
        Parameters
        ----------
        url : str
            Url of source
        Returns
        -------
        tuple
            url, url for parsing
        """
        return str(), str()

    @abstractmethod
    def GetTitle(self, element : object, **kwargs):
        """
        Extracts title from represented object if it exist. Else gets first sentence from text or first 10 word of text.
        Parameters
        ----------
        element : object
            Etree object
        Returns
        -------
        str
            text of title
        """
        return str()

    @abstractmethod
    def GetContent(self, element : object, **kwargs):
        """
        Extracts content from represented object if it exist.
        Parameters
        ----------
        element : object
            Etree object
        Returns
        -------
        str
            text of content
        """
        return str()

    @abstractmethod
    def GetPublishedDate(self, element : object, **kwargs):
        """
        Extracts datetime object from time string in represented object if it exist.
        Parameters
        ----------
        element : object
            Etree object
        Returns
        -------
        datetime.datetime
        """
        # BUG FIX: bare ``datetime()`` raises TypeError (missing arguments);
        # the abstract stub now returns a harmless sentinel instead.
        return datetime.min

    @abstractmethod
    def GetURL(self, element : object, **kwargs):
        """
        Extracts url from time string in represented object if it exist.
        Parameters
        ----------
        element : object
            Etree list of objects
        Returns
        -------
        str
            extracted url
        """
        return str()

    @abstractmethod
    def GetMedia(self, element : object, **kwargs):
        """
        Not implemented yet
        """
        # BUG FIX: ``list(str)`` raises TypeError ('type' is not iterable);
        # return an empty list.
        return []

    @abstractmethod
    def ParseOne(self, url : str, **kwargs):
        """
        Parses one element from list of publications
        Parameters
        ----------
        one_publication : object
            Etree list of objects
        Returns
        -------
        str
            id of post
        Publication
            Publication dataclass
        """
        # BUG FIX: ``Publication()`` raises TypeError (missing required
        # fields); the abstract stub returns None.
        return None

    @abstractmethod
    def Parse(self, url, **kwargs):
        """
        Extracts content from represented object if it exist.
        Parameters
        ----------
        url : str
            url of resource
        Returns
        -------
        list
            list of publication dataclasses
        """
        # BUG FIX: ``list(Publication)`` raises TypeError; return [].
        return []
class BadUrlException(Exception):
pass | 0.886966 | 0.275702 |
import os
from astropy.io import ascii
try:
from cStringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import StringIO, BytesIO
import io
HERE = os.path.abspath(os.path.dirname(__file__))
class _ASCIISuite:
    # Base asv benchmark suite: subclasses set ``file_format``/``data_type``
    # and alias ``time_read``/``time_write`` to the methods below so asv
    # times them.
    def setup(self):
        # NOTE(review): these two dict initialisations look dead — self.data
        # is overwritten with the file bytes below, and self.tables is not
        # referenced anywhere in this class.
        self.tables = {}
        self.data = {}
        self.output = StringIO()
        # Map of benchmarked format name -> ascii Writer class for write().
        self.writers = {
            'csv': ascii.Csv,
            'rdb': ascii.Rdb,
            'fixed_width': ascii.FixedWidth,
            'fixed_width_no_header': ascii.FixedWidthNoHeader,
            'fixed_width_two_line': ascii.FixedWidthTwoLine,
            'tab': ascii.Tab,
            'no_header': ascii.NoHeader,
            'commented_header': ascii.CommentedHeader,
            'basic': ascii.Basic,
            'ipac': ascii.Ipac,
            'latex': ascii.Latex,
            'aastex': ascii.AASTex
        }
        # Load the fixture file for this format/data type as raw bytes.
        with io.open(os.path.join(HERE, 'files', self.file_format, '{0}.txt'.format(self.data_type)), 'rb') as f:
            self.data = f.read()
        # sextractor is read-only (no Writer), so no table is pre-built.
        if self.file_format != 'sextractor':
            self.table = self.read()
    def read(self):
        # guess=False keeps the parse deterministic for stable timings.
        return ascii.read(BytesIO(self.data), format=self.file_format, guess=False)
    def write(self):
        ascii.write(self.table, self.output, Writer=self.writers[self.file_format])
class CsvString(_ASCIISuite):
    # Benchmark CSV read/write on string-valued columns; asv discovers the
    # time_* attributes as timed benchmarks.
    file_format = 'csv'
    data_type = 'string'
    time_read = _ASCIISuite.read
    time_write = _ASCIISuite.write
class CsvInt(_ASCIISuite):
file_format = 'csv'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CsvFloat(_ASCIISuite):
file_format = 'csv'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class RdbString(_ASCIISuite):
file_format = 'rdb'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class RdbInt(_ASCIISuite):
file_format = 'rdb'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class RdbFloat(_ASCIISuite):
file_format = 'rdb'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthString(_ASCIISuite):
file_format = 'fixed_width'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthInt(_ASCIISuite):
file_format = 'fixed_width'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthFloat(_ASCIISuite):
file_format = 'fixed_width'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthNoHeaderString(_ASCIISuite):
file_format = 'fixed_width_no_header'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthNoHeaderInt(_ASCIISuite):
file_format = 'fixed_width_no_header'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthNoHeaderFloat(_ASCIISuite):
file_format = 'fixed_width_no_header'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthTwoLineString(_ASCIISuite):
file_format = 'fixed_width_two_line'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthTwoLineInt(_ASCIISuite):
file_format = 'fixed_width_two_line'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthTwoLineFloat(_ASCIISuite):
file_format = 'fixed_width_two_line'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class TabString(_ASCIISuite):
file_format = 'tab'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class TabInt(_ASCIISuite):
file_format = 'tab'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class TabFloat(_ASCIISuite):
file_format = 'tab'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class NoHeaderString(_ASCIISuite):
file_format = 'no_header'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class NoHeaderInt(_ASCIISuite):
file_format = 'no_header'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class NoHeaderFloat(_ASCIISuite):
file_format = 'no_header'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CommentedHeaderString(_ASCIISuite):
file_format = 'commented_header'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CommentedHeaderInt(_ASCIISuite):
file_format = 'commented_header'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CommentedHeaderFloat(_ASCIISuite):
file_format = 'commented_header'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class BasicString(_ASCIISuite):
file_format = 'basic'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class BasicInt(_ASCIISuite):
file_format = 'basic'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class BasicFloat(_ASCIISuite):
file_format = 'basic'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class SextractorString(_ASCIISuite):
file_format = 'sextractor'
data_type = 'string'
time_read = _ASCIISuite.read
class SextractorInt(_ASCIISuite):
file_format = 'sextractor'
data_type = 'int'
time_read = _ASCIISuite.read
class SextractorFloat(_ASCIISuite):
file_format = 'sextractor'
data_type = 'float'
time_read = _ASCIISuite.read
class IpacString(_ASCIISuite):
file_format = 'ipac'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class IpacInt(_ASCIISuite):
file_format = 'ipac'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class IpacFloat(_ASCIISuite):
file_format = 'ipac'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class LatexString(_ASCIISuite):
file_format = 'latex'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class LatexInt(_ASCIISuite):
file_format = 'latex'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class LatexFloat(_ASCIISuite):
file_format = 'latex'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class AastexString(_ASCIISuite):
file_format = 'aastex'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class AastexInt(_ASCIISuite):
file_format = 'aastex'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class AastexFloat(_ASCIISuite):
file_format = 'aastex'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write | benchmarks/io_ascii/main.py | import os
from astropy.io import ascii
try:
from cStringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import StringIO, BytesIO
import io
HERE = os.path.abspath(os.path.dirname(__file__))
class _ASCIISuite:
    # Base asv benchmark suite: subclasses set ``file_format``/``data_type``
    # and alias ``time_read``/``time_write`` to the methods below so asv
    # times them.
    def setup(self):
        # NOTE(review): these two dict initialisations look dead — self.data
        # is overwritten with the file bytes below, and self.tables is not
        # referenced anywhere in this class.
        self.tables = {}
        self.data = {}
        self.output = StringIO()
        # Map of benchmarked format name -> ascii Writer class for write().
        self.writers = {
            'csv': ascii.Csv,
            'rdb': ascii.Rdb,
            'fixed_width': ascii.FixedWidth,
            'fixed_width_no_header': ascii.FixedWidthNoHeader,
            'fixed_width_two_line': ascii.FixedWidthTwoLine,
            'tab': ascii.Tab,
            'no_header': ascii.NoHeader,
            'commented_header': ascii.CommentedHeader,
            'basic': ascii.Basic,
            'ipac': ascii.Ipac,
            'latex': ascii.Latex,
            'aastex': ascii.AASTex
        }
        # Load the fixture file for this format/data type as raw bytes.
        with io.open(os.path.join(HERE, 'files', self.file_format, '{0}.txt'.format(self.data_type)), 'rb') as f:
            self.data = f.read()
        # sextractor is read-only (no Writer), so no table is pre-built.
        if self.file_format != 'sextractor':
            self.table = self.read()
    def read(self):
        # guess=False keeps the parse deterministic for stable timings.
        return ascii.read(BytesIO(self.data), format=self.file_format, guess=False)
    def write(self):
        ascii.write(self.table, self.output, Writer=self.writers[self.file_format])
class CsvString(_ASCIISuite):
    # Benchmark CSV read/write on string-valued columns; asv discovers the
    # time_* attributes as timed benchmarks.
    file_format = 'csv'
    data_type = 'string'
    time_read = _ASCIISuite.read
    time_write = _ASCIISuite.write
class CsvInt(_ASCIISuite):
file_format = 'csv'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CsvFloat(_ASCIISuite):
file_format = 'csv'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class RdbString(_ASCIISuite):
file_format = 'rdb'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class RdbInt(_ASCIISuite):
file_format = 'rdb'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class RdbFloat(_ASCIISuite):
file_format = 'rdb'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthString(_ASCIISuite):
file_format = 'fixed_width'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthInt(_ASCIISuite):
file_format = 'fixed_width'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthFloat(_ASCIISuite):
file_format = 'fixed_width'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthNoHeaderString(_ASCIISuite):
file_format = 'fixed_width_no_header'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthNoHeaderInt(_ASCIISuite):
file_format = 'fixed_width_no_header'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthNoHeaderFloat(_ASCIISuite):
file_format = 'fixed_width_no_header'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthTwoLineString(_ASCIISuite):
file_format = 'fixed_width_two_line'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthTwoLineInt(_ASCIISuite):
file_format = 'fixed_width_two_line'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class FixedWidthTwoLineFloat(_ASCIISuite):
file_format = 'fixed_width_two_line'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class TabString(_ASCIISuite):
file_format = 'tab'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class TabInt(_ASCIISuite):
file_format = 'tab'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class TabFloat(_ASCIISuite):
file_format = 'tab'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class NoHeaderString(_ASCIISuite):
file_format = 'no_header'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class NoHeaderInt(_ASCIISuite):
file_format = 'no_header'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class NoHeaderFloat(_ASCIISuite):
file_format = 'no_header'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CommentedHeaderString(_ASCIISuite):
file_format = 'commented_header'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CommentedHeaderInt(_ASCIISuite):
file_format = 'commented_header'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class CommentedHeaderFloat(_ASCIISuite):
file_format = 'commented_header'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class BasicString(_ASCIISuite):
file_format = 'basic'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class BasicInt(_ASCIISuite):
file_format = 'basic'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class BasicFloat(_ASCIISuite):
file_format = 'basic'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class SextractorString(_ASCIISuite):
file_format = 'sextractor'
data_type = 'string'
time_read = _ASCIISuite.read
class SextractorInt(_ASCIISuite):
file_format = 'sextractor'
data_type = 'int'
time_read = _ASCIISuite.read
class SextractorFloat(_ASCIISuite):
file_format = 'sextractor'
data_type = 'float'
time_read = _ASCIISuite.read
class IpacString(_ASCIISuite):
file_format = 'ipac'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class IpacInt(_ASCIISuite):
file_format = 'ipac'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class IpacFloat(_ASCIISuite):
file_format = 'ipac'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class LatexString(_ASCIISuite):
file_format = 'latex'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class LatexInt(_ASCIISuite):
file_format = 'latex'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class LatexFloat(_ASCIISuite):
file_format = 'latex'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class AastexString(_ASCIISuite):
file_format = 'aastex'
data_type = 'string'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class AastexInt(_ASCIISuite):
file_format = 'aastex'
data_type = 'int'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write
class AastexFloat(_ASCIISuite):
file_format = 'aastex'
data_type = 'float'
time_read = _ASCIISuite.read
time_write = _ASCIISuite.write | 0.328637 | 0.149004 |
from typing import Final
import numpy as np
from PIL import Image
from PIL.ImageFilter import BoxBlur
# Images will be resized to this before applying SSIM (must be larger than
# `WIN_SIZE`).
SSIM_SIZE: Final = (64, 64)
K1: Final = 0.01 # Algorithm parameter K1 (small constant, see the SSIM paper)
K2: Final = 0.03 # Algorithm parameter K2 (small constant, see the SSIM paper)
# The side-length of the sliding window used in comparison (must be odd)
WIN_SIZE: Final = 7
SSIM_THRESH: Final = 0.9 # SSIM threshold for marking as duplicate
def dhash(img: np.ndarray, /, bits: int = 64) -> bytes:
    """Return the difference hash (d-hash) of *img* as raw bytes.

    Args:
        img: The input image as a 3D RGB image
        bits: The number of bits in the hash

    Returns:
        The d-hash of the image
    """
    side = int(np.sqrt(bits))
    if side * side != bits:
        raise ValueError("Number of bits must be a perfect square")
    # Downscale to `side` rows by `side + 1` columns of grayscale: the extra
    # column exists because differences are taken between row-adjacent pixels,
    # which yields one fewer value than the number of columns.
    small = Image.fromarray(img).convert("L").resize((side + 1, side), Image.BICUBIC)
    pixels = np.array(small)
    # Binarize each horizontal brightness step, then pack the bools 8-per-byte.
    steps = pixels[:, :-1] > pixels[:, 1:]
    return np.packbits(steps).tobytes()
def _preprocess(
    img: Image.Image, /, *, size: tuple[int, int] = SSIM_SIZE
) -> np.ndarray:
    """Pre-process the image for SSIM.

    This converts into the following:
    * Grayscale
    * float64 in [0, 1]

    Args:
        img: The PIL image
        size: The size to which the image should be resized

    Returns:
        The pre-processed float64 ndarray image
    """
    img = img.convert("L")  # grayscale
    # BUG FIX: honour the ``size`` argument — it was accepted but ignored in
    # favour of the SSIM_SIZE constant (same value by default, so callers
    # using the default are unaffected).
    img = img.resize(size, resample=Image.BICUBIC)
    return np.array(img) / 255.0
def _filter(img: np.ndarray, /) -> np.ndarray:
    """Apply a uniform filter to the image.

    Args:
        img: The float64 grayscale image in [0, 1]

    Returns:
        The filtered float64 ndarray image in [0, 1]
    """
    # Round-trip through uint8 so PIL can operate on the data.
    grey = (img * 255).astype(np.uint8)
    orig = Image.fromarray(grey)
    # NOTE(review): BoxBlur(radius) averages a (2*radius+1)-wide window, so
    # this is a 15x15 box for WIN_SIZE=7 rather than the 7x7 window the SSIM
    # maths assumes (NP = WIN_SIZE**2) — confirm whether the radius should be
    # (WIN_SIZE - 1) // 2.
    filtered = orig.filter(BoxBlur(WIN_SIZE))
    return np.array(filtered) / 255.0
def ssim(img1: Image.Image, img2: Image.Image, /) -> float:
    """Compute the mean structural similarity index between two images.

    This code was adapted from the SSIM code in the scikit-image library:
    https://github.com/scikit-image/scikit-image/blob/master/skimage/metrics/_structural_similarity.py

    Arguments:
        img1, img2: The input images of the same shape

    Returns:
        The mean structural similarity index over the image.
    """
    # Preprocess images for SSIM (grayscale, resized, float64 in [0, 1])
    arr1 = _preprocess(img1)
    arr2 = _preprocess(img2)
    # Filter is already normalized by NP
    NP = WIN_SIZE ** 2
    cov_norm = NP / (NP - 1)  # sample covariance
    # Compute (weighted) means
    ux = _filter(arr1)
    uy = _filter(arr2)
    # Compute (weighted) variances and covariances
    uxx = _filter(arr1 * arr1)
    uyy = _filter(arr2 * arr2)
    uxy = _filter(arr1 * arr2)
    vx = cov_norm * (uxx - ux * ux)
    vy = cov_norm * (uyy - uy * uy)
    vxy = cov_norm * (uxy - ux * uy)
    # Stabilising constants; data range is 1.0 after _preprocess, so
    # C = (K * L)^2 reduces to K^2.
    C1 = K1 ** 2
    C2 = K2 ** 2
    A1 = 2 * ux * uy + C1
    A2 = 2 * vxy + C2
    B1 = ux ** 2 + uy ** 2 + C1
    B2 = vx + vy + C2
    D = B1 * B2
    S = (A1 * A2) / D
    # To avoid edge effects will ignore filter radius strip around edges
    pad = (WIN_SIZE - 1) // 2
    # Compute (weighted) mean of ssim
    # NOTE(review): result is a numpy float64, not a builtin float as
    # annotated. The trailing "| ... |" text below is dataset-export residue
    # fused onto the line; it is not valid Python.
    mssim = S[pad:-pad, pad:-pad].mean()
    return mssim | imgtools/utils.py | from typing import Final
import numpy as np
from PIL import Image
from PIL.ImageFilter import BoxBlur
# Images will be resized to this before applying SSIM (must be larger than
# `WIN_SIZE`).
SSIM_SIZE: Final = (64, 64)
K1: Final = 0.01 # Algorithm parameter K1 (small constant, see the SSIM paper)
K2: Final = 0.03 # Algorithm parameter K2 (small constant, see the SSIM paper)
# The side-length of the sliding window used in comparison (must be odd)
WIN_SIZE: Final = 7
SSIM_THRESH: Final = 0.9 # SSIM threshold for marking as duplicate
def dhash(img: np.ndarray, /, bits: int = 64) -> bytes:
    """Compute d-hash of an image.

    Args:
        img: The input image as a 3D RGB image
        bits: The number of bits in the hash

    Returns:
        The d-hash of the image
    """
    # Integer truncation of the float sqrt; validated just below.
    width = int(np.sqrt(bits))
    if width ** 2 != bits:
        raise ValueError("Number of bits must be a perfect square")
    pillow = Image.fromarray(img)
    # Resize image to `width` x `width` with an extra column.
    # This extra column is added because the difference b/w two pixels is
    # calculated along rows. This results in differences one less than the no.
    # of columns in a row.
    # NOTE(review): Image.BICUBIC is deprecated in newer Pillow in favour of
    # Image.Resampling.BICUBIC — confirm the pinned Pillow version.
    pillow = pillow.convert("L").resize((width + 1, width), Image.BICUBIC)
    img = np.array(pillow)
    # Binarize the difference b/w two row-adjacent pixels
    diff = img[:, :-1] > img[:, 1:]
    # Flatten and pack the bools into bytes, ie 8 bools become one uint8 byte
    binary = np.packbits(diff)
    # Get the binary hash as a bytes string
    return binary.tobytes()
def _preprocess(
    img: Image.Image, /, *, size: tuple[int, int] = SSIM_SIZE
) -> np.ndarray:
    """Pre-process the image for SSIM.

    This converts into the following:
    * Grayscale
    * float64 in [0, 1]

    Args:
        img: The PIL image
        size: The size to which the image should be resized

    Returns:
        The pre-processed float64 ndarray image
    """
    img = img.convert("L")  # grayscale
    # BUG FIX: honour the ``size`` argument — it was accepted but ignored in
    # favour of the SSIM_SIZE constant (same value by default, so callers
    # using the default are unaffected).
    img = img.resize(size, resample=Image.BICUBIC)
    return np.array(img) / 255.0
def _filter(img: np.ndarray, /) -> np.ndarray:
    """Apply a uniform filter to the image.

    Args:
        img: The float64 grayscale image in [0, 1]

    Returns:
        The filtered float64 ndarray image in [0, 1]
    """
    # Round-trip through uint8 so PIL can operate on the data.
    grey = (img * 255).astype(np.uint8)
    orig = Image.fromarray(grey)
    # NOTE(review): BoxBlur(radius) averages a (2*radius+1)-wide window, so
    # this is a 15x15 box for WIN_SIZE=7 rather than the 7x7 window the SSIM
    # maths assumes (NP = WIN_SIZE**2) — confirm whether the radius should be
    # (WIN_SIZE - 1) // 2.
    filtered = orig.filter(BoxBlur(WIN_SIZE))
    return np.array(filtered) / 255.0
def ssim(img1: Image.Image, img2: Image.Image, /) -> float:
"""Compute the mean structural similarity index between two images.
This code was adapted from the SSIM code in the scikit-image library:
https://github.com/scikit-image/scikit-image/blob/master/skimage/metrics/_structural_similarity.py
Arguments:
img1, arr2: The input images of the same shape
Returns:
The mean structural similarity index over the image.
"""
# Preprocess images for SSIM
arr1 = _preprocess(img1)
arr2 = _preprocess(img2)
# Filter is already normalized by NP
NP = WIN_SIZE ** 2
cov_norm = NP / (NP - 1) # sample covariance
# Compute (weighted) means
ux = _filter(arr1)
uy = _filter(arr2)
# Compute (weighted) variances and covariances
uxx = _filter(arr1 * arr1)
uyy = _filter(arr2 * arr2)
uxy = _filter(arr1 * arr2)
vx = cov_norm * (uxx - ux * ux)
vy = cov_norm * (uyy - uy * uy)
vxy = cov_norm * (uxy - ux * uy)
C1 = K1 ** 2
C2 = K2 ** 2
A1 = 2 * ux * uy + C1
A2 = 2 * vxy + C2
B1 = ux ** 2 + uy ** 2 + C1
B2 = vx + vy + C2
D = B1 * B2
S = (A1 * A2) / D
# To avoid edge effects will ignore filter radius strip around edges
pad = (WIN_SIZE - 1) // 2
# Compute (weighted) mean of ssim
mssim = S[pad:-pad, pad:-pad].mean()
return mssim | 0.925546 | 0.667588 |
from ..constants import *
from ..statics import *
import os, re
class Deleter:
    """Marks registers in a table's address file as deleted.

    Each matching field is overwritten in place with a trash marker; its
    page/addr/length triple is appended to the data-trash file and its
    position/size in the address file to the addr-trash file, so the
    space can be reclaimed later.
    """

    def __init__(self,name,scheme):
        """Open the address file for update and the two trash files.

        Args:
            name: Table name, used to build every file path.
            scheme: Mapping with 'locations' (directories) and
                'max_values' (fixed binary field widths).
        """
        self.name = name
        self.scheme = scheme
        addr_path = self.scheme['locations']['address']+os.sep+self.name+ADDR_EXT
        self.addr = open(addr_path,'rb+')
        addr_trash_path = self.scheme['locations']['addr_trash']+os.sep+self.name+ADDR_TRASH_EXT
        data_trash_path = self.scheme['locations']['data_trash']+os.sep+self.name+DATA_TRASH_EXT
        # Reopen existing trash files for update; create them on first use.
        if os.path.isfile(data_trash_path):
            self.data_trash = open(data_trash_path,'rb+')
        else:
            self.data_trash = open(data_trash_path,'wb')
        if os.path.isfile(addr_trash_path):
            self.addr_trash = open(addr_trash_path,'rb+')
        else:
            self.addr_trash = open(addr_trash_path,'wb')

    def delete_by_id(self,id):
        """Overwrite every address-file field whose register id is *id*,
        recording the match in both trash files first.
        """
        # Fixed field widths used to slice the binary records.
        max_cols = self.scheme['max_values']['max_cols']
        max_regs = self.scheme['max_values']['max_regs']
        max_pages = self.scheme['max_values']['max_pages']
        max_page_size = self.scheme['max_values']['max_page_size']
        self.addr.seek(0)
        # The trash files are append-only: position at end of file.
        self.addr_trash.seek(0,2)
        self.data_trash.seek(0,2)
        k = self.addr.read(BUF)
        if len(k) == BUF:
            # Extend the buffer to the next FIELD marker so no record is
            # split across the buffer boundary.
            k = read_until(self.addr.read,FIELD)
        where = self.addr.tell()
        offset = 0
        while k:
            pos_field = k.find(FIELD,offset)
            while pos_field > -1:
                # The register id sits after the FIELD marker and the
                # column bytes.
                id_b = k[pos_field+len(FIELD)+max_cols:pos_field+len(FIELD)+max_cols+max_regs]
                if from_bytes_e(id_b) == id:
                    end_field = k.find(FIELD_END,pos_field+len(FIELD))
                    sub_bytes = k[pos_field:end_field+len(FIELD_END)]
                    # Slice the record into its page/addr/length parts.
                    data_trash = {
                        'page':sub_bytes[len(FIELD)+max_cols+max_regs:len(FIELD)+max_cols+max_regs+max_pages],
                        'addr':sub_bytes[len(FIELD)+max_cols+max_regs+max_pages:len(FIELD)+max_cols+max_regs+max_pages+max_page_size],
                        'length':sub_bytes[len(FIELD)+max_cols+max_regs+max_pages+max_page_size:-len(FIELD_END)]
                    }
                    data_trash_dump = TRASH+data_trash['page']+data_trash['addr']+data_trash['length']+FIELD_END
                    trash_mark = TRASH+to_bytes_e(pos_field,0)+VALUE+to_bytes_e(len(sub_bytes),0)+FIELD_END
                    trash = FIELD
                    trash += to_bytes_e(2**(8*max_cols)-1,max_cols)  ## 0xFF in a column
                    # NOTE(review): pos_field is an offset inside the buffer
                    # ``k``, not an absolute file offset; this seek (and the
                    # position stored in trash_mark) looks wrong for files
                    # larger than BUF -- confirm.
                    self.addr.seek(pos_field)
                    self.addr_trash.write(trash_mark)
                    self.data_trash.write(data_trash_dump)
                    # Overwrite the field header with the trash marker.
                    self.addr.write(trash)
                offset = pos_field+len(FIELD)
                pos_field = k.find(FIELD,offset)
            # NOTE(review): offset is not reset to 0 after refilling the
            # buffer -- verify that fields early in subsequent buffers
            # are not skipped.
            self.addr.seek(where)
            k = self.addr.read(BUF)
            where = self.addr.tell()

    def close_deleter(self):
        """Append a terminating TRASH marker to both trash files and close them.

        NOTE(review): self.addr is left open here -- confirm the caller
        closes it.
        """
        self.addr_trash.seek(0,2)
        self.addr_trash.write(TRASH)
        self.addr_trash.close()
        self.data_trash.seek(0,2)
        self.data_trash.write(TRASH)
        self.data_trash.close()
from ..statics import *
import os, re
class Deleter:
    """Marks registers in a table's address file as deleted.

    Each matching field is overwritten in place with a trash marker; its
    page/addr/length triple is appended to the data-trash file and its
    position/size in the address file to the addr-trash file, so the
    space can be reclaimed later.
    """

    def __init__(self,name,scheme):
        """Open the address file for update and the two trash files.

        Args:
            name: Table name, used to build every file path.
            scheme: Mapping with 'locations' (directories) and
                'max_values' (fixed binary field widths).
        """
        self.name = name
        self.scheme = scheme
        addr_path = self.scheme['locations']['address']+os.sep+self.name+ADDR_EXT
        self.addr = open(addr_path,'rb+')
        addr_trash_path = self.scheme['locations']['addr_trash']+os.sep+self.name+ADDR_TRASH_EXT
        data_trash_path = self.scheme['locations']['data_trash']+os.sep+self.name+DATA_TRASH_EXT
        # Reopen existing trash files for update; create them on first use.
        if os.path.isfile(data_trash_path):
            self.data_trash = open(data_trash_path,'rb+')
        else:
            self.data_trash = open(data_trash_path,'wb')
        if os.path.isfile(addr_trash_path):
            self.addr_trash = open(addr_trash_path,'rb+')
        else:
            self.addr_trash = open(addr_trash_path,'wb')

    def delete_by_id(self,id):
        """Overwrite every address-file field whose register id is *id*,
        recording the match in both trash files first.
        """
        # Fixed field widths used to slice the binary records.
        max_cols = self.scheme['max_values']['max_cols']
        max_regs = self.scheme['max_values']['max_regs']
        max_pages = self.scheme['max_values']['max_pages']
        max_page_size = self.scheme['max_values']['max_page_size']
        self.addr.seek(0)
        # The trash files are append-only: position at end of file.
        self.addr_trash.seek(0,2)
        self.data_trash.seek(0,2)
        k = self.addr.read(BUF)
        if len(k) == BUF:
            # Extend the buffer to the next FIELD marker so no record is
            # split across the buffer boundary.
            k = read_until(self.addr.read,FIELD)
        where = self.addr.tell()
        offset = 0
        while k:
            pos_field = k.find(FIELD,offset)
            while pos_field > -1:
                # The register id sits after the FIELD marker and the
                # column bytes.
                id_b = k[pos_field+len(FIELD)+max_cols:pos_field+len(FIELD)+max_cols+max_regs]
                if from_bytes_e(id_b) == id:
                    end_field = k.find(FIELD_END,pos_field+len(FIELD))
                    sub_bytes = k[pos_field:end_field+len(FIELD_END)]
                    # Slice the record into its page/addr/length parts.
                    data_trash = {
                        'page':sub_bytes[len(FIELD)+max_cols+max_regs:len(FIELD)+max_cols+max_regs+max_pages],
                        'addr':sub_bytes[len(FIELD)+max_cols+max_regs+max_pages:len(FIELD)+max_cols+max_regs+max_pages+max_page_size],
                        'length':sub_bytes[len(FIELD)+max_cols+max_regs+max_pages+max_page_size:-len(FIELD_END)]
                    }
                    data_trash_dump = TRASH+data_trash['page']+data_trash['addr']+data_trash['length']+FIELD_END
                    trash_mark = TRASH+to_bytes_e(pos_field,0)+VALUE+to_bytes_e(len(sub_bytes),0)+FIELD_END
                    trash = FIELD
                    trash += to_bytes_e(2**(8*max_cols)-1,max_cols)  ## 0xFF in a column
                    # NOTE(review): pos_field is an offset inside the buffer
                    # ``k``, not an absolute file offset; this seek (and the
                    # position stored in trash_mark) looks wrong for files
                    # larger than BUF -- confirm.
                    self.addr.seek(pos_field)
                    self.addr_trash.write(trash_mark)
                    self.data_trash.write(data_trash_dump)
                    # Overwrite the field header with the trash marker.
                    self.addr.write(trash)
                offset = pos_field+len(FIELD)
                pos_field = k.find(FIELD,offset)
            # NOTE(review): offset is not reset to 0 after refilling the
            # buffer -- verify that fields early in subsequent buffers
            # are not skipped.
            self.addr.seek(where)
            k = self.addr.read(BUF)
            where = self.addr.tell()

    def close_deleter(self):
        """Append a terminating TRASH marker to both trash files and close them.

        NOTE(review): self.addr is left open here -- confirm the caller
        closes it.
        """
        self.addr_trash.seek(0,2)
        self.addr_trash.write(TRASH)
        self.addr_trash.close()
        self.data_trash.seek(0,2)
        self.data_trash.write(TRASH)
        self.data_trash.close()
from decimal import Decimal
class Bookings:
    """Representing a collection of Booking objects.

    Bookings are indexed by booking number and, for active bookings
    (status '' or 'V'), also by start date.
    """

    def __init__(self, env, customers, pets, services):
        self.bookings = {}       # bk_no -> Booking
        self.by_start_date = {}  # date -> [Booking], active bookings only
        self.env = env
        self.loaded = False
        self.customers = customers
        self.pets = pets
        self.services = services

    def get(self, bk_no):
        """Return the Booking numbered *bk_no*, or None if unknown."""
        return self.bookings.get(bk_no)

    def get_by_start_date(self, start_date):
        """Return the (possibly empty) list of bookings starting on *start_date*."""
        self.load()
        return self.by_start_date.get(start_date, [])

    def load_by_sql(self, sql_booking, sql_bookingitem, sql_invitem,
                    sql_invextra, sql_payment):
        """Populate the collection from the five supplied SELECT statements.

        The booking query runs first; the other four attach child rows
        (pets, invoice items, extras, payments) to the bookings it created.
        """
        cursor = self.env.get_cursor()
        cursor.execute(sql_booking)
        for row in cursor:
            bk_no = row[0]
            booking = Booking(bk_no)
            cust_no = row[1]
            booking.customer = self.customers.get(cust_no)
            booking.create_date = row[2]
            booking.start_date = row[3]
            booking.end_date = row[4]
            booking.gross_amt = row[5]
            booking.paid_amt = row[6]
            booking.status = row[7]
            booking.peak = row[8]
            booking.deluxe = row[9]
            booking.skip = row[10]
            booking.pickup = row[11]
            self.bookings[bk_no] = booking
            # Only bookings with status '' or 'V' are indexed by date.
            sdate = booking.start_date.date()
            if booking.status == '' or booking.status == 'V':
                self.by_start_date.setdefault(sdate, []).append(booking)
        cursor.execute(sql_bookingitem)
        for row in cursor:
            booking = self.get(row[0])
            pet = self.pets.get(row[1])
            if booking:
                booking.pets.append(pet)
        cursor.execute(sql_invitem)
        for row in cursor:
            booking = self.get(row[0])
            if booking:
                pet = self.pets.get(row[1])
                service = self.services.get(row[2])
                booking.inv_items.append(InventoryItem(pet, service, row[3], row[4]))
        cursor.execute(sql_invextra)
        for row in cursor:
            booking = self.get(row[0])
            if booking:
                booking.extra_items.append(ExtraItem(row[1], row[2], row[3]))
        cursor.execute(sql_payment)
        for row in cursor:
            booking = self.get(row[0])
            # Bug fix: guard against unknown booking numbers, consistent
            # with the inv_item/inv_extra loops above (previously raised
            # AttributeError on None).
            if booking:
                booking.payments.append(Payment(row[1], row[2], row[3]))

    def load(self, force=False):
        """Load every booking from the database (no-op once loaded unless *force*)."""
        if self.loaded and not force:
            return
        log.debug('Loading Bookings')
        sql_booking = """
            Select bk_no, bk_cust_no, bk_create_date, bk_start_datetime, bk_end_datetime,
            bk_gross_amt, bk_paid_amt, bk_status, bk_peak, bk_deluxe, bk_skip_confirm,
            bk_pickup_no from vwbooking"""
        sql_bookingitem = """
            Select bi_bk_no, bi_pet_no from vwbookingitem_simple"""
        sql_invitem = """
            Select ii_bk_no, ii_pet_no, ii_srv_no, ii_quantity, ii_rate from vwinvitem"""
        sql_invextra = """
            Select ie_bk_no, ie_desc, ie_unit_price, ie_quantity from vwinvextra"""
        sql_payment = """
            Select pay_bk_no, pay_date, pay_amount, pay_type from vwpayment_simple"""
        self.load_by_sql(sql_booking, sql_bookingitem, sql_invitem, sql_invextra,
                         sql_payment)
        log.debug(f'Loaded {len(self.bookings)} bookings')
        self.loaded = True

    def load_for_customer(self, cust_no):
        """Load only the bookings (and children) of customer *cust_no*.

        NOTE(review): cust_no is interpolated straight into the SQL; this
        is safe only if it is guaranteed to be an int -- consider a
        parameterized query.
        """
        if self.loaded:
            return
        log.debug(f'Loading Bookings for customer #{cust_no}')
        sql_booking = f"""
            Select bk_no, bk_cust_no, bk_create_date, bk_start_datetime, bk_end_datetime,
            bk_gross_amt, bk_paid_amt, bk_status, bk_peak, bk_deluxe, bk_skip_confirm,
            bk_pickup_no from vwbooking
            where bk_cust_no = {cust_no}"""
        sql_bookingitem = f"""
            Select bi_bk_no, bi_pet_no
            from vwbookingitem_simple
            where bi_cust_no = {cust_no}"""
        sql_invitem = f"""
            Select ii_bk_no, ii_pet_no, ii_srv_no, ii_quantity, ii_rate
            from vwinvitem where ii_cust_no = {cust_no}"""
        sql_invextra = f"""
            Select ie_bk_no, ie_desc, ie_unit_price, ie_quantity
            from vwinvextra
            where ie_cust_no = {cust_no}"""
        sql_payment = f"""
            Select pay_bk_no, pay_date, pay_amount, pay_type
            from vwpayment_simple where pay_cust_no = {cust_no}"""
        self.load_by_sql(sql_booking, sql_bookingitem, sql_invitem, sql_invextra,
                         sql_payment)
        log.debug(f'Loaded bookings for customer #{cust_no}')
        self.loaded = True
class Payment:
    """A single payment recorded against a booking."""

    def __init__(self, pay_date, amount, pay_type):
        # Plain value object: store the row fields verbatim.
        self.type = pay_type
        self.amount = amount
        self.pay_date = pay_date
from .env import log
class ExtraItem:
    """An ad-hoc invoice line: free-text description, unit price, quantity."""

    def __init__(self, desc, unit_price, quantity):
        # Plain value object: store the fields verbatim.
        self.quantity = quantity
        self.unit_price = unit_price
        self.desc = desc
class InventoryItem:
    """An invoice line for a service delivered to a pet."""

    def __init__(self, pet, service, quantity, rate):
        # Plain value object: store the fields verbatim.
        self.rate = rate
        self.quantity = quantity
        self.service = service
        self.pet = pet
class Booking:
    """Representing a PetAdmin Booking."""

    def __init__(self, bk_no):
        self.no = bk_no
        self.customer = None
        self.pets = []
        self.create_date = None
        self.start_date = None
        self.end_date = None
        self.status = ''
        self.gross_amt = Decimal("0.0")
        self.paid_amt = Decimal("0.0")
        self.inv_items = []
        self.extra_items = []
        self.payments = []
        self.peak = 0
        self.deluxe = 0
        self.skip = 0  # bug fix: was assigned three times in the original

    def pet_names(self):
        """Return the pets' names as 'A', 'A and B' or 'A, B and C'.

        Returns an empty string for a booking with no pets (the
        original raised IndexError in that case).
        """
        if not self.pets:
            return ''
        if len(self.pets) == 1:
            return self.pets[0].name
        return ', '.join(p.name for p in self.pets[:-1]) + \
            ' and ' + self.pets[-1].name

    def add_payment(self, payment):
        """Attach *payment* to this booking."""
        self.payments.append(payment)

    def outstanding_amt(self):
        """Return the amount still owed (gross minus paid)."""
        return self.gross_amt - self.paid_amt
class Bookings:
    """Representing a collection of Booking objects.

    Bookings are indexed by booking number and, for active bookings
    (status '' or 'V'), also by start date.
    """

    def __init__(self, env, customers, pets, services):
        self.bookings = {}       # bk_no -> Booking
        self.by_start_date = {}  # date -> [Booking], active bookings only
        self.env = env
        self.loaded = False
        self.customers = customers
        self.pets = pets
        self.services = services

    def get(self, bk_no):
        """Return the Booking numbered *bk_no*, or None if unknown."""
        return self.bookings.get(bk_no)

    def get_by_start_date(self, start_date):
        """Return the (possibly empty) list of bookings starting on *start_date*."""
        self.load()
        return self.by_start_date.get(start_date, [])

    def load_by_sql(self, sql_booking, sql_bookingitem, sql_invitem,
                    sql_invextra, sql_payment):
        """Populate the collection from the five supplied SELECT statements.

        The booking query runs first; the other four attach child rows
        (pets, invoice items, extras, payments) to the bookings it created.
        """
        cursor = self.env.get_cursor()
        cursor.execute(sql_booking)
        for row in cursor:
            bk_no = row[0]
            booking = Booking(bk_no)
            cust_no = row[1]
            booking.customer = self.customers.get(cust_no)
            booking.create_date = row[2]
            booking.start_date = row[3]
            booking.end_date = row[4]
            booking.gross_amt = row[5]
            booking.paid_amt = row[6]
            booking.status = row[7]
            booking.peak = row[8]
            booking.deluxe = row[9]
            booking.skip = row[10]
            booking.pickup = row[11]
            self.bookings[bk_no] = booking
            # Only bookings with status '' or 'V' are indexed by date.
            sdate = booking.start_date.date()
            if booking.status == '' or booking.status == 'V':
                self.by_start_date.setdefault(sdate, []).append(booking)
        cursor.execute(sql_bookingitem)
        for row in cursor:
            booking = self.get(row[0])
            pet = self.pets.get(row[1])
            if booking:
                booking.pets.append(pet)
        cursor.execute(sql_invitem)
        for row in cursor:
            booking = self.get(row[0])
            if booking:
                pet = self.pets.get(row[1])
                service = self.services.get(row[2])
                booking.inv_items.append(InventoryItem(pet, service, row[3], row[4]))
        cursor.execute(sql_invextra)
        for row in cursor:
            booking = self.get(row[0])
            if booking:
                booking.extra_items.append(ExtraItem(row[1], row[2], row[3]))
        cursor.execute(sql_payment)
        for row in cursor:
            booking = self.get(row[0])
            # Bug fix: guard against unknown booking numbers, consistent
            # with the inv_item/inv_extra loops above (previously raised
            # AttributeError on None).
            if booking:
                booking.payments.append(Payment(row[1], row[2], row[3]))

    def load(self, force=False):
        """Load every booking from the database (no-op once loaded unless *force*)."""
        if self.loaded and not force:
            return
        log.debug('Loading Bookings')
        sql_booking = """
            Select bk_no, bk_cust_no, bk_create_date, bk_start_datetime, bk_end_datetime,
            bk_gross_amt, bk_paid_amt, bk_status, bk_peak, bk_deluxe, bk_skip_confirm,
            bk_pickup_no from vwbooking"""
        sql_bookingitem = """
            Select bi_bk_no, bi_pet_no from vwbookingitem_simple"""
        sql_invitem = """
            Select ii_bk_no, ii_pet_no, ii_srv_no, ii_quantity, ii_rate from vwinvitem"""
        sql_invextra = """
            Select ie_bk_no, ie_desc, ie_unit_price, ie_quantity from vwinvextra"""
        sql_payment = """
            Select pay_bk_no, pay_date, pay_amount, pay_type from vwpayment_simple"""
        self.load_by_sql(sql_booking, sql_bookingitem, sql_invitem, sql_invextra,
                         sql_payment)
        log.debug(f'Loaded {len(self.bookings)} bookings')
        self.loaded = True

    def load_for_customer(self, cust_no):
        """Load only the bookings (and children) of customer *cust_no*.

        NOTE(review): cust_no is interpolated straight into the SQL; this
        is safe only if it is guaranteed to be an int -- consider a
        parameterized query.
        """
        if self.loaded:
            return
        log.debug(f'Loading Bookings for customer #{cust_no}')
        sql_booking = f"""
            Select bk_no, bk_cust_no, bk_create_date, bk_start_datetime, bk_end_datetime,
            bk_gross_amt, bk_paid_amt, bk_status, bk_peak, bk_deluxe, bk_skip_confirm,
            bk_pickup_no from vwbooking
            where bk_cust_no = {cust_no}"""
        sql_bookingitem = f"""
            Select bi_bk_no, bi_pet_no
            from vwbookingitem_simple
            where bi_cust_no = {cust_no}"""
        sql_invitem = f"""
            Select ii_bk_no, ii_pet_no, ii_srv_no, ii_quantity, ii_rate
            from vwinvitem where ii_cust_no = {cust_no}"""
        sql_invextra = f"""
            Select ie_bk_no, ie_desc, ie_unit_price, ie_quantity
            from vwinvextra
            where ie_cust_no = {cust_no}"""
        sql_payment = f"""
            Select pay_bk_no, pay_date, pay_amount, pay_type
            from vwpayment_simple where pay_cust_no = {cust_no}"""
        self.load_by_sql(sql_booking, sql_bookingitem, sql_invitem, sql_invextra,
                         sql_payment)
        log.debug(f'Loaded bookings for customer #{cust_no}')
        self.loaded = True
class Payment:
    """A single payment recorded against a booking."""

    def __init__(self, pay_date, amount, pay_type):
        # Plain value object: store the row fields verbatim.
        self.type = pay_type
        self.amount = amount
        self.pay_date = pay_date
from .env import log
class ExtraItem:
    """An ad-hoc invoice line: free-text description, unit price, quantity."""

    def __init__(self, desc, unit_price, quantity):
        # Plain value object: store the fields verbatim.
        self.quantity = quantity
        self.unit_price = unit_price
        self.desc = desc
class InventoryItem:
    """An invoice line for a service delivered to a pet."""

    def __init__(self, pet, service, quantity, rate):
        # Plain value object: store the fields verbatim.
        self.rate = rate
        self.quantity = quantity
        self.service = service
        self.pet = pet
class Booking:
    """Representing a PetAdmin Booking."""

    def __init__(self, bk_no):
        self.no = bk_no
        self.customer = None
        self.pets = []
        self.create_date = None
        self.start_date = None
        self.end_date = None
        self.status = ''
        self.gross_amt = Decimal("0.0")
        self.paid_amt = Decimal("0.0")
        self.inv_items = []
        self.extra_items = []
        self.payments = []
        self.peak = 0
        self.deluxe = 0
        self.skip = 0  # bug fix: was assigned three times in the original

    def pet_names(self):
        """Return the pets' names as 'A', 'A and B' or 'A, B and C'.

        Returns an empty string for a booking with no pets (the
        original raised IndexError in that case).
        """
        if not self.pets:
            return ''
        if len(self.pets) == 1:
            return self.pets[0].name
        return ', '.join(p.name for p in self.pets[:-1]) + \
            ' and ' + self.pets[-1].name

    def add_payment(self, payment):
        """Attach *payment* to this booking."""
        self.payments.append(payment)

    def outstanding_amt(self):
        """Return the amount still owed (gross minus paid)."""
        return self.gross_amt - self.paid_amt
import os
from os.path import join
import json
from collections import Counter
def get_counter(dirpath, tag):
    """Count character frequencies over all annotations carrying *tag*.

    Reads every JSON annotation in ``dirpath/ann``; for those whose
    'tags' list contains *tag*, accumulates the characters of the
    'description' field.

    Args:
        dirpath: Dataset directory containing an 'ann' subdirectory.
        tag: Tag an annotation must carry to be counted.

    Returns:
        collections.Counter mapping each character to its frequency.
    """
    dirname = os.path.basename(dirpath)
    ann_dirpath = join(dirpath, 'ann')
    letters = ''
    lens = []
    for filename in os.listdir(ann_dirpath):
        json_filepath = join(ann_dirpath, filename)
        # Bug fix: use a context manager so file handles are not leaked.
        with open(json_filepath, 'r') as f:
            ann = json.load(f)
        tags = ann['tags']
        if tag in tags:
            description = ann['description']
            lens.append(len(description))
            letters += description
    # Guard: max() on an empty sequence raises ValueError when no
    # annotation carries the tag.
    if lens:
        print('Max plate length in "%s":' % dirname, max(lens))
    return Counter(letters)
def labels_to_text(labels):
    """Map a sequence of class indices back to their characters."""
    return ''.join(letters[int(label)] for label in labels)
def text_to_labels(text):
    """Map each character of *text* to its index in ``letters``."""
    return [letters.index(ch) for ch in text]
def is_valid_str(s):
    """Return True iff every character of *s* appears in ``letters``."""
    return all(ch in letters for ch in s)
# Build the character set from the train/val annotations; both splits
# must cover exactly the same alphabet for training to be consistent.
c_val = get_counter('d:/git/supervisely-tutorials/ocrImpl/data/anpr_ocr__train', 'val')
c_train = get_counter('d:/git/supervisely-tutorials/ocrImpl/data/anpr_ocr__train', 'train')
letters_train = set(c_train.keys())
letters_val = set(c_val.keys())
if letters_train == letters_val:
    print('Letters in train and val do match')
else:
    # Bug fix: the original raised a bare Exception with no message.
    raise Exception('Letters in train and val do not match')
# print(len(letters_train), len(letters_val), len(letters_val | letters_train))
letters = sorted(list(letters_train))
print('Letters:', ' '.join(letters))
class TextImageGenerator:
    """Generates batches of (plate image, label) data for CTC OCR training.

    NOTE(review): np, cv2, random and K (the Keras backend) are used but
    never imported in this file -- confirm they come from the original
    notebook context.
    """

    def __init__(self,
                 dirpath,
                 tag,
                 img_w, img_h,
                 batch_size,
                 downsample_factor,
                 max_text_len=8):
        """Index all annotated .png/.jpg images under *dirpath* whose
        annotation carries *tag* and has a valid description.

        Args:
            dirpath: Dataset directory with 'img' and 'ann' subdirectories.
            tag: Split tag an annotation must carry to be included.
            img_w: Target image width (the RNN time dimension).
            img_h: Target image height.
            batch_size: Samples per generated batch.
            downsample_factor: CNN downsampling used to size the CTC input.
            max_text_len: Padded label width of a batch.
        """
        self.img_h = img_h
        self.img_w = img_w
        self.batch_size = batch_size
        self.max_text_len = max_text_len
        self.downsample_factor = downsample_factor
        img_dirpath = join(dirpath, 'img')
        ann_dirpath = join(dirpath, 'ann')
        self.samples = []
        for filename in os.listdir(img_dirpath):
            name, ext = os.path.splitext(filename)
            if ext in ['.png', '.jpg']:
                img_filepath = join(img_dirpath, filename)
                json_filepath = join(ann_dirpath, name + '.json')
                # Bug fix: use a context manager so the annotation file
                # handle is not leaked.
                with open(json_filepath, 'r') as json_file:
                    ann = json.load(json_file)
                description = ann['description']
                tags = ann['tags']
                if tag not in tags:
                    continue
                if is_valid_str(description):
                    self.samples.append([img_filepath, description])
        self.n = len(self.samples)
        self.indexes = list(range(self.n))
        self.cur_index = 0

    def build_data(self):
        """Load, grayscale, resize and normalize every indexed image."""
        self.imgs = np.zeros((self.n, self.img_h, self.img_w))
        self.texts = []
        for i, (img_filepath, text) in enumerate(self.samples):
            img = cv2.imread(img_filepath)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (self.img_w, self.img_h))
            img = img.astype(np.float32)
            img /= 255
            # width and height are backwards from typical Keras convention
            # because width is the time dimension when it gets fed into the RNN
            self.imgs[i, :, :] = img
            self.texts.append(text)

    def get_output_size(self):
        """Number of output classes: one per letter plus the CTC blank."""
        return len(letters) + 1

    def next_sample(self):
        """Return the next (image, text) pair, reshuffling each epoch."""
        self.cur_index += 1
        if self.cur_index >= self.n:
            self.cur_index = 0
            random.shuffle(self.indexes)
        return self.imgs[self.indexes[self.cur_index]], self.texts[self.indexes[self.cur_index]]

    def next_batch(self):
        """Yield CTC training batches forever.

        Yields:
            (inputs, outputs): inputs holds the image tensor, padded
            labels and the CTC input/label lengths; outputs is the dummy
            zero target for the CTC loss layer.
        """
        while True:
            # width and height are backwards from typical Keras convention
            # because width is the time dimension when it gets fed into the RNN
            if K.image_data_format() == 'channels_first':
                X_data = np.ones([self.batch_size, 1, self.img_w, self.img_h])
            else:
                X_data = np.ones([self.batch_size, self.img_w, self.img_h, 1])
            Y_data = np.ones([self.batch_size, self.max_text_len])
            input_length = np.ones((self.batch_size, 1)) * (self.img_w // self.downsample_factor - 2)
            label_length = np.zeros((self.batch_size, 1))
            source_str = []
            for i in range(self.batch_size):
                img, text = self.next_sample()
                img = img.T
                if K.image_data_format() == 'channels_first':
                    img = np.expand_dims(img, 0)
                else:
                    img = np.expand_dims(img, -1)
                X_data[i] = img
                Y_data[i] = text_to_labels(text)
                source_str.append(text)
                label_length[i] = len(text)
            inputs = {
                'the_input': X_data,
                'the_labels': Y_data,
                'input_length': input_length,
                'label_length': label_length,
                # 'source_str': source_str
            }
            outputs = {'ctc': np.zeros([self.batch_size])}
            yield (inputs, outputs)
from os.path import join
import json
from collections import Counter
def get_counter(dirpath, tag):
    """Count character frequencies over all annotations carrying *tag*.

    Reads every JSON annotation in ``dirpath/ann``; for those whose
    'tags' list contains *tag*, accumulates the characters of the
    'description' field.

    Args:
        dirpath: Dataset directory containing an 'ann' subdirectory.
        tag: Tag an annotation must carry to be counted.

    Returns:
        collections.Counter mapping each character to its frequency.
    """
    dirname = os.path.basename(dirpath)
    ann_dirpath = join(dirpath, 'ann')
    letters = ''
    lens = []
    for filename in os.listdir(ann_dirpath):
        json_filepath = join(ann_dirpath, filename)
        # Bug fix: use a context manager so file handles are not leaked.
        with open(json_filepath, 'r') as f:
            ann = json.load(f)
        tags = ann['tags']
        if tag in tags:
            description = ann['description']
            lens.append(len(description))
            letters += description
    # Guard: max() on an empty sequence raises ValueError when no
    # annotation carries the tag.
    if lens:
        print('Max plate length in "%s":' % dirname, max(lens))
    return Counter(letters)
def labels_to_text(labels):
    """Map a sequence of class indices back to their characters."""
    return ''.join(letters[int(label)] for label in labels)
def text_to_labels(text):
    """Map each character of *text* to its index in ``letters``."""
    return [letters.index(ch) for ch in text]
def is_valid_str(s):
    """Return True iff every character of *s* appears in ``letters``."""
    return all(ch in letters for ch in s)
# Build the character set from the train/val annotations; both splits
# must cover exactly the same alphabet for training to be consistent.
c_val = get_counter('d:/git/supervisely-tutorials/ocrImpl/data/anpr_ocr__train', 'val')
c_train = get_counter('d:/git/supervisely-tutorials/ocrImpl/data/anpr_ocr__train', 'train')
letters_train = set(c_train.keys())
letters_val = set(c_val.keys())
if letters_train == letters_val:
    print('Letters in train and val do match')
else:
    # Bug fix: the original raised a bare Exception with no message.
    raise Exception('Letters in train and val do not match')
# print(len(letters_train), len(letters_val), len(letters_val | letters_train))
letters = sorted(list(letters_train))
print('Letters:', ' '.join(letters))
class TextImageGenerator:
    """Generates batches of (plate image, label) data for CTC OCR training.

    NOTE(review): np, cv2, random and K (the Keras backend) are used but
    never imported in this file -- confirm they come from the original
    notebook context.
    """

    def __init__(self,
                 dirpath,
                 tag,
                 img_w, img_h,
                 batch_size,
                 downsample_factor,
                 max_text_len=8):
        """Index all annotated .png/.jpg images under *dirpath* whose
        annotation carries *tag* and has a valid description.

        Args:
            dirpath: Dataset directory with 'img' and 'ann' subdirectories.
            tag: Split tag an annotation must carry to be included.
            img_w: Target image width (the RNN time dimension).
            img_h: Target image height.
            batch_size: Samples per generated batch.
            downsample_factor: CNN downsampling used to size the CTC input.
            max_text_len: Padded label width of a batch.
        """
        self.img_h = img_h
        self.img_w = img_w
        self.batch_size = batch_size
        self.max_text_len = max_text_len
        self.downsample_factor = downsample_factor
        img_dirpath = join(dirpath, 'img')
        ann_dirpath = join(dirpath, 'ann')
        self.samples = []
        for filename in os.listdir(img_dirpath):
            name, ext = os.path.splitext(filename)
            if ext in ['.png', '.jpg']:
                img_filepath = join(img_dirpath, filename)
                json_filepath = join(ann_dirpath, name + '.json')
                # Bug fix: use a context manager so the annotation file
                # handle is not leaked.
                with open(json_filepath, 'r') as json_file:
                    ann = json.load(json_file)
                description = ann['description']
                tags = ann['tags']
                if tag not in tags:
                    continue
                if is_valid_str(description):
                    self.samples.append([img_filepath, description])
        self.n = len(self.samples)
        self.indexes = list(range(self.n))
        self.cur_index = 0

    def build_data(self):
        """Load, grayscale, resize and normalize every indexed image."""
        self.imgs = np.zeros((self.n, self.img_h, self.img_w))
        self.texts = []
        for i, (img_filepath, text) in enumerate(self.samples):
            img = cv2.imread(img_filepath)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (self.img_w, self.img_h))
            img = img.astype(np.float32)
            img /= 255
            # width and height are backwards from typical Keras convention
            # because width is the time dimension when it gets fed into the RNN
            self.imgs[i, :, :] = img
            self.texts.append(text)

    def get_output_size(self):
        """Number of output classes: one per letter plus the CTC blank."""
        return len(letters) + 1

    def next_sample(self):
        """Return the next (image, text) pair, reshuffling each epoch."""
        self.cur_index += 1
        if self.cur_index >= self.n:
            self.cur_index = 0
            random.shuffle(self.indexes)
        return self.imgs[self.indexes[self.cur_index]], self.texts[self.indexes[self.cur_index]]

    def next_batch(self):
        """Yield CTC training batches forever.

        Yields:
            (inputs, outputs): inputs holds the image tensor, padded
            labels and the CTC input/label lengths; outputs is the dummy
            zero target for the CTC loss layer.
        """
        while True:
            # width and height are backwards from typical Keras convention
            # because width is the time dimension when it gets fed into the RNN
            if K.image_data_format() == 'channels_first':
                X_data = np.ones([self.batch_size, 1, self.img_w, self.img_h])
            else:
                X_data = np.ones([self.batch_size, self.img_w, self.img_h, 1])
            Y_data = np.ones([self.batch_size, self.max_text_len])
            input_length = np.ones((self.batch_size, 1)) * (self.img_w // self.downsample_factor - 2)
            label_length = np.zeros((self.batch_size, 1))
            source_str = []
            for i in range(self.batch_size):
                img, text = self.next_sample()
                img = img.T
                if K.image_data_format() == 'channels_first':
                    img = np.expand_dims(img, 0)
                else:
                    img = np.expand_dims(img, -1)
                X_data[i] = img
                Y_data[i] = text_to_labels(text)
                source_str.append(text)
                label_length[i] = len(text)
            inputs = {
                'the_input': X_data,
                'the_labels': Y_data,
                'input_length': input_length,
                'label_length': label_length,
                # 'source_str': source_str
            }
            outputs = {'ctc': np.zeros([self.batch_size])}
            yield (inputs, outputs)
import os
import time
import argparse
import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import utils
import matplotlib.pyplot as plt
from utils.datasets import create_dataloader
from utils.util import parse_cfg
from models import build_model
from torchviz import make_dot
def train(model, train_loader, optimizer, epoch, device, train_loss_lst, train_acc_lst):
    """Run one training epoch and record its mean loss and accuracy.

    Args:
        model: Network to train (already moved to *device* by the caller).
        train_loader: DataLoader yielding (inputs, labels) batches.
        optimizer: Optimizer updating *model*'s parameters.
        epoch: Current epoch index (used for logging and the one-off
            batch preview shown at epoch 0).
        device: torch.device to run on.
        train_loss_lst: List the epoch's mean loss is appended to.
        train_acc_lst: List the epoch's accuracy is appended to.

    Returns:
        The two history lists, with this epoch's values appended.
    """
    model.train()  # Set the module in training mode
    train_loss = 0
    correct = 0
    # Hoisted out of the batch loop: the loss module is stateless, so
    # re-creating it every batch was wasted work.
    criterion = nn.CrossEntropyLoss()
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        # forward propagation
        outputs = model(inputs)
        pred = outputs.max(1, keepdim=True)[1]
        correct += pred.eq(labels.view_as(pred)).sum().item()
        # back propagation
        loss = criterion(outputs, labels)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
        # show a grid of the very first batch once, as a data sanity check
        if batch_idx == 0 and epoch == 0:
            plt.figure()
            inputs = inputs.cpu()  # convert to cpu
            grid = utils.make_grid(inputs)
            plt.imshow(grid.numpy().transpose((1, 2, 0)))
            plt.show()
        # print loss every 10 batches
        if (batch_idx+1) % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.1f}%)] Loss: {:.6f}'
                  .format(epoch, batch_idx * len(inputs), len(train_loader.dataset),
                          100. * batch_idx / len(train_loader), loss.item()))
    train_loss /= len(train_loader.dataset)
    # record loss and acc
    train_loss_lst.append(train_loss)
    train_acc_lst.append(correct / len(train_loader.dataset))
    return train_loss_lst, train_acc_lst
def validate(model, val_loader, device, val_loss_lst, val_acc_lst):
    """Evaluate *model* on the validation set and record loss/accuracy.

    Args:
        model: Network to evaluate.
        val_loader: DataLoader yielding (data, target) batches.
        device: torch.device to run on.
        val_loss_lst: List the mean validation loss is appended to.
        val_acc_lst: List the validation accuracy is appended to.

    Returns:
        The two history lists, with this run's values appended.
    """
    model.eval()  # Sets the module in evaluation mode
    val_loss = 0
    correct = 0
    # Hoisted out of the loop: the loss module is stateless.
    criterion = nn.CrossEntropyLoss()
    # no need to calculate gradients
    with torch.no_grad():
        for data, target in val_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # add one batch loss
            val_loss += criterion(output, target).item()
            # find index of max prob
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    val_loss /= len(val_loader.dataset)
    print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'
          .format(val_loss, correct, len(val_loader.dataset),
                  100. * correct / len(val_loader.dataset)))
    # record loss and acc
    val_loss_lst.append(val_loss)
    val_acc_lst.append(correct / len(val_loader.dataset))
    return val_loss_lst, val_acc_lst
def test(model, test_loader, device):
    """Evaluate *model* on the test set and print average loss/accuracy.

    Args:
        model: Network to evaluate.
        test_loader: DataLoader yielding (data, target) batches.
        device: torch.device to run on.
    """
    model.eval()  # Sets the module in evaluation mode
    test_loss = 0
    correct = 0
    # Hoisted out of the loop: the loss module is stateless.
    criterion = nn.CrossEntropyLoss()
    # no need to calculate gradients
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # add one batch loss
            test_loss += criterion(output, target).item()
            # find index of max prob
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'
          .format(test_loss, correct, len(test_loader.dataset),
                  100. * correct / len(test_loader.dataset)))
def arg_parse():
    """
    Parse arguments for the training script.

    Returns:
        argparse.Namespace with cfg, weights, output, epochs, lr,
        batch_size, input_size and save_freq attributes.
    """
    parser = argparse.ArgumentParser(description='Food Recognition System')
    # (long flag, short flag, dest, default, help text, type)
    options = [
        ("--cfg", "-c", 'cfg', "cfg/frs.cfg", "Your config file path", str),
        ("--weights", "-w", 'weights', "", "Path of pretrained weights", str),
        ("--output", "-o", 'output', "output", "Path of output files", str),
        ("--epochs", "-e", 'epochs', 200, "Training epochs", int),
        ("--lr", "-lr", 'lr', 0.005, "Training learning rate", float),
        ("--batch_size", "-b", 'batch_size', 32, "Training batch size", int),
        ("--input_size", "-i", 'input_size', 224, "Image input size", int),
        ("--save_freq", "-s", 'save_freq', 10, "Frequency of saving model", int),
    ]
    for long_flag, short_flag, dest, default, help_text, arg_type in options:
        parser.add_argument(long_flag, short_flag, dest=dest, default=default,
                            help=help_text, type=arg_type)
    return parser.parse_args()
if __name__ == "__main__":
    args = arg_parse()
    weight_path, cfg_path, output_path = args.weights, args.cfg, args.output
    epochs, lr, batch_size, input_size, save_freq = args.epochs, args.lr, args.batch_size, args.input_size, args.save_freq
    # load configs from config file
    cfg = parse_cfg(cfg_path)
    print('Config:', cfg)
    dataset_path, nb_class = cfg['dataset'], int(cfg['nb_class'])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # load datasets
    train_loader, val_loader, test_loader = create_dataloader(
        'IMAGE_FOLDER', dataset_path, batch_size, input_size)
    # load model
    model = build_model(weight_path, cfg).to(device)
    print('Model successfully loaded!')
    # plot model structure
    # graph = make_dot(model(torch.rand(1, 3, input_size, input_size).cuda()),
    #                  params=dict(model.named_parameters()))
    # graph.render('model_structure', './', cleanup=True, format='png')
    # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # output folder named after the run's start timestamp
    start = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    os.makedirs(os.path.join(output_path, start))
    # loss and accuracy history
    train_loss_lst, val_loss_lst = [], []
    train_acc_lst, val_acc_lst = [], []
    # train
    for epoch in range(epochs):
        train_loss_lst, train_acc_lst = train(model, train_loader, optimizer,
                                              epoch, device, train_loss_lst, train_acc_lst)
        val_loss_lst, val_acc_lst = validate(
            model, val_loader, device, val_loss_lst, val_acc_lst)
        # save model weights every save_freq epochs
        if epoch % save_freq == 0:
            torch.save(model.state_dict(), os.path.join(
                output_path, start, 'epoch'+str(epoch)+'.pth'))
    test(model, test_loader, device)
    # plot loss and accuracy curves, save the figure next to the weights
    plt.figure()  # (the original bound the figure to an unused variable)
    plt.plot(range(epochs), train_loss_lst, 'g', label='train loss')
    plt.plot(range(epochs), val_loss_lst, 'k', label='val loss')
    plt.plot(range(epochs), train_acc_lst, 'r', label='train acc')
    plt.plot(range(epochs), val_acc_lst, 'b', label='val acc')
    plt.grid(True)
    plt.xlabel('epoch')
    plt.ylabel('acc-loss')
    plt.legend(loc="upper right")
    now = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    plt.savefig(os.path.join(output_path, start, now + '.jpg'))
    plt.show()
    # save final model
    torch.save(model.state_dict(), os.path.join(output_path, start, 'last.pth'))
import time
import argparse
import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import utils
import matplotlib.pyplot as plt
from utils.datasets import create_dataloader
from utils.util import parse_cfg
from models import build_model
from torchviz import make_dot
def train(model, train_loader, optimizer, epoch, device, train_loss_lst, train_acc_lst):
    """Run one training epoch and record its mean loss and accuracy.

    Args:
        model: Network to train (already moved to *device* by the caller).
        train_loader: DataLoader yielding (inputs, labels) batches.
        optimizer: Optimizer updating *model*'s parameters.
        epoch: Current epoch index (used for logging and the one-off
            batch preview shown at epoch 0).
        device: torch.device to run on.
        train_loss_lst: List the epoch's mean loss is appended to.
        train_acc_lst: List the epoch's accuracy is appended to.

    Returns:
        The two history lists, with this epoch's values appended.
    """
    model.train()  # Set the module in training mode
    train_loss = 0
    correct = 0
    # Hoisted out of the batch loop: the loss module is stateless, so
    # re-creating it every batch was wasted work.
    criterion = nn.CrossEntropyLoss()
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        # forward propagation
        outputs = model(inputs)
        pred = outputs.max(1, keepdim=True)[1]
        correct += pred.eq(labels.view_as(pred)).sum().item()
        # back propagation
        loss = criterion(outputs, labels)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
        # show a grid of the very first batch once, as a data sanity check
        if batch_idx == 0 and epoch == 0:
            plt.figure()
            inputs = inputs.cpu()  # convert to cpu
            grid = utils.make_grid(inputs)
            plt.imshow(grid.numpy().transpose((1, 2, 0)))
            plt.show()
        # print loss every 10 batches
        if (batch_idx+1) % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.1f}%)] Loss: {:.6f}'
                  .format(epoch, batch_idx * len(inputs), len(train_loader.dataset),
                          100. * batch_idx / len(train_loader), loss.item()))
    train_loss /= len(train_loader.dataset)
    # record loss and acc
    train_loss_lst.append(train_loss)
    train_acc_lst.append(correct / len(train_loader.dataset))
    return train_loss_lst, train_acc_lst
def validate(model, val_loader, device, val_loss_lst, val_acc_lst):
    """Evaluate *model* on the validation set and record loss/accuracy.

    Args:
        model: Network to evaluate.
        val_loader: DataLoader yielding (data, target) batches.
        device: torch.device to run on.
        val_loss_lst: List the mean validation loss is appended to.
        val_acc_lst: List the validation accuracy is appended to.

    Returns:
        The two history lists, with this run's values appended.
    """
    model.eval()  # Sets the module in evaluation mode
    val_loss = 0
    correct = 0
    # Hoisted out of the loop: the loss module is stateless.
    criterion = nn.CrossEntropyLoss()
    # no need to calculate gradients
    with torch.no_grad():
        for data, target in val_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # add one batch loss
            val_loss += criterion(output, target).item()
            # find index of max prob
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    val_loss /= len(val_loader.dataset)
    print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'
          .format(val_loss, correct, len(val_loader.dataset),
                  100. * correct / len(val_loader.dataset)))
    # record loss and acc
    val_loss_lst.append(val_loss)
    val_acc_lst.append(correct / len(val_loader.dataset))
    return val_loss_lst, val_acc_lst
def test(model, test_loader, device):
    """Evaluate `model` on the held-out test set and print loss / accuracy."""
    model.eval()  # evaluation mode: disables dropout / batch-norm updates
    # Fix: stateless loss module hoisted out of the loop (was re-created
    # once per batch).
    criterion = nn.CrossEntropyLoss()
    test_loss = 0
    correct = 0
    # no need to calculate gradients
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # add one batch loss
            # NOTE(review): default reduction='mean' — summing batch means and
            # dividing by the dataset size scales the metric by ~1/batch_size;
            # preserved so the number stays comparable with validate().
            test_loss += criterion(output, target).item()
            # test_loss += F.nll_loss(output, target, reduction='sum').item()
            # predicted class = index of max logit
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    # record loss and acc
    test_loss /= len(test_loader.dataset)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'
          .format(test_loss, correct, len(test_loader.dataset),
                  100. * correct / len(test_loader.dataset)))
def arg_parse():
    """Build and parse the command-line arguments for the detect module.

    Returns:
        argparse.Namespace with cfg, weights, output, epochs, lr,
        batch_size, input_size and save_freq attributes.
    """
    parser = argparse.ArgumentParser(description='Food Recognition System')
    add = parser.add_argument  # local alias keeps the option table compact
    add('--cfg', '-c', dest='cfg', type=str, default='cfg/frs.cfg',
        help='Your config file path')
    add('--weights', '-w', dest='weights', type=str, default='',
        help='Path of pretrained weights')
    add('--output', '-o', dest='output', type=str, default='output',
        help='Path of output files')
    add('--epochs', '-e', dest='epochs', type=int, default=200,
        help='Training epochs')
    add('--lr', '-lr', dest='lr', type=float, default=0.005,
        help='Training learning rate')
    add('--batch_size', '-b', dest='batch_size', type=int, default=32,
        help='Training batch size')
    add('--input_size', '-i', dest='input_size', type=int, default=224,
        help='Image input size')
    add('--save_freq', '-s', dest='save_freq', type=int, default=10,
        help='Frequency of saving model')
    return parser.parse_args()
# Script entry point: parse CLI args, load config/data/model, train with
# periodic checkpointing, evaluate on the test set, and plot the curves.
if __name__ == "__main__":
    args = arg_parse()
    # Unpack CLI options.
    weight_path, cfg_path, output_path = args.weights, args.cfg, args.output
    epochs, lr, batch_size, input_size, save_freq = args.epochs, args.lr, args.batch_size, args.input_size, args.save_freq
    # load configs from config
    cfg = parse_cfg(cfg_path)
    print('Config:', cfg)
    # Dataset root and number of target classes come from the config file.
    dataset_path, nb_class = cfg['dataset'], int(cfg['nb_class'])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # load datasets
    train_loader, val_loader, test_loader = create_dataloader(
        'IMAGE_FOLDER', dataset_path, batch_size, input_size)
    # load model (optionally initialised from pretrained weights)
    model = build_model(weight_path, cfg).to(device)
    print('Model successfully loaded!')
    # plot model structure
    # graph = make_dot(model(torch.rand(1, 3, input_size, input_size).cuda()),
    #                  params=dict(model.named_parameters()))
    # graph.render('model_structure', './', cleanup=True, format='png')
    # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # create output folder named after the run's start timestamp
    start = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    os.makedirs(os.path.join(output_path, start))
    # loss and accuracy lists, one entry per epoch
    train_loss_lst, val_loss_lst = [], []
    train_acc_lst, val_acc_lst = [], []
    # train / validate for the requested number of epochs
    for epoch in range(epochs):
        train_loss_lst, train_acc_lst = train(model, train_loader, optimizer,
                                              epoch, device, train_loss_lst, train_acc_lst)
        val_loss_lst, val_acc_lst = validate(
            model, val_loader, device, val_loss_lst, val_acc_lst)
        # save model weights every save_freq epoch (epoch 0 included)
        if epoch % save_freq == 0:
            torch.save(model.state_dict(), os.path.join(
                output_path, start, 'epoch'+str(epoch)+'.pth'))
    test(model, test_loader, device)
    # plot loss and accuracy curves on one axis and save the figure
    fig = plt.figure()
    plt.plot(range(epochs), train_loss_lst, 'g', label='train loss')
    plt.plot(range(epochs), val_loss_lst, 'k', label='val loss')
    plt.plot(range(epochs), train_acc_lst, 'r', label='train acc')
    plt.plot(range(epochs), val_acc_lst, 'b', label='val acc')
    plt.grid(True)
    plt.xlabel('epoch')
    plt.ylabel('acc-loss')
    plt.legend(loc="upper right")
    now = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    plt.savefig(os.path.join(output_path, start, now + '.jpg'))
    plt.show()
    # save final model weights (written on the next line)
torch.save(model.state_dict(), os.path.join(output_path, start, 'last.pth')) | 0.786664 | 0.488466 |
import csv
from scrapy.spider import Spider
from scrapy.http import Request
import os
from itertools import islice
from onderwijsscrapers.items import DANSVoBranch
def float_or_none(string):
    """Parse a comma-decimal string (e.g. '3,14') to float, or None on failure.

    Returns None both for non-numeric text and for non-string input
    (anything without a .replace method).
    """
    try:
        return float(string.replace(',', '.'))
    except (AttributeError, TypeError, ValueError):
        # Fix: catch only the exceptions this expression can raise instead of
        # a bare `except Exception`, which would also hide unrelated bugs.
        return None
class DANSVoBranchesSpider(Spider):
    # Builds one DANSVoBranch item per (brin, vestnr) school branch by joining
    # a set of local CSV exports of the DANS secondary-education (VO) dataset.
    # NOTE(review): this is Python 2 code (`file()`, `unicode`, the `print`
    # statement, `iteritems`); it will not run unmodified under Python 3.
    name = 'dans_vo_branches'

    def __init__(self, root_path=''):
        # Directory containing the .csv, .fields.csv and .values.csv exports.
        self.root_path = root_path

    def start_requests(self):
        # Single seed request; the actual data is read from local CSV files
        # inside parse_all, not from the HTTP response.
        return [
            Request(
                'https://easy.dans.knaw.nl/ui/datasets/id/easy-dataset:57879',
                self.parse_all
            )
        ]

    def parse_all(self, response):
        """
        Uses information from `bestandsbeschrijving.doc`
        """
        # Doesn't use `response`
        # The .cvs files, and .fields.cvs files
        csvs = [
            'achtergrond12',
            'exvk12',
            'naw13',
            'ok2013',
            'verschilsece1012',
            'adv12',
            'idu12',
            'oordeel1113',
            'exvc12',
            'lwoo12',
        ]
        # Data format definitions (format code -> typecast callable).
        formats = {
            'F1' : int,
            'F2' : int,
            'F3' : int,
            'F4' : int,
            'F5' : int,
            'F8.2' : lambda s: float(s.replace(',','.')),
        }
        # Values coded with integer identifiers, from .values.csv files
        # header: value;description
        coded_values = [
            'opbrengst',
            'regnr',
            'opl',
            'soortlln',
            'vak',
            'leerweg',
        ]
        # Rebind to {column name -> {code -> human-readable description}}.
        coded_values = {
            cname : {
                int(row.pop('value')) : row.pop('description')
                for row in csv.DictReader(file(os.path.join(self.root_path, '%s.values.csv' % cname)), delimiter=';')
            } for cname in coded_values
        }
        # Index all datasets per branch: {(brin, vestnr) -> {file -> rows}}.
        per_school = {}
        for fname in csvs:
            table = csv.DictReader(file(os.path.join(self.root_path, '%s.csv' % fname)), delimiter=';')
            # .fields.csv files have header `field;description;format;key`
            fields = csv.DictReader(file(os.path.join(self.root_path, '%s.fields.csv' % fname)), delimiter=';')
            fields = { f.pop('field').lower() : f for f in fields }
            # Column names whose `key` cell is non-empty form the row key.
            keys = [n for n,f in fields.items() if f['key']]
            # Every row has a brin and branch id
            for t in table:
                branch = ( t.pop('brin'), int(t.pop('vestnr')) )
                if branch not in per_school:
                    per_school[branch] = {}
                if fname not in per_school[branch]:
                    per_school[branch][fname] = []
                # take the key fields seperately
                per_key = { fname: {} }
                for k,v in t.items():
                    k = k.lower()
                    # Input files are cp1252-encoded; normalise to unicode.
                    v = unicode(v.strip().decode('cp1252'))
                    if v:
                        # Typecast this variable given the format
                        if fields[k]['format'] in formats:
                            v = formats[fields[k]['format']](v)
                        # Uncode this variable given the var name
                        # (column names look like '<varname>_<suffix>')
                        var_code = k.split('_')[0]
                        if var_code in coded_values:
                            if v in coded_values[var_code]:
                                v = coded_values[var_code][v]
                            else:
                                print v, 'not in ', var_code, 'for', fname
                        # Add the keyed variables directly
                        if fields[k]['key']:
                            per_key[k] = v
                        else:
                            per_key[fname][k] = v
                # If the key is more than (brin, vestnr), a branch can have
                # multiple rows per file; otherwise store the single dict.
                if len(keys) > 2:
                    per_school[branch][fname].append(per_key)
                else:
                    per_school[branch][fname] = per_key[fname]
        # NOTE(review): the loop variable below rebinds the name `per_school`;
        # it works because iteritems() is obtained before the first rebind,
        # but renaming the loop variable would be clearer.
        for (brin, branch_id), per_school in per_school.iteritems():
            school = DANSVoBranch(brin=brin, branch_id=branch_id)
            for fname in per_school:
                school[fname] = per_school[fname]
yield school | onderwijsscrapers/onderwijsscrapers/spiders/dans.py | import csv
from scrapy.spider import Spider
from scrapy.http import Request
import os
from itertools import islice
from onderwijsscrapers.items import DANSVoBranch
def float_or_none(string):
try:
return float(string.replace(',','.'))
except Exception:
return None
class DANSVoBranchesSpider(Spider):
name = 'dans_vo_branches'
def __init__(self, root_path=''):
self.root_path = root_path
def start_requests(self):
return [
Request(
'https://easy.dans.knaw.nl/ui/datasets/id/easy-dataset:57879',
self.parse_all
)
]
def parse_all(self, response):
"""
Uses information from `bestandsbeschrijving.doc`
"""
# Doesn't use `response`
# The .cvs files, and .fields.cvs files
csvs = [
'achtergrond12',
'exvk12',
'naw13',
'ok2013',
'verschilsece1012',
'adv12',
'idu12',
'oordeel1113',
'exvc12',
'lwoo12',
]
# Data format definitions
formats = {
'F1' : int,
'F2' : int,
'F3' : int,
'F4' : int,
'F5' : int,
'F8.2' : lambda s: float(s.replace(',','.')),
}
# Values coded with integer identifiers, from .values.csv files
# header: value;description
coded_values = [
'opbrengst',
'regnr',
'opl',
'soortlln',
'vak',
'leerweg',
]
coded_values = {
cname : {
int(row.pop('value')) : row.pop('description')
for row in csv.DictReader(file(os.path.join(self.root_path, '%s.values.csv' % cname)), delimiter=';')
} for cname in coded_values
}
# Index all datasets per branch
per_school = {}
for fname in csvs:
table = csv.DictReader(file(os.path.join(self.root_path, '%s.csv' % fname)), delimiter=';')
# .fields.csv files have header `field;description;format;key`
fields = csv.DictReader(file(os.path.join(self.root_path, '%s.fields.csv' % fname)), delimiter=';')
fields = { f.pop('field').lower() : f for f in fields }
keys = [n for n,f in fields.items() if f['key']]
# Every row has a brin and branch id
for t in table:
branch = ( t.pop('brin'), int(t.pop('vestnr')) )
if branch not in per_school:
per_school[branch] = {}
if fname not in per_school[branch]:
per_school[branch][fname] = []
# take the key fields seperately
per_key = { fname: {} }
for k,v in t.items():
k = k.lower()
v = unicode(v.strip().decode('cp1252'))
if v:
# Typecast this variable given the format
if fields[k]['format'] in formats:
v = formats[fields[k]['format']](v)
# Uncode this variable given the var name
var_code = k.split('_')[0]
if var_code in coded_values:
if v in coded_values[var_code]:
v = coded_values[var_code][v]
else:
print v, 'not in ', var_code, 'for', fname
# Add the keyed variables directly
if fields[k]['key']:
per_key[k] = v
else:
per_key[fname][k] = v
# If it's just per branch, add it directly
if len(keys) > 2:
per_school[branch][fname].append(per_key)
else:
per_school[branch][fname] = per_key[fname]
for (brin, branch_id), per_school in per_school.iteritems():
school = DANSVoBranch(brin=brin, branch_id=branch_id)
for fname in per_school:
school[fname] = per_school[fname]
yield school | 0.435902 | 0.220217 |
import model
def test_calculated_delta_values():
    """Derived totals follow from gross salary, tax, refund and spending."""
    deltas = (
        model.deltas_state.from_year(1999)
        .update_gross_salary(30000)
        .update_tax(19000)
        .update_tax_refund(700)
        .update_spending(60)
    )
    # net income = salary - tax + refund; savings = net income - spending
    assert deltas.total_net_income == 11700
    assert deltas.undifferentiated_savings == 11640
def test_get_updated_funds_from_deltas():
    """Applying a year's deltas adds them onto the previous funds state."""
    start_year = 2040
    funds_before = model.funds_state(1200, 1010, start_year)
    next_deltas = (
        model.deltas_state.from_year(start_year + 1)
        .update_rrsp(400)
        .update_tfsa(333)
    )
    updated = model.get_updated_funds_from_deltas(funds_before, next_deltas)
    assert updated.rrsp_savings == 1600
    assert updated.tfsa_savings == 1343
    assert updated.year == 2041
def test_get_updated_deltas_from_rules():
    """Rules run in order against a fresh deltas state for the next year."""
    def fix_rrsp(deltas, previous_funds, previous_deltas):
        return deltas.update_rrsp(320)

    def fix_gross_salary(deltas, previous_funds, previous_deltas):
        return deltas.update_gross_salary(109)

    def double_rrsp(deltas, previous_funds, previous_deltas):
        # Depends on the value set by fix_rrsp earlier in the chain.
        return deltas.update_rrsp(deltas.rrsp * 2)

    start_year = 1999
    result = model.get_updated_deltas_from_rules(
        model.funds_state(0, 0, start_year),
        model.deltas_state.from_year(start_year),
        [fix_rrsp, fix_gross_salary, double_rrsp])
    assert result.year == 2000
    assert result.gross_salary == 109
    assert result.rrsp == 640
def test_couple_rule_from_single_rule():
    """A single-partner rule can be lifted to operate on couple deltas."""
    def set_gross_salary_rule_partner_1(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        return deltas.update_gross_salary(previous_deltas.gross_salary + 20)
    def set_gross_salary_rule_partner_2(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        return deltas.update_gross_salary(previous_deltas.gross_salary + 18)
    # Seed last year's salaries: partner 1 earns 14, partner 2 earns 7.
    previous_deltas = model.couple_deltas_state.from_year(1980)
    previous_deltas = previous_deltas.update_partner1_deltas(previous_deltas.partner1_deltas.update_gross_salary(14))
    previous_deltas = previous_deltas.update_partner2_deltas(previous_deltas.partner2_deltas.update_gross_salary(7))
    assert previous_deltas.partner1_deltas.gross_salary == 14
    assert previous_deltas.partner2_deltas.gross_salary == 7
    # Lift each single-partner rule to the couple level (partner index 1 / 2).
    rules = [
        model.get_couple_rule_from_single_rule(set_gross_salary_rule_partner_1, 1),
        model.get_couple_rule_from_single_rule(set_gross_salary_rule_partner_2, 2),
    ]
    deltas = model.get_updated_couple_deltas_from_rules(model.couple_funds_state.from_savings(0,0,0,1980), previous_deltas, rules)
    assert deltas.year == 1981
    assert deltas.partner1_deltas.gross_salary == 34
assert deltas.partner2_deltas.gross_salary == 25 | tests/test_model.py | import model
def test_calculated_delta_values():
deltas = model.deltas_state.from_year(1999)
deltas = deltas.update_gross_salary(30000)
deltas = deltas.update_tax(19000)
deltas = deltas.update_tax_refund(700)
deltas = deltas.update_spending(60)
assert 11700 == deltas.total_net_income
assert 11640 == deltas.undifferentiated_savings
def test_get_updated_funds_from_deltas():
year = 2040
previous_funds = model.funds_state(1200, 1010, year)
deltas = model.deltas_state.from_year(year + 1)
deltas = deltas.update_rrsp(400)
deltas = deltas.update_tfsa(333)
new_funds = model.get_updated_funds_from_deltas(previous_funds, deltas)
assert new_funds.rrsp_savings == 1600
assert new_funds.tfsa_savings == 1343
assert new_funds.year == 2041
def test_get_updated_deltas_from_rules():
def set_rrsp_rule(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
return deltas.update_rrsp(320)
def set_gross_salary_rule(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
return deltas.update_gross_salary(109)
def double_rrsp_rule(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
return deltas.update_rrsp(deltas.rrsp * 2)
year = 1999
new_deltas = model.get_updated_deltas_from_rules(model.funds_state(
0, 0, year), model.deltas_state.from_year(year), [set_rrsp_rule, set_gross_salary_rule, double_rrsp_rule])
assert new_deltas.year == 2000
assert new_deltas.gross_salary == 109
assert new_deltas.rrsp == 640
def test_couple_rule_from_single_rule():
def set_gross_salary_rule_partner_1(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
return deltas.update_gross_salary(previous_deltas.gross_salary + 20)
def set_gross_salary_rule_partner_2(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
return deltas.update_gross_salary(previous_deltas.gross_salary + 18)
previous_deltas = model.couple_deltas_state.from_year(1980)
previous_deltas = previous_deltas.update_partner1_deltas(previous_deltas.partner1_deltas.update_gross_salary(14))
previous_deltas = previous_deltas.update_partner2_deltas(previous_deltas.partner2_deltas.update_gross_salary(7))
assert previous_deltas.partner1_deltas.gross_salary == 14
assert previous_deltas.partner2_deltas.gross_salary == 7
rules = [
model.get_couple_rule_from_single_rule(set_gross_salary_rule_partner_1, 1),
model.get_couple_rule_from_single_rule(set_gross_salary_rule_partner_2, 2),
]
deltas = model.get_updated_couple_deltas_from_rules(model.couple_funds_state.from_savings(0,0,0,1980), previous_deltas, rules)
assert deltas.year == 1981
assert deltas.partner1_deltas.gross_salary == 34
assert deltas.partner2_deltas.gross_salary == 25 | 0.516108 | 0.701659 |
from pybricks import ev3brick as brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import (Port, Stop, Direction, Button, Color,
SoundFile, ImageFile, Align)
from pybricks.tools import print, wait, StopWatch
from pybricks.robotics import DriveBase
# Custom robot class containing our functions that leverage the Gyro sensor
class Robot:
    def __init__(self, left_motor_port, right_motor_port, med_motor_port, gyro_port, wheel_diameter, wheel_base):
        """Wire up the drive motors, DriveBase and gyro from the given ports.

        med_motor_port is currently unused (the medium motor is commented out).
        wheel_diameter / wheel_base are forwarded to DriveBase unchanged.
        """
        self.left_motor = Motor(left_motor_port)
        self.right_motor = Motor(right_motor_port)
        # self.med_motor = Motor(med_motor_port)
        self.robot = DriveBase(self.left_motor, self.right_motor, wheel_diameter, wheel_base)
        self.gyro = GyroSensor(gyro_port)

    # Function definitions
    # Gyro sensor reset, waits until the gyro has come to rest and then resets the value to zero
    # use at beginning of mission programs
    def resetGyro(self):
        """Block until the gyro reports zero angular speed, then zero its angle.

        Shows a red brick light while waiting and green when the reset is done.
        """
        brick.light(Color.RED)
        speed = self.gyro.speed()
        angle = self.gyro.angle()
        while speed != 0:
            wait(100)
            speed = self.gyro.speed()
            angle = self.gyro.angle()
        self.gyro.reset_angle(0)
        brick.light(Color.GREEN)

    # Drive the robot straight using the GyroSensor
    #
    def driveStraight(self, rotations, speed):
        """Drive `rotations` wheel rotations at `speed`, steering against drift.

        Proportional correction: steering = gyro drift * gain, with the gain's
        sign flipped when driving backwards (negative speed).
        """
        distance = rotations * 360  # convert wheel rotations to degrees
        self.gyro.reset_angle(0)
        self.left_motor.reset_angle(0)
        self.right_motor.reset_angle(0)
        # set our amount to correct back towards straight
        correction = -2
        if speed < 0:
            correction = 2
        # start the robot driving
        self.robot.drive(speed, 0)
        # loop until the robot has travelled the distance we want
        # updating the steering angle of the drive based on the gyro measured drift and correction
        while abs(self.left_motor.angle()) <= distance and abs(self.right_motor.angle()) <= distance:
            drift = self.gyro.angle()
            print("Drift: " + str(drift))
            steering = drift * correction
            #print("Steering: " + str(steering))
            self.robot.drive(speed, steering)
        self.robot.stop(Stop.BRAKE)

    # Turn the robot an exact amount using the GryoSensor
    def turnDegrees(self, degrees):
        """Turn in place by `degrees` (sign selects direction).

        Two-phase turn: fast for the first ~75% of the angle, then slow until
        1 degree short of the target to reduce overshoot.
        """
        self.gyro.reset_angle(0)
        initial_power = 300
        end_power = 50
        # Opposite motor directions spin the robot in place.
        left_motor_power = initial_power
        right_motor_power = initial_power * -1
        if degrees < 0:
            left_motor_power = initial_power * -1
            right_motor_power = initial_power
        initial_turn = abs(degrees * .75)
        self.left_motor.run(left_motor_power)
        self.right_motor.run(right_motor_power)
        angle = self.gyro.angle()
        print("Angle: " + str(angle))
        while abs(angle) < initial_turn:
            wait(10)
            angle = self.gyro.angle()
            print("Angle: " + str(angle))
        # Slow phase: creep the remaining angle at low power.
        left_motor_power = end_power
        right_motor_power = end_power * -1
        if degrees < 0:
            left_motor_power = end_power * -1
            right_motor_power = end_power
        self.left_motor.run(left_motor_power)
        self.right_motor.run(right_motor_power)
        end_degrees = (abs(degrees) -1)
        angle = self.gyro.angle()
        print("Angle: " + str(angle))
        while abs(angle) < end_degrees:
            wait(10)
            angle = self.gyro.angle()
            print("Angle: " + str(angle))
        self.left_motor.stop(Stop.BRAKE)
        self.right_motor.stop(Stop.BRAKE)
        print("Final Angle: " + str(self.gyro.angle()))

    def oldDrive(self, speed, turn):
        """Drive forever at a fixed speed/turn rate (legacy; never returns)."""
        while True:
self.robot.drive(speed, turn) | blocks/robot.py | from pybricks import ev3brick as brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import (Port, Stop, Direction, Button, Color,
SoundFile, ImageFile, Align)
from pybricks.tools import print, wait, StopWatch
from pybricks.robotics import DriveBase
# Custom robot class containing our functions that leverage the Gyro sensor
class Robot:
def __init__(self, left_motor_port, right_motor_port, med_motor_port, gyro_port, wheel_diameter, wheel_base):
self.left_motor = Motor(left_motor_port)
self.right_motor = Motor(right_motor_port)
# self.med_motor = Motor(med_motor_port)
self.robot = DriveBase(self.left_motor, self.right_motor, wheel_diameter, wheel_base)
self.gyro = GyroSensor(gyro_port)
# Function definitions
# Gyro sensor reset, waits until the gyro has come to rest and then resets the value to zero
# use at beginning of mission programs
def resetGyro(self):
brick.light(Color.RED)
speed = self.gyro.speed()
angle = self.gyro.angle()
while speed != 0:
wait(100)
speed = self.gyro.speed()
angle = self.gyro.angle()
self.gyro.reset_angle(0)
brick.light(Color.GREEN)
# Drive the robot straight using the GyroSensor
#
def driveStraight(self, rotations, speed):
distance = rotations * 360 # convert wheel rotations to degrees
self.gyro.reset_angle(0)
self.left_motor.reset_angle(0)
self.right_motor.reset_angle(0)
# set our amount to correct back towards straight
correction = -2
if speed < 0:
correction = 2
# start the robot driving
self.robot.drive(speed, 0)
# loop until the robot has travelled the distance we want
# updating the steering angle of the drive based on the gyro measured drift and correction
while abs(self.left_motor.angle()) <= distance and abs(self.right_motor.angle()) <= distance:
drift = self.gyro.angle()
print("Drift: " + str(drift))
steering = drift * correction
#print("Steering: " + str(steering))
self.robot.drive(speed, steering)
self.robot.stop(Stop.BRAKE)
# Turn the robot an exact amount using the GryoSensor
def turnDegrees(self, degrees):
self.gyro.reset_angle(0)
initial_power = 300
end_power = 50
left_motor_power = initial_power
right_motor_power = initial_power * -1
if degrees < 0:
left_motor_power = initial_power * -1
right_motor_power = initial_power
initial_turn = abs(degrees * .75)
self.left_motor.run(left_motor_power)
self.right_motor.run(right_motor_power)
angle = self.gyro.angle()
print("Angle: " + str(angle))
while abs(angle) < initial_turn:
wait(10)
angle = self.gyro.angle()
print("Angle: " + str(angle))
left_motor_power = end_power
right_motor_power = end_power * -1
if degrees < 0:
left_motor_power = end_power * -1
right_motor_power = end_power
self.left_motor.run(left_motor_power)
self.right_motor.run(right_motor_power)
end_degrees = (abs(degrees) -1)
angle = self.gyro.angle()
print("Angle: " + str(angle))
while abs(angle) < end_degrees:
wait(10)
angle = self.gyro.angle()
print("Angle: " + str(angle))
self.left_motor.stop(Stop.BRAKE)
self.right_motor.stop(Stop.BRAKE)
print("Final Angle: " + str(self.gyro.angle()))
def oldDrive(self, speed, turn):
while True:
self.robot.drive(speed, turn) | 0.684053 | 0.497131 |
import pytest
import random
import subprocess
import getpass
import shutil
import six
import dask.dataframe as dd
from ..helpers import (
ResettingCounter, skip_unless_gcs, GCS_TEST_BUCKET, df_from_csv_str,
equal_frame_and_index_content)
from bionic.exception import CodeVersioningError
import bionic as bn
# This is detected by pytest and applied to all the tests in this module.
pytestmark = skip_unless_gcs
def gsutil_wipe_path(url):
    """Recursively delete `url` from GCS.

    Guarded by an assert so only sandboxed test-data paths can be wiped.
    """
    assert 'BNTESTDATA' in url
    cmd = ['gsutil', '-q', '-m', 'rm', '-rf', url]
    subprocess.check_call(cmd)
def gsutil_path_exists(url):
    """Return True when `gsutil ls` succeeds for `url` (exit status 0)."""
    exit_code = subprocess.call(['gsutil', 'ls', url])
    return exit_code == 0
def local_wipe_path(path_str):
    """Recursively delete a local directory.

    Guarded by an assert so only sandboxed test-data paths can be wiped.
    """
    assert 'BNTESTDATA' in path_str
    shutil.rmtree(path_str)
@pytest.fixture(scope='module')
def bucket_name():
    """Name of the GCS bucket reserved for integration-test data."""
    return GCS_TEST_BUCKET
@pytest.fixture(scope='function')
def tmp_object_path(bucket_name):
    """Yield a unique per-test GCS object path; wipe it after the test."""
    # Random 64-bit suffix keeps concurrent test runs from colliding.
    random_hex_str = '%016x' % random.randint(0, 2 ** 64)
    path_str = '%s/BNTESTDATA/%s' % (getpass.getuser(), random_hex_str)
    gs_url = 'gs://%s/%s' % (bucket_name, path_str)
    # This emits a stderr warning because the URL doesn't exist. That's
    # annoying but I wasn't able to find a straightforward way to avoid it.
    assert not gsutil_path_exists(gs_url)
    yield path_str
    gsutil_wipe_path(gs_url)
@pytest.fixture(scope='function')
def gcs_builder(builder, bucket_name, tmp_object_path):
    """Return a flow builder configured to persist its cache to GCS."""
    # Round-trip through build() to work on a fresh copy of the base builder.
    builder = builder.build().to_builder()
    builder.set('core__persistent_cache__gcs__bucket_name', bucket_name)
    builder.set('core__persistent_cache__gcs__object_path', tmp_object_path)
    builder.set('core__persistent_cache__gcs__enabled', True)
    # 'assist' mode raises CodeVersioningError when code changes without a
    # matching version bump.
    builder.set('core__versioning_mode', 'assist')
    return builder
# This should really be multiple separate tests, but it's expensive to do the
# setup, teardown, and client initialization, so we'll just do it all in one
# place.
def test_gcs_caching(gcs_builder):
    """End-to-end check of the two-level (local + GCS) persistent cache.

    Covers: cache hits across rebuilt flows, recovery after wiping either
    cache layer, explicit and indirect code versioning, and (on Python 3)
    multi-file serialization of dask dataframes.
    """
    # Setup.
    call_counter = ResettingCounter()
    builder = gcs_builder
    builder.assign('x', 2)
    builder.assign('y', 3)
    @builder
    def xy(x, y):
        call_counter.mark()
        return x * y
    # Test reading from and writing to GCS cache.
    flow = builder.build()
    local_cache_path_str = flow.get('core__persistent_cache__flow_dir')
    gcs_cache_url = flow.get('core__persistent_cache__gcs__url')
    # First run: both values computed (and written to both cache layers).
    assert flow.get('xy') == 6
    assert flow.setting('x', 4).get('xy') == 12
    assert call_counter.times_called() == 2
    # Rebuilt flow: everything is served from cache, nothing recomputed.
    flow = builder.build()
    assert flow.get('xy') == 6
    assert flow.setting('x', 4).get('xy') == 12
    assert call_counter.times_called() == 0
    # Wiping GCS alone: the local cache still serves the values.
    gsutil_wipe_path(gcs_cache_url)
    flow = builder.build()
    assert flow.get('xy') == 6
    assert flow.setting('x', 4).get('xy') == 12
    assert call_counter.times_called() == 0
    # Wiping local alone: the GCS cache still serves the values.
    local_wipe_path(local_cache_path_str)
    flow = builder.build()
    assert flow.get('xy') == 6
    assert flow.setting('x', 4).get('xy') == 12
    assert call_counter.times_called() == 0
    # Wiping both layers: values must be recomputed.
    gsutil_wipe_path(gcs_cache_url)
    local_wipe_path(local_cache_path_str)
    flow = builder.build()
    assert flow.get('xy') == 6
    assert flow.setting('x', 4).get('xy') == 12
    assert call_counter.times_called() == 2
    # Test versioning.
    # Changing the body without bumping the version is an error in 'assist'
    # versioning mode (set by the gcs_builder fixture).
    @builder  # noqa: F811
    def xy(x, y):
        call_counter.mark()
        return y * x
    flow = builder.build()
    with pytest.raises(CodeVersioningError):
        flow.get('xy')
    local_wipe_path(local_cache_path_str)
    flow = builder.build()
    with pytest.raises(CodeVersioningError):
        flow.get('xy')
    # Minor version bump declares the change non-functional: cache is reused.
    @builder  # noqa: F811
    @bn.version(minor=1)
    def xy(x, y):
        call_counter.mark()
        return y * x
    flow = builder.build()
    assert flow.get('xy') == 6
    assert flow.setting('x', 4).get('xy') == 12
    assert call_counter.times_called() == 0
    local_wipe_path(local_cache_path_str)
    flow = builder.build()
    assert flow.get('xy') == 6
    assert flow.setting('x', 4).get('xy') == 12
    assert call_counter.times_called() == 0
    # Major version bump invalidates the cache: recompute with the new logic.
    @builder  # noqa: F811
    @bn.version(major=1)
    def xy(x, y):
        call_counter.mark()
        return x ** y
    flow = builder.build()
    assert flow.get('xy') == 8
    assert flow.setting('x', 4).get('xy') == 64
    assert call_counter.times_called() == 2
    local_wipe_path(local_cache_path_str)
    flow = builder.build()
    assert flow.get('xy') == 8
    assert flow.setting('x', 4).get('xy') == 64
    assert call_counter.times_called() == 0
    # Test indirect versioning.
    @builder
    def xy_plus(xy):
        return xy + 1
    flow = builder.build()
    assert flow.get('xy_plus') == 9
    assert call_counter.times_called() == 0
    # Unbumped change to xy is detected through the xy_plus dependency.
    @builder  # noqa: F811
    @bn.version(major=1)
    def xy(x, y):
        call_counter.mark()
        return int(float(x)) ** y
    flow = builder.build()
    with pytest.raises(CodeVersioningError):
        flow.get('xy_plus')
    # Minor bump upstream: downstream value still comes from cache.
    @builder  # noqa: F811
    @bn.version(major=1, minor=1)
    def xy(x, y):
        call_counter.mark()
        return int(float(y)) ** x
    flow = builder.build()
    assert flow.get('xy_plus') == 9
    assert call_counter.times_called() == 0
    # Major bump upstream: downstream value is recomputed.
    @builder  # noqa: F811
    @bn.version(major=2)
    def xy(x, y):
        call_counter.mark()
        return y ** x
    flow = builder.build()
    assert flow.get('xy_plus') == 10
    assert call_counter.times_called() == 1
    # Dask only works in Python 3.
    if six.PY3:
        # Test multi-file serialization.
        dask_df = dd.from_pandas(
            df_from_csv_str(
                '''
                color,number
                red,1
                blue,2
                green,3
                '''),
            npartitions=1)
        @builder
        @bn.protocol.dask
        def df():
            call_counter.mark()
            return dask_df
        flow = builder.build()
        assert equal_frame_and_index_content(
            flow.get('df').compute(), dask_df.compute())
        assert equal_frame_and_index_content(
            flow.get('df').compute(), dask_df.compute())
        assert call_counter.times_called() == 1
        local_wipe_path(local_cache_path_str)
        flow = builder.build()
        assert equal_frame_and_index_content(
            flow.get('df').compute(), dask_df.compute())
assert call_counter.times_called() == 0 | tests/test_flow/test_persistence_gcs.py | import pytest
import random
import subprocess
import getpass
import shutil
import six
import dask.dataframe as dd
from ..helpers import (
ResettingCounter, skip_unless_gcs, GCS_TEST_BUCKET, df_from_csv_str,
equal_frame_and_index_content)
from bionic.exception import CodeVersioningError
import bionic as bn
# This is detected by pytest and applied to all the tests in this module.
pytestmark = skip_unless_gcs
def gsutil_wipe_path(url):
assert 'BNTESTDATA' in url
subprocess.check_call(['gsutil', '-q', '-m', 'rm', '-rf', url])
def gsutil_path_exists(url):
return subprocess.call(['gsutil', 'ls', url]) == 0
def local_wipe_path(path_str):
assert 'BNTESTDATA' in path_str
shutil.rmtree(path_str)
@pytest.fixture(scope='module')
def bucket_name():
return GCS_TEST_BUCKET
@pytest.fixture(scope='function')
def tmp_object_path(bucket_name):
random_hex_str = '%016x' % random.randint(0, 2 ** 64)
path_str = '%s/BNTESTDATA/%s' % (getpass.getuser(), random_hex_str)
gs_url = 'gs://%s/%s' % (bucket_name, path_str)
# This emits a stderr warning because the URL doesn't exist. That's
# annoying but I wasn't able to find a straightforward way to avoid it.
assert not gsutil_path_exists(gs_url)
yield path_str
gsutil_wipe_path(gs_url)
@pytest.fixture(scope='function')
def gcs_builder(builder, bucket_name, tmp_object_path):
builder = builder.build().to_builder()
builder.set('core__persistent_cache__gcs__bucket_name', bucket_name)
builder.set('core__persistent_cache__gcs__object_path', tmp_object_path)
builder.set('core__persistent_cache__gcs__enabled', True)
builder.set('core__versioning_mode', 'assist')
return builder
# This should really be multiple separate tests, but it's expensive to do the
# setup, teardown, and client initialization, so we'll just do it all in one
# place.
def test_gcs_caching(gcs_builder):
# Setup.
call_counter = ResettingCounter()
builder = gcs_builder
builder.assign('x', 2)
builder.assign('y', 3)
@builder
def xy(x, y):
call_counter.mark()
return x * y
# Test reading from and writing to GCS cache.
flow = builder.build()
local_cache_path_str = flow.get('core__persistent_cache__flow_dir')
gcs_cache_url = flow.get('core__persistent_cache__gcs__url')
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 2
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
gsutil_wipe_path(gcs_cache_url)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
gsutil_wipe_path(gcs_cache_url)
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 2
# Test versioning.
@builder # noqa: F811
def xy(x, y):
call_counter.mark()
return y * x
flow = builder.build()
with pytest.raises(CodeVersioningError):
flow.get('xy')
local_wipe_path(local_cache_path_str)
flow = builder.build()
with pytest.raises(CodeVersioningError):
flow.get('xy')
@builder # noqa: F811
@bn.version(minor=1)
def xy(x, y):
call_counter.mark()
return y * x
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
@builder # noqa: F811
@bn.version(major=1)
def xy(x, y):
call_counter.mark()
return x ** y
flow = builder.build()
assert flow.get('xy') == 8
assert flow.setting('x', 4).get('xy') == 64
assert call_counter.times_called() == 2
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 8
assert flow.setting('x', 4).get('xy') == 64
assert call_counter.times_called() == 0
# Test indirect versioning.
@builder
def xy_plus(xy):
return xy + 1
flow = builder.build()
assert flow.get('xy_plus') == 9
assert call_counter.times_called() == 0
@builder # noqa: F811
@bn.version(major=1)
def xy(x, y):
call_counter.mark()
return int(float(x)) ** y
flow = builder.build()
with pytest.raises(CodeVersioningError):
flow.get('xy_plus')
@builder # noqa: F811
@bn.version(major=1, minor=1)
def xy(x, y):
call_counter.mark()
return int(float(y)) ** x
flow = builder.build()
assert flow.get('xy_plus') == 9
assert call_counter.times_called() == 0
@builder # noqa: F811
@bn.version(major=2)
def xy(x, y):
call_counter.mark()
return y ** x
flow = builder.build()
assert flow.get('xy_plus') == 10
assert call_counter.times_called() == 1
# Dask only works in Python 3.
if six.PY3:
# Test multi-file serialization.
dask_df = dd.from_pandas(
df_from_csv_str(
'''
color,number
red,1
blue,2
green,3
'''),
npartitions=1)
@builder
@bn.protocol.dask
def df():
call_counter.mark()
return dask_df
flow = builder.build()
assert equal_frame_and_index_content(
flow.get('df').compute(), dask_df.compute())
assert equal_frame_and_index_content(
flow.get('df').compute(), dask_df.compute())
assert call_counter.times_called() == 1
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert equal_frame_and_index_content(
flow.get('df').compute(), dask_df.compute())
assert call_counter.times_called() == 0 | 0.602529 | 0.464719 |
import ctypes
import os
import shutil
import site
import sys
import urllib.request
class InstallDnD:
    """Download and install the tkdnd Tcl extension and the TkinterDnD2
    wrapper into the running interpreter's site-packages.

    After construction, ``install_success`` is True when every step
    completed; otherwise ``message`` holds the error text.
    """

    # Base URL of the tkdnd release archives; the platform suffix is appended.
    _RELEASE_BASE = (
        "https://github.com/petasis/tkdnd/releases/download/"
        "tkdnd-release-test-v2.9.2/tkdnd-2.9.2-"
    )

    @staticmethod
    def _archive_suffix():
        """Return the release-archive suffix for the current platform."""
        platform = sys.platform
        if "linux" in platform:
            return "linux-x64.tgz"
        if "darwin" in platform:
            # Checked before "win": the string "darwin" also contains "win".
            return "osx-x64.tgz"
        if "win" in platform:
            # Pointer size distinguishes a 32- vs 64-bit interpreter.
            if ctypes.sizeof(ctypes.c_void_p) == 4:
                return "windows-x86.zip"
            return "windows-x64.zip"
        # Unknown platform: keep the raw value (the download will fail and
        # be reported via ``message``), matching the original behaviour.
        return platform

    def __init__(self):
        self.install_success = False
        self.message = ""
        try:
            package_paths = site.getsitepackages()
            tcl_path = os.path.join(package_paths[0], "tcl")
            site_path = package_paths[1]
            cur_dir = os.path.dirname(os.path.realpath(__file__))
            # Build the scratch directory path once instead of five times.
            temp_dir = os.path.realpath(
                os.path.join(cur_dir, "..", "resources", "temp")
            )
            os.makedirs(temp_dir, exist_ok=True)  # tolerate a missing temp dir

            tcl_link = self._RELEASE_BASE + self._archive_suffix()
            archive_path = os.path.join(temp_dir, os.path.basename(tcl_link))

            urllib.request.urlretrieve(tcl_link, archive_path)
            shutil.unpack_archive(archive_path, temp_dir)
            os.remove(archive_path)

            # The archive unpacks to a single top-level directory; move it
            # into place as the interpreter's tcl/tkdnd directory.
            unpacked = os.path.join(temp_dir, os.listdir(temp_dir)[0])
            shutil.move(unpacked, tcl_path)

            shutil.copytree(
                os.path.join(cur_dir, "..", "resources", "TkinterDnD2"),
                os.path.join(os.path.realpath(site_path), "TkinterDnD2"),
            )
            self.install_success = True
        except Exception as e:
            # Keep ``message`` a string, matching its "" initialisation
            # (the original stored the exception object itself).
            self.message = str(e)
import os
import shutil
import site
import sys
import urllib.request
class InstallDnD:
def __init__(self):
self.install_success = False
self.message = ""
try:
operating_system = sys.platform
if "linux" in operating_system:
operating_system = "linux-x64.tgz"
if "darwin" in operating_system:
operating_system = "osx-x64.tgz"
if "win" in operating_system and "darwin" not in operating_system:
if ctypes.sizeof(ctypes.c_voidp) == 4:
operating_system = "windows-x86.zip"
else:
operating_system = "windows-x64.zip"
package_paths = site.getsitepackages()
tcl_path = os.path.join(package_paths[0], "tcl")
site_path = package_paths[1]
cur_dir = os.path.dirname(os.path.realpath(__file__))
os.path.join(cur_dir, "..", "resources")
tcl_link = (
"https://github.com/petasis/tkdnd/releases/download/tkdnd-release-test-v2.9.2/tkdnd-2.9.2-"
+ f"{operating_system}"
)
urllib.request.urlretrieve(
tcl_link,
os.path.join(
os.path.join(cur_dir, "..", "resources", "temp"),
os.path.basename(tcl_link),
),
)
shutil.unpack_archive(
os.path.join(
os.path.join(cur_dir, "..", "resources", "temp"),
os.path.basename(tcl_link),
),
os.path.realpath(os.path.join(cur_dir, "..", "resources", "temp")),
)
os.remove(
os.path.join(
os.path.join(cur_dir, "..", "resources", "temp"),
os.path.basename(tcl_link),
)
)
shutil.move(
os.path.join(
os.path.realpath(os.path.join(cur_dir, "..", "resources", "temp")),
os.listdir(
os.path.realpath(
os.path.join(cur_dir, "..", "resources", "temp")
)
)[0],
),
tcl_path,
)
shutil.copytree(
os.path.join(cur_dir, "..", "resources", "TkinterDnD2"),
os.path.join(os.path.realpath(site_path), "TkinterDnD2"),
)
self.install_success = True
except Exception as e:
self.message = e | 0.136666 | 0.1015 |
from django.db import models
class Group(models.Model):
    """An idol group; ``status`` marks it as active or disbanded."""

    STATUS_NORMAL = 1
    STATUS_DISBAND = 0
    STATUS_ITEMS = (
        (STATUS_NORMAL, '正常'),
        (STATUS_DISBAND, '解散')
    )

    name = models.CharField(max_length=50, verbose_name='组合名')
    name_jp = models.CharField(max_length=50, verbose_name='日文')
    name_en = models.CharField(max_length=50, verbose_name='罗马')
    status = models.PositiveIntegerField(
        default=STATUS_NORMAL, choices=STATUS_ITEMS, verbose_name='状态')
    created_time = models.DateField(null=True, verbose_name='成立时间')
    homepage = models.URLField(null=True, verbose_name='主页')
    color = models.CharField(max_length=20, verbose_name='颜色')
    favicon = models.URLField(verbose_name='照片')

    class Meta:
        verbose_name = verbose_name_plural = '组合'

    def __str__(self):
        return self.name_jp

    @classmethod
    def get_all(cls, status=None):
        """Return all groups, or only those with the given status.

        Compares against ``None`` explicitly so STATUS_DISBAND (0) is a
        usable filter value (``if not status`` treated it as "no
        filter"), and filters by the requested status instead of the
        previously hard-coded STATUS_NORMAL.
        """
        if status is None:
            return cls.objects.all()
        return cls.objects.filter(status=status)

    def get_all_members(self):
        """Return every member of this group, regardless of status."""
        return self.member_set.all()

    @classmethod
    def get_by_name(cls, name):
        """Return groups whose name contains *name* (substring match)."""
        return cls.objects.filter(name__contains=name)
class Member(models.Model):
    """A member of a :class:`Group`; ``status`` marks active or graduated."""

    STATUS_NORMAL = 1
    STATUS_GRADUATED = 0
    STATUS_ITEMS = (
        (STATUS_NORMAL, '在籍'),
        (STATUS_GRADUATED, '毕业')
    )

    name = models.CharField(max_length=50, verbose_name='成员')
    name_jp = models.CharField(max_length=50, verbose_name='日文')
    name_en = models.CharField(max_length=50, verbose_name='罗马')
    status = models.PositiveIntegerField(
        default=STATUS_NORMAL, choices=STATUS_ITEMS, verbose_name='状态')
    joined_time = models.DateField(verbose_name='进入时间', null=True, blank=True)
    graduated_time = models.DateField(null=True, verbose_name='毕业时间')
    group = models.ForeignKey(Group, verbose_name='组合',
                              on_delete=models.DO_NOTHING)
    favicon = models.URLField(null=True, verbose_name='照片')
    color = models.CharField(null=True, max_length=20, verbose_name='成员色')
    birthday = models.DateField(null=True, verbose_name='生日', blank=True)
    hometown = models.CharField(max_length=50, verbose_name='出生地')
    nickname = models.CharField(max_length=50, verbose_name='昵称')

    class Meta:
        verbose_name = verbose_name_plural = '成员'
        ordering = ['-id']

    def __str__(self):
        return self.name

    @classmethod
    def get_all(cls, status=None):
        """Return all members, or only those with the given status.

        Compares against ``None`` explicitly so STATUS_GRADUATED (0) is
        a usable filter value, and filters by the requested status
        instead of the previously hard-coded STATUS_NORMAL.
        """
        if status is None:
            return cls.objects.all()
        return cls.objects.filter(status=status)

    @classmethod
    def get_by_group(cls, group_id, status=None):
        """Return the members of a group, optionally filtered by status.

        Returns an empty *queryset* when the group does not exist — the
        original returned a plain list, and the trailing
        ``select_related`` call then raised AttributeError.
        """
        try:
            group = Group.objects.get(id=group_id)
        except Group.DoesNotExist:
            return cls.objects.none()
        if status is None:
            members = group.member_set.all()
        else:
            members = group.member_set.filter(status=status)
        return members.select_related('group')
class CarouselPicture(models.Model):
    """A picture shown in the front-page carousel; soft-deleted via ``status``."""

    STATUS_NORMAL = 1
    STATUS_DELETE = 0
    STATUS_ITEMS = (
        (STATUS_NORMAL, '正常'),
        (STATUS_DELETE, '删除'),
    )

    name = models.CharField(max_length=120, verbose_name='名称')
    image = models.URLField(verbose_name='图片')
    status = models.PositiveIntegerField(default=STATUS_NORMAL, choices=STATUS_ITEMS,
                                         verbose_name='状态')
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')

    @classmethod
    def current_images(cls):
        """Return the pictures that have not been soft-deleted.

        Was an instance ``@property`` reading ``self.objects``; Django
        managers are not accessible from model instances, so it always
        raised. A classmethod queries the manager correctly.
        """
        return cls.objects.filter(status=cls.STATUS_NORMAL)
class Group(models.Model):
STATUS_NORMAL = 1
STATUS_DISBAND = 0
STATUS_ITEMS = (
(STATUS_NORMAL, '正常'),
(STATUS_DISBAND, '解散')
)
name = models.CharField(max_length=50, verbose_name='组合名')
name_jp = models.CharField(max_length=50, verbose_name='日文')
name_en = models.CharField(max_length=50, verbose_name='罗马')
status = models.PositiveIntegerField(
default=STATUS_NORMAL, choices=STATUS_ITEMS, verbose_name='状态')
created_time = models.DateField(null=True, verbose_name='成立时间')
homepage = models.URLField(null=True, verbose_name='主页')
color = models.CharField(max_length=20, verbose_name='颜色')
favicon = models.URLField(verbose_name='照片')
class Meta:
verbose_name = verbose_name_plural = '组合'
def __str__(self):
return self.name_jp
@classmethod
def get_all(cls, status=None):
if not status:
return cls.objects.all()
else:
return cls.objects.filter(status=Group.STATUS_NORMAL)
def get_all_members(self):
return self.member_set.all()
@classmethod
def get_by_name(cls, name):
return cls.objects.filter(name__contains=name)
class Member(models.Model):
STATUS_NORMAL = 1
STATUS_GRADUATED = 0
STATUS_ITEMS = (
(STATUS_NORMAL, '在籍'),
(STATUS_GRADUATED, '毕业')
)
name = models.CharField(max_length=50, verbose_name='成员')
name_jp = models.CharField(max_length=50, verbose_name='日文')
name_en = models.CharField(max_length=50, verbose_name='罗马')
status = models.PositiveIntegerField(
default=STATUS_NORMAL, choices=STATUS_ITEMS, verbose_name='状态')
joined_time = models.DateField(verbose_name='进入时间', null=True, blank=True)
graduated_time = models.DateField(null=True, verbose_name='毕业时间')
group = models.ForeignKey(Group, verbose_name='组合',
on_delete=models.DO_NOTHING)
favicon = models.URLField(null=True, verbose_name='照片')
color = models.CharField(null=True, max_length=20, verbose_name='成员色')
birthday = models.DateField(null=True, verbose_name='生日', blank=True)
hometown = models.CharField(max_length=50, verbose_name='出生地')
nickname = models.CharField(max_length=50, verbose_name='昵称')
class Meta:
verbose_name = verbose_name_plural = '成员'
ordering = ['-id']
def __str__(self):
return self.name
@classmethod
def get_all(cls, status=None):
if not status:
return cls.objects.all()
else:
return cls.objects.filter(status=Group.STATUS_NORMAL)
@classmethod
def get_by_group(cls, group_id, status=None):
try:
group = Group.objects.get(id=group_id)
except Group.DoesNotExist:
members = []
else:
if not status:
members = group.member_set.all()
else:
members = group.member_set.filter(status=Member.STATUS_NORMAL)
return members.select_related('group')
class CarouselPicture(models.Model):
STATUS_NORMAL = 1
STATUS_DELETE = 0
STATUS_ITEMS = (
(STATUS_NORMAL, '正常'),
(STATUS_DELETE, '删除'),
)
name = models.CharField(max_length=120, verbose_name='名称')
image = models.URLField(verbose_name='图片')
status = models.PositiveIntegerField(default=STATUS_NORMAL, choices=STATUS_ITEMS,
verbose_name='状态')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
@property
def current_images(self):
return self.objects.filter(status=self.STATUS_NORMAL) | 0.50415 | 0.081447 |
import re

from binaryninja import *
class ROPChain(BinaryDataNotification):
    """A writable ROP chain backed by a segment of the binary view.

    Each chain slot holds one gadget address. ``update_segment`` renders
    the chain as push/ret assembly into the segment, and edits made to
    those bytes are parsed back into ``self.chain`` via the
    data-written notification.
    """

    def __init__(self, bv: BinaryView, segment: Segment, length: int, arch: Architecture):
        BinaryDataNotification.__init__(self)
        self.bv = bv
        self.segment = segment
        # One slot per gadget, initially all zero.
        self.chain = [0x0] * length
        self.arch = arch
        # Callbacks invoked as listener(index, value); (None, None) means
        # "the whole chain changed" (see read_segment).
        self.listeners = []
        # Subscribe to data_written events on the view.
        self.bv.register_notification(self)

    def __getitem__(self, item):
        return self.chain[item]

    def __setitem__(self, key, value):
        # Store the gadget and fan the single-slot change out to listeners.
        self.chain[key] = value
        for listener in self.listeners:
            listener(key, value)

    def data_written(self, view: BinaryView, offset, length):
        # Re-parse the segment only once analysis of the new bytes finishes.
        view.add_analysis_completion_event(lambda: self.read_segment())

    def add_listener(self, listener):
        self.listeners.append(listener)

    def address_at_index(self, index):
        # Slot stride mirrors get_assembly: 5 bytes for a padded 32-bit
        # push, 11 bytes (padded mov rax + push rax) on 64-bit.
        # address_size expects a BinaryView but only reads ``.arch``,
        # which this object also exposes, so passing self works.
        if address_size(self) == 4:
            return self.segment.start + 5 * index
        else:
            return self.segment.start + 11 * index

    def update_segment(self):
        """Write the assembled chain into the backing segment."""
        self.bv.write(self.segment.start, self.get_assembly())

    def read_segment(self):
        """Rebuild ``self.chain`` from the code currently in the segment.

        Collects constants pushed before the first LLIL jump, then
        notifies listeners with (None, None) to signal a full reload.
        """
        func: Function = self.bv.get_function_at(self.segment.start)
        if func is None:
            return
        gadgets = []
        il = func.low_level_il
        for inst in il.instructions:
            if inst.operation == LowLevelILOperation.LLIL_JUMP:
                break
            if inst.operation == LowLevelILOperation.LLIL_PUSH:
                value = inst.operands[0].value
                if value.type == RegisterValueType.ConstantValue:
                    gadgets.append(value.value)
        self.chain = gadgets
        for listener in self.listeners:
            listener(None, None)

    def get_assembly(self):
        """Assemble the chain: one NOP-padded push per slot, then the rets.

        The fixed per-slot padding keeps address_at_index valid.
        """
        asm = b""
        if address_size(self) == 4:
            for gadget in self.chain:
                asm += self.arch.assemble(f"push 0x{gadget:0x}", 0).ljust(5, b'\x90')
        else:
            for gadget in self.chain:
                # x86-64 push takes no 64-bit immediate; stage via rax.
                asm += self.arch.assemble(f"mov rax, 0x{gadget:0x}", 0).ljust(10, b'\x90')
                asm += self.arch.assemble(f"push rax", 0)
        asm += self.arch.assemble(f"ret", 0) * len(self.chain)
        return asm
def format_addr(bv: BinaryView, addr):
    """Render *addr* as disassembly when possible, else zero-padded hex."""
    rendered = disasm_at_addr(bv, addr)
    if rendered:
        return rendered
    width = address_size(bv) * 2
    return f"{addr:x}".rjust(width, '0')
def disasm_at_addr(bv: BinaryView, addr):
    """Disassemble up to three instructions at *addr*, joined with ' ; '.

    Returns "" when *addr* is outside the view, not inside an
    executable segment, or cannot be decoded. Disassembly stops after a
    ret/int/syscall or any other branching instruction.

    Requires the module-level ``import re`` (previously missing: only
    ``from binaryninja import *`` was imported).
    """
    # NOTE(review): ``>=`` also rejects addr == bv.start — confirm intended.
    if bv.start >= addr or addr > bv.end:
        return ""
    segment = bv.get_segment_at(addr)  # fetch once instead of twice
    if segment is None or not segment.executable:
        return ""
    # Mnemonics that terminate a gadget listing.
    stop_on = ['retn', 'int', 'syscall']
    ops = []
    done = False
    while not done and len(ops) < 3:
        data = bv.read(addr, bv.arch.max_instr_length)
        text = bv.arch.get_instruction_text(data, addr)
        info = bv.arch.get_instruction_info(data, addr)
        if text is None:
            return ""
        tokens, length = text
        if tokens is None:
            return ""
        # Any branching instruction other than a plain return ends the run
        # (but its text is still appended below, as before).
        if len(info.branches) > 0 and not tokens[0].text == "retn":
            done = True
        if tokens[0].text in stop_on:
            done = True
        line = ''.join(token.text for token in tokens)
        ops.append(line)
        addr += length
    # Collapse token padding runs into single spaces.
    return re.sub(r'\s+', ' ', " ; ".join(ops))
def address_size(bv: BinaryView):
    """Pointer width in bytes for *bv*'s architecture; defaults to 8."""
    if bv.arch is None:
        return 8
    return bv.arch.address_size
class ROPChain(BinaryDataNotification):
def __init__(self, bv: BinaryView, segment: Segment, length: int, arch: Architecture):
BinaryDataNotification.__init__(self)
self.bv = bv
self.segment = segment
self.chain = [0x0] * length
self.arch = arch
self.listeners = []
self.bv.register_notification(self)
def __getitem__(self, item):
return self.chain[item]
def __setitem__(self, key, value):
self.chain[key] = value
for listener in self.listeners:
listener(key, value)
def data_written(self, view: BinaryView, offset, length):
view.add_analysis_completion_event(lambda: self.read_segment())
def add_listener(self, listener):
self.listeners.append(listener)
def address_at_index(self, index):
if address_size(self) == 4:
return self.segment.start + 5 * index
else:
return self.segment.start + 11 * index
def update_segment(self):
self.bv.write(self.segment.start, self.get_assembly())
def read_segment(self):
func: Function = self.bv.get_function_at(self.segment.start)
if func is None:
return
gadgets = []
il = func.low_level_il
for inst in il.instructions:
if inst.operation == LowLevelILOperation.LLIL_JUMP:
break
if inst.operation == LowLevelILOperation.LLIL_PUSH:
value = inst.operands[0].value
if value.type == RegisterValueType.ConstantValue:
gadgets.append(value.value)
self.chain = gadgets
for listener in self.listeners:
listener(None, None)
def get_assembly(self):
asm = b""
if address_size(self) == 4:
for gadget in self.chain:
asm += self.arch.assemble(f"push 0x{gadget:0x}", 0).ljust(5, b'\x90')
else:
for gadget in self.chain:
asm += self.arch.assemble(f"mov rax, 0x{gadget:0x}", 0).ljust(10, b'\x90')
asm += self.arch.assemble(f"push rax", 0)
asm += self.arch.assemble(f"ret", 0) * len(self.chain)
return asm
def format_addr(bv: BinaryView, addr):
disasm = disasm_at_addr(bv, addr)
if len(disasm) > 0:
return disasm
return f"{addr:x}".rjust(address_size(bv) * 2, '0')
def disasm_at_addr(bv: BinaryView, addr):
if bv.start >= addr or addr > bv.end:
return ""
if bv.get_segment_at(addr) is None or not bv.get_segment_at(addr).executable:
return ""
stop_on = ['retn', 'int', 'syscall']
ops = []
done = False
while not done and len(ops) < 3:
data = bv.read(addr, bv.arch.max_instr_length)
text = bv.arch.get_instruction_text(data, addr)
info = bv.arch.get_instruction_info(data, addr)
if text is None:
return ""
tokens, length = text
if tokens is None:
return ""
if len(info.branches) > 0 and not tokens[0].text == "retn":
done = True
for search in stop_on:
if tokens[0].text == search:
done = True
line = ''.join(token.text for token in tokens)
ops.append(line)
addr += length
return re.sub(r'\s+', ' ', " ; ".join(ops))
def address_size(bv: BinaryView):
return bv.arch.address_size if bv.arch is not None else 8 | 0.689306 | 0.207074 |
"""REPL arguments tokenizer."""
import sys
import re
class Tokenizer(object):  # noqa
    """Main class for the Tokenizer.

    Tokenize all the arguments passed into the REPL for parsing.
    Produces (TAG, text) tuples; whitespace and comments are dropped.
    """

    def __init__(self, chars):
        """Initialize the Tokenizer class.

        :chars: Passed in arguments to parse from the repl.
        """
        self.chars = chars
        bounds = 'BOUNDS'
        operator = 'OPERATOR'
        variable = 'VARIABLE'
        assignment = 'ASSIGNMENT'
        ints = 'INTS'
        # The expressions used to parse and group the arguments.
        # Order matters: earlier patterns win at each position, e.g.
        # ``inv(x)`` must be tried before a bare variable letter.
        expressions = [
            (r'[ \n\t ]+', None),
            (r'#[^\n]*', None),
            (r'inv\([a-zA-Z]\)', operator),
            (r'[a-zA-Z]', variable),
            (r'\=', assignment),
            (r'\[([0-9, ;]+)\]', bounds),
            (r'\d', ints),
            (r'\+', operator),
            (r'-', operator),
            (r'\*', operator),
            (r'\'', operator)
        ]
        self.exprs = expressions

    @staticmethod
    def tokenizer(arguments, patterns_exprs):
        """Parse the arguments against the expressions.

        :arguments: The string to be tokenized.
        :patterns_exprs: Sequence of (pattern, tag) pairs; a tag of
            None means "match and discard" (whitespace, comments).
        :returns: List of (tag, text) tuples, stopping at the first
            character no pattern matches (reported on stdout).
        """
        # Compile each pattern once, not once per input position as before.
        compiled = [(re.compile(pattern), tag) for pattern, tag in patterns_exprs]
        pos = 0
        tokens = []
        while pos < len(arguments):
            match = None
            for regex, tag in compiled:
                match = regex.match(arguments, pos)
                if match:
                    text = match.group(0)
                    if tag:
                        tokens.append((tag, text))
                    break
            if not match:
                sys.stdout.write(
                    'Illegal character: `%s`\n' % arguments[pos])
                break
            else:
                pos = match.end(0)
        return tokens

    def lexer(self):
        """Tokenize ``self.chars`` using the configured expressions."""
        tokens = self.tokenizer(self.chars, self.exprs)
        return tokens
import sys
import re
class Tokenizer(object): # noqa
"""Main class for the Tokenizer.
Tokenize all the arguments passed into the REPL for parsing.
"""
def __init__(self, chars):
"""Initialize the Tokenizer class.
:chars: Passed in arguments to parse from the the repl.
"""
self.chars = chars
bounds = 'BOUNDS'
operator = 'OPERATOR'
variable = 'VARIABLE'
assignment = 'ASSIGNMENT'
ints = 'INTS'
# the expression used to parse and group the arguments
expressions = [
(r'[ \n\t ]+', None),
(r'#[^\n]*', None),
(r'inv\([a-zA-Z]\)', operator),
(r'[a-zA-Z]', variable),
(r'\=', assignment),
(r'\[([0-9, ;]+)\]', bounds),
(r'\d', ints),
(r'\+', operator),
(r'-', operator),
(r'\*', operator),
(r'\'', operator)
]
self.exprs = expressions
@staticmethod
def tokenizer(arguments, patterns_exprs):
"""Parse the arguments against the expressions.
:arguments: The arguments to be parsed.
:patterns_exprs: the pattern expression for matching.
"""
pos = 0
tokens = []
while pos < len(arguments):
match = None
for expr in patterns_exprs:
pattern, tag = expr
regex = re.compile(pattern)
match = regex.match(arguments, pos)
if match:
text = match.group(0)
if tag:
token = (tag, text)
tokens.append(token)
break
if not match:
sys.stdout.write(
'Illegal character: `%s`\n' % arguments[pos])
break
else:
pos = match.end(0)
return tokens
def lexer(self):
"""Re-format the tokens for arithmetic operations."""
tokens = self.tokenizer(self.chars, self.exprs)
return tokens | 0.556882 | 0.405979 |
import os
import asyncio
from random import randint, sample
import discord
from discord.ext import commands
class Social:
    """Discord cog with playful social commands.

    Most commands post a themed message mentioning the caller and the
    target, then upload a random GIF from the matching folder under
    ``data/gifs/``. Command docstrings are user-visible help text and
    are kept verbatim.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def kiss(self, context, user: discord.Member):
        """ kiss anyone """
        msg = '{0} Was KISSED by {1}! :kiss:'.format(user.mention, context.message.author.mention)
        folder = "kiss"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def taunt(self, context, user: discord.Member):
        """ taunt anyone """
        # NOTE(review): the :kiss: emoji looks copy-pasted from `kiss` —
        # confirm the intended emoji before changing user-facing text.
        msg = '{0} Was TAUNTED by {1}! :kiss:'.format(user.mention, context.message.author.mention)
        folder = "taunt"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def gank(self, context, user: discord.Member):
        """ gank anyone """
        # NOTE(review): same copy-pasted :kiss: emoji as `taunt`.
        msg = '{0} Was Ganked by {1}! :kiss:'.format(user.mention, context.message.author.mention)
        folder = "gank"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def sit(self, context, user: discord.Member):
        """ sit on anyone face"""
        msg = '{1}! Sits on {0} face :smiling_imp: '.format(user.mention, context.message.author.mention)
        folder = "sit"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def tip(self, context, user: discord.Member):
        """ make it rain on anyone """
        msg = '{1}! Makes it rain on {0} :money_mouth: :money_with_wings: '.format(user.mention, context.message.author.mention)
        folder = "tips"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def shoot(self, context, user: discord.Member):
        """ shoot anyone """
        msg = '{0} Was shot dead by {1}! :skull: :gun: '.format(user.mention, context.message.author.mention)
        folder = "shoot"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def snatch(self, context, user: discord.Member):
        """ snatch anyone wig"""
        msg = '{0} Wig has been snatched by {1}! r.i.p :scream: '.format(user.mention, context.message.author.mention)
        folder = "snatched"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def cuddle(self, context, user: discord.Member):
        """ cuddle with anyone """
        msg = '{1}! Cuddles {0} so hard! '.format(user.mention, context.message.author.mention)
        folder = "cuddle"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def spell(self, context, user: discord.Member):
        """ casts a spell on anyone """
        msg = '{1}! Casts a spell on {0} ! :dizzy: :comet: '.format(user.mention, context.message.author.mention)
        folder = "spell"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def hugs(self, context, user: discord.Member):
        """ hugs anyone """
        msg = '{1}! Gives {0} a big hug! :hugging: '.format(user.mention, context.message.author.mention)
        folder = "hug"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def truth(self, context, user: discord.Member):
        """ truth questions """
        msg = '{1}! Challenges {0} to tell the truth! '.format(user.mention, context.message.author.mention)
        folder = "truth"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def dare(self, context, user: discord.Member):
        """ dare questions """
        msg = '{1}! Challenges {0} to a dare! '.format(user.mention, context.message.author.mention)
        folder = "dare"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def feed(self, context, user: discord.Member):
        """ feed anyone """
        msg = '{1}! Feeds {0}! :yum: '.format(user.mention, context.message.author.mention)
        folder = "feeds"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def drag(self, context, user: discord.Member):
        """ drag race persona of a friend """
        msg = '{1}! Reveals {0}! true inner drag persona! :princess: '.format(user.mention, context.message.author.mention)
        folder = "drag"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def future(self, context, user: discord.Member):
        """ check some ones future """
        msg = '{1}! Takes a glance at what {0}! will become in the future! :scream: '.format(user.mention, context.message.author.mention)
        folder = "future"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def shade(self, context, user: discord.Member):
        """ throw some serious shade """
        msg = 'It\'s cold in the shade. Isn\'t it {mentioned_user}?'.format(
            mentioned_user=user.mention)
        folder = "shade"
        await self.upload_random_gif(msg, folder)

    @commands.command(pass_context=True)
    async def adore(self, context, *gif):
        """ summon adore (e.g. die, drag, ew, fuck, gasp, idgaf, overit, party, tongue) """
        adores = ("die", "drag", "ew", "fuck", "gasp", "idgaf", "overit", "party", "tongue")
        if gif:
            # Bug fix: *gif is a tuple of words, and the original called
            # gif.lower() on the tuple, raising AttributeError whenever an
            # argument was supplied. Use the first word instead.
            name = gif[0].lower()
            if name in adores:
                return await self.bot.upload("data/gifs/adore/{0}.gif".format(name))
        await self.upload_random_gif(None, "adore")

    @commands.command()
    async def rr(self):
        """ russian roulette... good luck! """
        await self.bot.say('You spin the cylinder of the revolver with 1 bullet in it...')
        await asyncio.sleep(1)
        await self.bot.say('...you place the muzzle against your head and pull the trigger...')
        await asyncio.sleep(2)
        # 1-in-6 chance, like a real six-shot cylinder.
        if randint(1, 6) == 1:
            await self.bot.say('...your brain gets splattered all over the wall.')
        else:
            await self.bot.say('...you live to see another day.')

    async def upload_random_gif(self, msg, folder):
        """Say *msg* (if any) and upload a random GIF from data/gifs/<folder>."""
        if msg:
            await self.bot.say(msg)
        folderPath = "data/gifs/" + folder
        fileList = os.listdir(folderPath)
        # Pick a random file; raises if the folder is empty, as before.
        gifPath = folderPath + "/" + fileList[randint(0, len(fileList) - 1)]
        await self.bot.upload(gifPath)
def setup(bot):
    """Extension entry point used by discord.py's load_extension."""
    bot.add_cog(Social(bot))
import asyncio
from random import randint, sample
import discord
from discord.ext import commands
class Social:
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def kiss(self, context, user: discord.Member):
""" kiss anyone """
msg = '{0} Was KISSED by {1}! :kiss:'.format(user.mention, context.message.author.mention)
folder = "kiss"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def taunt(self, context, user: discord.Member):
""" taunt anyone """
msg = '{0} Was TAUNTED by {1}! :kiss:'.format(user.mention, context.message.author.mention)
folder = "taunt"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def gank(self, context, user: discord.Member):
""" gank anyone """
msg = '{0} Was Ganked by {1}! :kiss:'.format(user.mention, context.message.author.mention)
folder = "gank"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def sit(self, context, user: discord.Member):
""" sit on anyone face"""
msg = '{1}! Sits on {0} face :smiling_imp: '.format(user.mention, context.message.author.mention)
folder = "sit"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def tip(self, context, user: discord.Member):
""" make it rain on anyone """
msg = '{1}! Makes it rain on {0} :money_mouth: :money_with_wings: '.format(user.mention, context.message.author.mention)
folder = "tips"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def shoot(self, context, user: discord.Member):
""" shoot anyone """
msg = '{0} Was shot dead by {1}! :skull: :gun: '.format(user.mention, context.message.author.mention)
folder = "shoot"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def snatch(self, context, user: discord.Member):
""" snatch anyone wig"""
msg = '{0} Wig has been snatched by {1}! r.i.p :scream: '.format(user.mention, context.message.author.mention)
folder = "snatched"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def cuddle(self, context, user: discord.Member):
""" cuddle with anyone """
msg = '{1}! Cuddles {0} so hard! '.format(user.mention, context.message.author.mention)
folder = "cuddle"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def spell(self, context, user: discord.Member):
""" casts a spell on anyone """
msg = '{1}! Casts a spell on {0} ! :dizzy: :comet: '.format(user.mention, context.message.author.mention)
folder = "spell"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def hugs(self, context, user: discord.Member):
""" hugs anyone """
msg = '{1}! Gives {0} a big hug! :hugging: '.format(user.mention, context.message.author.mention)
folder = "hug"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def truth(self, context, user: discord.Member):
""" truth questions """
msg = '{1}! Challenges {0} to tell the truth! '.format(user.mention, context.message.author.mention)
folder = "truth"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def dare(self, context, user: discord.Member):
""" dare questions """
msg = '{1}! Challenges {0} to a dare! '.format(user.mention, context.message.author.mention)
folder = "dare"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def feed(self, context, user: discord.Member):
""" feed anyone """
msg = '{1}! Feeds {0}! :yum: '.format(user.mention, context.message.author.mention)
folder = "feeds"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def drag(self, context, user: discord.Member):
""" drag race persona of a friend """
msg = '{1}! Reveals {0}! true inner drag persona! :princess: '.format(user.mention, context.message.author.mention)
folder = "drag"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def future(self, context, user: discord.Member):
""" check some ones future """
msg = '{1}! Takes a glance at what {0}! will become in the future! :scream: '.format(user.mention, context.message.author.mention)
folder = "future"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def shade(self, context, user: discord.Member):
""" throw some serious shade """
msg = 'It\'s cold in the shade. Isn\'t it {mentioned_user}?'.format(
mentioned_user = user.mention)
folder = "shade"
await self.upload_random_gif(msg, folder)
@commands.command(pass_context=True)
async def adore(self, context, *gif):
""" summon adore (e.g. die, drag, ew, fuck, gasp, idgaf, overit, party, tongue) """
adores = ("die", "drag", "ew", "fuck", "gasp", "idgaf", "overit", "party", "tongue")
if gif:
gif = gif.lower()
if gif in adores:
return await self.bot.upload("data/gifs/adore/{0}.gif".format(gif))
await self.upload_random_gif(None, "adore")
@commands.command()
async def rr(self):
""" russian roulette... good luck! """
await self.bot.say('You spin the cylinder of the revolver with 1 bullet in it...')
await asyncio.sleep(1)
await self.bot.say('...you place the muzzle against your head and pull the trigger...')
await asyncio.sleep(2)
if randint(1, 6) == 1:
await self.bot.say('...your brain gets splattered all over the wall.')
else:
await self.bot.say('...you live to see another day.')
async def upload_random_gif(self, msg, folder):
if msg:
await self.bot.say(msg)
folderPath = "data/gifs/" + folder
fileList = os.listdir(folderPath)
gifPath = folderPath + "/" + fileList[randint(0, len(fileList) - 1)]
await self.bot.upload(gifPath)
def setup(bot):
bot.add_cog(Social(bot)) | 0.316264 | 0.139719 |
__all__ = ['PolarToCartesianWarp', 'CameraRadarCoordinateTransform', 'compute_radar_intrinsic_matrix']
# Cell
import tensorflow as tf
from tensorflow.keras import models, layers
import tensorflow_addons as tfa
import numpy as np
from .radar import v1_constants
# Cell
class PolarToCartesianWarp(layers.Layer):
    """Differentiable polar-image to Cartesian mapping.

    This is a TensorFlow Keras layer and expects a batch of input with
    shape [n, r, az, c]. Running eagerly is supported as well. For a
    single example input, use expand_dims or newaxis.
    """
    def __init__(self, full=True, scale=1.):
        # NOTE(review): ``full`` and ``scale`` are stored but never read
        # in build()/call() — confirm whether they are still needed.
        super(PolarToCartesianWarp, self).__init__()
        self.full = full
        self.scale = scale
    def build(self, input_shape):
        # input_shape is [batch, range_bins, angle_bins, channels].
        range_bins, angle_bins = input_shape[1:3]
        # Cartesian target grid, normalised: x spans [-1, 1), y spans (0, 1].
        xx = np.arange(-range_bins, range_bins)/range_bins
        yy = 1 - np.arange(0, range_bins)/range_bins
        mg = np.meshgrid(xx, yy)
        # For every Cartesian pixel, the (range, angle) source coordinate
        # in the polar image, expressed in bin units for the resampler.
        rr = range_bins - np.sqrt(mg[0]**2 + mg[1]**2) * range_bins
        tt = angle_bins - np.arctan2(mg[1], mg[0])/np.pi*angle_bins
        self.warp = tf.Variable(
            np.stack([tt, rr], axis=-1)[np.newaxis, ...],
            trainable=False,
            dtype=tf.float32
        )
        # Tile the warp grid per batch element.
        # NOTE(review): this rebinds self.warp from the tf.Variable to a
        # plain tensor and needs a static batch size (input_shape[0] not
        # None) — confirm against how the layer is built.
        self.warp = tf.repeat(self.warp, repeats=input_shape[0], axis=0)
    def call(self, inputs):
        # warp[..., 0] / warp[..., 1] are the (angle, range) sample
        # coordinates consumed by tfa's resampler.
        return tfa.image.resampler(inputs, self.warp)
# Cell
class CameraRadarCoordinateTransform:
    """Coordinate transforms between camera, radar and world frames.

    Stub: the individual projections are not implemented yet; only the
    convenience compositions are wired up.
    """
    def __init__(self, camera_f, camera_p, radar_f, radar_p):
        # f/p pairs per sensor; presumably intrinsics and pose — TODO confirm.
        self.camera_f = camera_f
        self.camera_p = camera_p
        self.radar_f = radar_f
        self.radar_p = radar_p
    def camera2world(self, uv, depth=None):
        """Projects camera pixel coordinates u, v, depth to world coordinates
        uv can be n x 2 lists or n x 3
        if uv is n x 2, depth image needs to be provided.
        Depth will be retrieved using nearest neighbor interpolation
        """
        pass
    def world2camera(self, xyz):
        pass
    def radar2world(self, xy, h=None):
        pass
    def world2radar(self, xyz):
        pass
    def camera2radar(self, uv, depth=None):
        """Convenience function combining `camera2world` and `world2radar`"""
        xyz = self.camera2world(uv, depth)
        # Bug fix: previously called world2camera, which would project
        # straight back to the camera instead of into the radar frame.
        radar_xy = self.world2radar(xyz)
        return radar_xy
    def radar2camera(self, radar_xy, radar_height=None):
        """Convenience function combining `radar2world` and `world2camera`"""
        xyz = self.radar2world(radar_xy, radar_height)
        uvz = self.world2camera(xyz)
        return uvz
# Cell
def compute_radar_intrinsic_matrix(radarframe):
    """Build the 3x3 radar intrinsic matrix.

    ``radarframe`` must provide ``max_range`` and ``range_nbins``.

    NOTE(review): the original branched on ``radarframe.flipped``, but both
    branches produced the identical matrix (``1./scale`` equals ``1/scale``
    in Python 3), so the branch was removed; confirm ``flipped`` was not
    meant to negate a sign somewhere.
    """
    scale = 1 / radarframe.max_range
    nbins = radarframe.range_nbins
    f = np.array([
        [nbins, 0, nbins],
        [0, -nbins, nbins],
        [0, 0, 1 / scale],
    ])
    return scale * f
__all__ = ['PolarToCartesianWarp', 'CameraRadarCoordinateTransform', 'compute_radar_intrinsic_matrix']
# Cell
import tensorflow as tf
from tensorflow.keras import models, layers
import tensorflow_addons as tfa
import numpy as np
from .radar import v1_constants
# Cell
class PolarToCartesianWarp(layers.Layer):
    """Differentiable polar-image to Cartesian mapping.

    This is a Tensorflow Keras Layer and expects a batch of input with
    shape [n, r, az, c].  Running eagerly is supported as well.
    For single example input, use expanddims or newaxis.
    """
    def __init__(self, full=True, scale=1.):
        super(PolarToCartesianWarp, self).__init__()
        # NOTE(review): `full` and `scale` are stored but never read in this
        # class -- confirm whether they were meant to shape the warp grid.
        self.full = full
        self.scale = scale
    def build(self, input_shape):
        # input_shape is [n, range_bins, angle_bins, channels].
        range_bins, angle_bins = input_shape[1:3]
        # Normalized Cartesian target grid: x spans [-1, 1), y spans (0, 1].
        xx = np.arange(-range_bins, range_bins)/range_bins
        yy = 1 - np.arange(0, range_bins)/range_bins
        mg = np.meshgrid(xx, yy)
        # For each Cartesian pixel, the (angle, range) source coordinates in
        # the polar image; both are subtracted from the bin count, so index 0
        # corresponds to the maximum range / angle.
        rr = range_bins - np.sqrt(mg[0]**2 + mg[1]**2) * range_bins
        tt = angle_bins - np.arctan2(mg[1], mg[0])/np.pi*angle_bins
        self.warp = tf.Variable(
            np.stack([tt, rr], axis=-1)[np.newaxis, ...],
            trainable=False,
            dtype=tf.float32
        )
        # One warp grid per batch element; requires a static batch size.
        self.warp = tf.repeat(self.warp, repeats=input_shape[0], axis=0)
    def call(self, inputs):
        # resampler samples `inputs` at the precomputed warp coordinates.
        return tfa.image.resampler(inputs, self.warp)
# Cell
class CameraRadarCoordinateTransform:
    """Coordinate transforms between camera, radar and world frames.

    Stub: the four projection primitives are not implemented yet; only the
    convenience compositions exist.

    Parameters: camera/radar intrinsic (``*_f``) and pose (``*_p``) data,
    stored as given.
    """
    def __init__(self, camera_f, camera_p, radar_f, radar_p):
        self.camera_f = camera_f
        self.camera_p = camera_p
        self.radar_f = radar_f
        self.radar_p = radar_p
    def camera2world(self, uv, depth=None):
        """Projects camera pixel coordinates u, v, depth to world coordinates
        uv can be n x 2 lists or n x 3
        if uv is n x 2, depth image needs to be provided.
        Depth will be retrieved using nearest neighbor interpolation
        """
        pass
    def world2camera(self, xyz):
        pass
    def radar2world(self, xy, h=None):
        pass
    def world2radar(self, xyz):
        pass
    def camera2radar(self, uv, depth=None):
        """Convenience function combining `camera2world` and `world2radar`"""
        xyz = self.camera2world(uv, depth)
        # BUGFIX: previously called world2camera here, which projected the
        # point straight back into the camera instead of into radar coords.
        radar_xy = self.world2radar(xyz)
        return radar_xy
    def radar2camera(self, radar_xy, radar_height=None):
        """Convenience function combining `radar2world` and `world2camera`"""
        xyz = self.radar2world(radar_xy, radar_height)
        uvz = self.world2camera(xyz)
        return uvz
# Cell
def compute_radar_intrinsic_matrix(radarframe):
    """Build the 3x3 radar intrinsic matrix.

    ``radarframe`` must provide ``max_range`` and ``range_nbins``.

    NOTE(review): the original branched on ``radarframe.flipped``, but both
    branches produced the identical matrix (``1./scale`` equals ``1/scale``
    in Python 3), so the branch was removed; confirm ``flipped`` was not
    meant to negate a sign somewhere.
    """
    scale = 1 / radarframe.max_range
    nbins = radarframe.range_nbins
    f = np.array([
        [nbins, 0, nbins],
        [0, -nbins, nbins],
        [0, 0, 1 / scale],
    ])
    return scale * f
import torch
from torch import nn
from typing import Optional
from typing import NamedTuple
from .discriminators import DiscriminatorOutput
from ....misc.toolkit import get_gradient
class GANTarget(NamedTuple):
    """Discriminator target: real/fake flag plus optional class labels
    (labels feed the conditional cross-entropy term in GANLoss)."""
    is_real: bool
    labels: Optional[torch.Tensor] = None
class GradientNormLoss(nn.Module):
    """Penalize the deviation of per-sample input-gradient L2 norms from a
    target value ``k`` (WGAN-GP style gradient penalty)."""

    def __init__(self, k: float = 1.0):
        super().__init__()
        self.k = k

    def forward(self, net_input: torch.Tensor, output: torch.Tensor) -> torch.Tensor:
        grads = get_gradient(output, net_input, True, True)
        flat = grads.view(net_input.shape[0], -1)  # type: ignore
        norms = flat.norm(2, dim=1)
        # Mean squared distance of each sample's gradient norm from k.
        return ((norms - self.k) ** 2).mean()
class GANLoss(nn.Module):
    """Adversarial loss supporting 'lsgan', 'vanilla' and 'wgangp' modes,
    with an optional auxiliary classification term for conditional GANs."""

    def __init__(self, gan_mode: str):
        super().__init__()
        self.loss: nn.Module
        self.gan_mode = gan_mode
        # Buffers follow the module across devices / dtypes.
        self.register_buffer("real_label", torch.tensor(1.0))
        self.register_buffer("fake_label", torch.tensor(0.0))
        if gan_mode == "lsgan":
            self.loss = nn.MSELoss()
        elif gan_mode == "vanilla":
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode == "wgangp":
            self.loss = GradientNormLoss(k=1.0)
        else:
            raise NotImplementedError(f"gan mode {gan_mode} not implemented")
        self.ce = nn.CrossEntropyLoss()

    def expand_target(self, tensor: torch.Tensor, is_real: bool) -> torch.Tensor:
        """Broadcast the real/fake label buffer to the prediction's shape."""
        if is_real:
            return self.real_label.expand_as(tensor)  # type: ignore
        return self.fake_label.expand_as(tensor)  # type: ignore

    def forward(self, output: DiscriminatorOutput, target: GANTarget) -> torch.Tensor:
        prediction = output.output
        is_real = target.is_real
        if self.gan_mode == "wgangp":
            # Wasserstein critic: raise real scores, lower fake scores.
            loss = -prediction.mean() if is_real else prediction.mean()
        elif self.gan_mode in ("lsgan", "vanilla"):
            loss = self.loss(prediction, self.expand_target(prediction, is_real))
        else:
            raise NotImplementedError(f"gan_mode '{self.gan_mode}' is not implemented")
        if output.cond_logits is not None and target.is_real:
            # Auxiliary classifier term, applied on real samples only.
            loss = loss + self.ce(output.cond_logits, target.labels)
        return loss
__all__ = [
"GANTarget",
"GradientNormLoss",
"GANLoss",
] | cflearn/models/cv/gan/losses.py | import torch
from torch import nn
from typing import Optional
from typing import NamedTuple
from .discriminators import DiscriminatorOutput
from ....misc.toolkit import get_gradient
class GANTarget(NamedTuple):
    """Discriminator target: real/fake flag plus optional class labels
    (labels feed the conditional cross-entropy term in GANLoss)."""
    is_real: bool
    labels: Optional[torch.Tensor] = None
class GradientNormLoss(nn.Module):
    """Penalize the deviation of per-sample input-gradient L2 norms from a
    target value ``k`` (WGAN-GP style gradient penalty)."""

    def __init__(self, k: float = 1.0):
        super().__init__()
        self.k = k

    def forward(self, net_input: torch.Tensor, output: torch.Tensor) -> torch.Tensor:
        grads = get_gradient(output, net_input, True, True)
        flat = grads.view(net_input.shape[0], -1)  # type: ignore
        norms = flat.norm(2, dim=1)
        # Mean squared distance of each sample's gradient norm from k.
        return ((norms - self.k) ** 2).mean()
class GANLoss(nn.Module):
    """Adversarial loss supporting 'lsgan', 'vanilla' and 'wgangp' modes,
    with an optional auxiliary classification term for conditional GANs."""

    def __init__(self, gan_mode: str):
        super().__init__()
        self.loss: nn.Module
        self.gan_mode = gan_mode
        # Buffers follow the module across devices / dtypes.
        self.register_buffer("real_label", torch.tensor(1.0))
        self.register_buffer("fake_label", torch.tensor(0.0))
        if gan_mode == "lsgan":
            self.loss = nn.MSELoss()
        elif gan_mode == "vanilla":
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode == "wgangp":
            self.loss = GradientNormLoss(k=1.0)
        else:
            raise NotImplementedError(f"gan mode {gan_mode} not implemented")
        self.ce = nn.CrossEntropyLoss()

    def expand_target(self, tensor: torch.Tensor, is_real: bool) -> torch.Tensor:
        """Broadcast the real/fake label buffer to the prediction's shape."""
        if is_real:
            return self.real_label.expand_as(tensor)  # type: ignore
        return self.fake_label.expand_as(tensor)  # type: ignore

    def forward(self, output: DiscriminatorOutput, target: GANTarget) -> torch.Tensor:
        prediction = output.output
        is_real = target.is_real
        if self.gan_mode == "wgangp":
            # Wasserstein critic: raise real scores, lower fake scores.
            loss = -prediction.mean() if is_real else prediction.mean()
        elif self.gan_mode in ("lsgan", "vanilla"):
            loss = self.loss(prediction, self.expand_target(prediction, is_real))
        else:
            raise NotImplementedError(f"gan_mode '{self.gan_mode}' is not implemented")
        if output.cond_logits is not None and target.is_real:
            # Auxiliary classifier term, applied on real samples only.
            loss = loss + self.ce(output.cond_logits, target.labels)
        return loss
__all__ = [
"GANTarget",
"GradientNormLoss",
"GANLoss",
] | 0.944035 | 0.350144 |
from __future__ import division
from itertools import cycle
import numpy as np
import pandas as pd
from psychopy import visual, event
from visigoth.stimuli import Pattern, FixationTask
def create_stimuli(exp):
    """Initialize stimulus objects.

    Returns every local via ``locals()``; the stimuli are later accessed by
    name as ``exp.s.fix`` and ``exp.s.pattern`` (see run_trial).
    """
    # Fixation point, with color change detection task
    fix = FixationTask(
        exp.win,
        exp.clock,
        exp.p.fix_colors,
        exp.p.fix_duration,
        exp.p.fix_radius,
        exp.p.fix_pos,
    )
    # Average of multiple sinusoidal gratings; contrast is divided by
    # sqrt(n) so the summed pattern keeps a comparable overall contrast.
    pattern = Pattern(exp.win,
                      n=exp.p.stim_gratings,
                      contrast=1 / np.sqrt(exp.p.stim_gratings),
                      elementTex=exp.p.stim_tex,
                      elementMask=exp.p.stim_mask,
                      sizes=exp.p.stim_size,
                      sfs=exp.p.stim_sf,
                      pos=(0, 0))
    return locals()
def generate_trials(exp):
    """Yield one info record per stimulus block.

    Each of ``exp.p.n_blocks`` blocks produces two records (one per stim
    position); ``block_time`` is the scheduled onset, ``block_onset`` is
    filled in later by run_trial.
    """
    exp.fix_colors = cycle(exp.p.fix_colors)
    for block in range(exp.p.n_blocks):
        for stim_pos in range(2):
            block_time = (block * 2 + stim_pos) * exp.p.block_dur
            info = pd.Series(dict(
                block=block,
                block_time=block_time,
                block_onset=None,
                stim_pos=stim_pos,
            # BUGFIX: np.object was removed in NumPy 1.24; the builtin
            # `object` is the documented replacement.
            ), dtype=object)
            yield info
def run_trial(exp, info):
    """Execute one stimulus block: flicker the grating at ``update_hz``
    while monitoring fixation; returns ``info`` with ``block_onset`` set."""
    exp.s.pattern.pos = exp.p.stim_pos[int(info.stim_pos)]
    block_dur = exp.p.block_dur
    update_hz = exp.p.update_hz
    # NOTE(review): range() needs an int -- assumes block_dur * update_hz
    # is integral; confirm parameter types.
    for i in range(block_dur * update_hz):
        # Fresh random phases each refresh make the pattern flicker.
        exp.s.pattern.randomize_phases(limits=(.2, .8))
        end = info["block_time"] + (i + 1) * (1 / update_hz)
        if not i:
            # Record the actual onset time on the first frame only.
            info["block_onset"] = exp.clock.getTime()
        exp.wait_until(end, draw=["pattern", "fix"])
        exp.check_fixation(allow_blinks=True)
        exp.check_abort()
    return info
def summarize_task_performance(exp):
    """Score fixation-task color changes against key presses.

    Returns a DataFrame with columns ["time", "event"] where event is one
    of "hit", "miss" or "fa"; returns None when no trials were run.  The
    result is cached on ``exp.task_events`` because the key-press queue can
    only be read once.
    """
    # TODO should this code, and the code that computes hit rates /false alarms
    # go into the fixation task object? Probably!
    if not exp.trial_data:
        return None
    if hasattr(exp, "task_events"):
        return exp.task_events
    else:
        change_times = exp.s.fix.change_times
        key_presses = event.getKeys(exp.p.resp_keys, timeStamped=exp.clock)
        if key_presses:
            _, press_times = list(zip(*key_presses))
        else:
            press_times = []
        change_times = np.array(change_times)
        press_times = np.array(press_times)
        events = []
        # A change is a hit when any press follows it within resp_thresh.
        for t in change_times:
            deltas = press_times - t
            hit = np.any((0 < deltas) & (deltas < exp.p.resp_thresh))
            events.append((t, "hit" if hit else "miss"))
        # A press with no change within resp_thresh before it is a false alarm.
        for t in press_times:
            deltas = t - change_times
            fa = ~np.any((0 < deltas) & (deltas < exp.p.resp_thresh))
            if fa:
                events.append((t, "fa"))
        events = pd.DataFrame(events, columns=["time", "event"])
        exp.task_events = events
        return events
def compute_performance(exp):
    """Return (hit_rate, false_alarm_count) from the fixation-task log,
    or (None, None) when no events were recorded."""
    events = summarize_task_performance(exp)
    if events is None:
        return None, None
    n_hits = (events["event"] == "hit").sum()
    n_changes = events["event"].isin(["hit", "miss"]).sum()
    false_alarms = (events["event"] == "fa").sum()
    return n_hits / n_changes, false_alarms
def show_performance(exp, hit_rate, false_alarms):
    """Draw an end-of-run feedback screen with hit rate and false alarms."""
    lines = ["End of the run!"]
    if hit_rate is not None:
        lines.append("")
        lines.append(
            "You detected {:.0%} of the color changes,".format(hit_rate)
        )
        lines.append(
            "with {:0d} false alarms.".format(false_alarms)
        )
    # Stack the text lines vertically, centered around y = 0.
    n = len(lines)
    height = .5
    heights = (np.arange(n)[::-1] - (n / 2 - .5)) * height
    for line, y in zip(lines, heights):
        visual.TextStim(exp.win, line,
                        pos=(0, y), height=height).draw()
    exp.win.flip()
from itertools import cycle
import numpy as np
import pandas as pd
from psychopy import visual, event
from visigoth.stimuli import Pattern, FixationTask
def create_stimuli(exp):
    """Initialize stimulus objects.

    Returns every local via ``locals()``; the stimuli are later accessed by
    name as ``exp.s.fix`` and ``exp.s.pattern`` (see run_trial).
    """
    # Fixation point, with color change detection task
    fix = FixationTask(
        exp.win,
        exp.clock,
        exp.p.fix_colors,
        exp.p.fix_duration,
        exp.p.fix_radius,
        exp.p.fix_pos,
    )
    # Average of multiple sinusoidal gratings; contrast is divided by
    # sqrt(n) so the summed pattern keeps a comparable overall contrast.
    pattern = Pattern(exp.win,
                      n=exp.p.stim_gratings,
                      contrast=1 / np.sqrt(exp.p.stim_gratings),
                      elementTex=exp.p.stim_tex,
                      elementMask=exp.p.stim_mask,
                      sizes=exp.p.stim_size,
                      sfs=exp.p.stim_sf,
                      pos=(0, 0))
    return locals()
def generate_trials(exp):
    """Yield one info record per stimulus block.

    Each of ``exp.p.n_blocks`` blocks produces two records (one per stim
    position); ``block_time`` is the scheduled onset, ``block_onset`` is
    filled in later by run_trial.
    """
    exp.fix_colors = cycle(exp.p.fix_colors)
    for block in range(exp.p.n_blocks):
        for stim_pos in range(2):
            block_time = (block * 2 + stim_pos) * exp.p.block_dur
            info = pd.Series(dict(
                block=block,
                block_time=block_time,
                block_onset=None,
                stim_pos=stim_pos,
            # BUGFIX: np.object was removed in NumPy 1.24; the builtin
            # `object` is the documented replacement.
            ), dtype=object)
            yield info
def run_trial(exp, info):
    """Execute one stimulus block: flicker the grating at ``update_hz``
    while monitoring fixation; returns ``info`` with ``block_onset`` set."""
    exp.s.pattern.pos = exp.p.stim_pos[int(info.stim_pos)]
    block_dur = exp.p.block_dur
    update_hz = exp.p.update_hz
    # NOTE(review): range() needs an int -- assumes block_dur * update_hz
    # is integral; confirm parameter types.
    for i in range(block_dur * update_hz):
        # Fresh random phases each refresh make the pattern flicker.
        exp.s.pattern.randomize_phases(limits=(.2, .8))
        end = info["block_time"] + (i + 1) * (1 / update_hz)
        if not i:
            # Record the actual onset time on the first frame only.
            info["block_onset"] = exp.clock.getTime()
        exp.wait_until(end, draw=["pattern", "fix"])
        exp.check_fixation(allow_blinks=True)
        exp.check_abort()
    return info
def summarize_task_performance(exp):
    """Score fixation-task color changes against key presses.

    Returns a DataFrame with columns ["time", "event"] where event is one
    of "hit", "miss" or "fa"; returns None when no trials were run.  The
    result is cached on ``exp.task_events`` because the key-press queue can
    only be read once.
    """
    # TODO should this code, and the code that computes hit rates /false alarms
    # go into the fixation task object? Probably!
    if not exp.trial_data:
        return None
    if hasattr(exp, "task_events"):
        return exp.task_events
    else:
        change_times = exp.s.fix.change_times
        key_presses = event.getKeys(exp.p.resp_keys, timeStamped=exp.clock)
        if key_presses:
            _, press_times = list(zip(*key_presses))
        else:
            press_times = []
        change_times = np.array(change_times)
        press_times = np.array(press_times)
        events = []
        # A change is a hit when any press follows it within resp_thresh.
        for t in change_times:
            deltas = press_times - t
            hit = np.any((0 < deltas) & (deltas < exp.p.resp_thresh))
            events.append((t, "hit" if hit else "miss"))
        # A press with no change within resp_thresh before it is a false alarm.
        for t in press_times:
            deltas = t - change_times
            fa = ~np.any((0 < deltas) & (deltas < exp.p.resp_thresh))
            if fa:
                events.append((t, "fa"))
        events = pd.DataFrame(events, columns=["time", "event"])
        exp.task_events = events
        return events
def compute_performance(exp):
    """Return (hit_rate, false_alarm_count) from the fixation-task log,
    or (None, None) when no events were recorded."""
    events = summarize_task_performance(exp)
    if events is None:
        return None, None
    n_hits = (events["event"] == "hit").sum()
    n_changes = events["event"].isin(["hit", "miss"]).sum()
    false_alarms = (events["event"] == "fa").sum()
    return n_hits / n_changes, false_alarms
def show_performance(exp, hit_rate, false_alarms):
    """Draw an end-of-run feedback screen with hit rate and false alarms."""
    lines = ["End of the run!"]
    if hit_rate is not None:
        lines.append("")
        lines.append(
            "You detected {:.0%} of the color changes,".format(hit_rate)
        )
        lines.append(
            "with {:0d} false alarms.".format(false_alarms)
        )
    # Stack the text lines vertically, centered around y = 0.
    n = len(lines)
    height = .5
    heights = (np.arange(n)[::-1] - (n / 2 - .5)) * height
    for line, y in zip(lines, heights):
        visual.TextStim(exp.win, line,
                        pos=(0, y), height=height).draw()
    exp.win.flip()
import json
LEFT_FILE = 'coverage-datailed.json'
RIGHT_FILE = 'coverage-datailed.json'
def line_info(lines):
    """Summarize a gcovr per-file ``lines`` list into line-number sets.

    Returns None when *lines* is None, otherwise a dict with 'all' (every
    reported line), 'code' (lines not flagged gcovr/noncode) and 'covered'
    (code lines with a positive hit count).  Raises when line numbers repeat.
    """
    if lines is None:
        return None
    all_lines = set()
    code_lines = set()
    covered_lines = set()
    for entry in lines:
        number = entry['line_number']
        all_lines.add(number)
        if not entry['gcovr/noncode']:
            code_lines.add(number)
            if entry['count'] > 0:
                covered_lines.add(number)
    if len(all_lines) != len(lines):
        raise Exception('Non unique line numbers')
    return {'all': all_lines, 'code': code_lines, 'covered': covered_lines}
def line_coverage_summary(info):
    """Turn a line_info() dict into counts; falsy input passes through."""
    if not info:
        return info
    return {
        'all_lines': len(info['all']),
        'code_lines': len(info['code']),
        'covered_lines': len(info['covered']),
    }
def compare_line_coverage(left, right):
    """Compare covered-line sets from two line_info() results.

    Either side may be None (file absent from that report); when both are
    present their 'all' and 'code' sets must match.  Returns None when both
    are None, otherwise a dict holding counts plus the underlying line sets:
    'both' = covered on both sides, 'left'/'right' = covered on one side
    only.  Count fields are None when that side is absent.
    """
    if left and right and left['all'] != right['all']:
        raise Exception('All lines mismatch')
    if left and right and left['code'] != right['code']:
        raise Exception('Code lines mismatch')
    if left is None and right is None:
        return None

    def _count(line_set):
        # BUGFIX: the original used `x and len(x)`, which returned an empty
        # set instead of 0 whenever a side had no exclusive covered lines.
        return None if line_set is None else len(line_set)

    all_info_source = left or right
    result = {
        'all': len(all_info_source['all']),
        'all_lines': all_info_source['all'],
        'code': len(all_info_source['code']),
        'code_lines': all_info_source['code'],
    }
    only_left_lines = left['covered'] if left else None
    only_right_lines = right['covered'] if right else None
    both_lines = None
    if only_left_lines is not None and only_right_lines is not None:
        both_lines = only_left_lines & only_right_lines
        only_left_lines = only_left_lines - both_lines
        only_right_lines = only_right_lines - both_lines
    result['both'] = _count(both_lines)
    result['both_lines'] = both_lines
    result['left'] = _count(only_left_lines)
    result['left_lines'] = only_left_lines
    result['right'] = _count(only_right_lines)
    result['right_lines'] = only_right_lines
    return result
def compare_coverage_files(left, right):
    """Diff two {file: lines} mappings file-by-file over their filename union."""
    result = {}
    for name in sorted(left.keys() | right.keys()):
        l_info = line_info(left.get(name))
        r_info = line_info(right.get(name))
        result[name] = {
            'left_summary': line_coverage_summary(l_info),
            'right_summary': line_coverage_summary(r_info),
            'coverage': compare_line_coverage(l_info, r_info),
        }
    return result
def compare_coverage_reports(left, right):
    """Validate two gcovr JSON reports and diff their per-file line coverage.

    Raises when the format versions differ or when either report lists the
    same file twice.
    """
    if left['gcovr/format_version'] != right['gcovr/format_version']:
        raise Exception('different report formats')
    left_files = {it['file']: it['lines'] for it in left['files']}
    right_files = {it['file']: it['lines'] for it in right['files']}
    if len(left_files) != len(left['files']):
        raise Exception('left: non unique files')
    if len(right_files) != len(right['files']):
        # BUGFIX: this message previously said 'left:' for the right report.
        raise Exception('right: non unique files')
    return compare_coverage_files(left_files, right_files)
def main():
    # Load both coverage reports, diff them, and write the result to
    # "<LEFT>-<RIGHT>.diff.json" next to the inputs.
    with open(LEFT_FILE) as f:
        left_data = json.load(f)
    with open(RIGHT_FILE) as f:
        right_data = json.load(f)
    result = compare_coverage_reports(left_data, right_data)
    result_report = {
        'left': LEFT_FILE,
        'right': RIGHT_FILE,
        'coverage_diff': result
    }
    result_file_name = f'{LEFT_FILE}-{RIGHT_FILE}.diff.json'
    # default=list serializes the set objects embedded in the diff.
    with open(result_file_name, 'w') as f:
        json.dump(result_report, f, default=list)
if __name__ == '__main__':
    main()
LEFT_FILE = 'coverage-datailed.json'
RIGHT_FILE = 'coverage-datailed.json'
def line_info(lines):
    """Summarize a gcovr per-file ``lines`` list into line-number sets.

    Returns None when *lines* is None, otherwise a dict with 'all' (every
    reported line), 'code' (lines not flagged gcovr/noncode) and 'covered'
    (code lines with a positive hit count).  Raises when line numbers repeat.
    """
    if lines is None:
        return None
    all_lines = set()
    code_lines = set()
    covered_lines = set()
    for entry in lines:
        number = entry['line_number']
        all_lines.add(number)
        if not entry['gcovr/noncode']:
            code_lines.add(number)
            if entry['count'] > 0:
                covered_lines.add(number)
    if len(all_lines) != len(lines):
        raise Exception('Non unique line numbers')
    return {'all': all_lines, 'code': code_lines, 'covered': covered_lines}
def line_coverage_summary(info):
    """Turn a line_info() dict into counts; falsy input passes through."""
    if not info:
        return info
    return {
        'all_lines': len(info['all']),
        'code_lines': len(info['code']),
        'covered_lines': len(info['covered']),
    }
def compare_line_coverage(left, right):
    """Compare covered-line sets from two line_info() results.

    Either side may be None (file absent from that report); when both are
    present their 'all' and 'code' sets must match.  Returns None when both
    are None, otherwise a dict holding counts plus the underlying line sets:
    'both' = covered on both sides, 'left'/'right' = covered on one side
    only.  Count fields are None when that side is absent.
    """
    if left and right and left['all'] != right['all']:
        raise Exception('All lines mismatch')
    if left and right and left['code'] != right['code']:
        raise Exception('Code lines mismatch')
    if left is None and right is None:
        return None

    def _count(line_set):
        # BUGFIX: the original used `x and len(x)`, which returned an empty
        # set instead of 0 whenever a side had no exclusive covered lines.
        return None if line_set is None else len(line_set)

    all_info_source = left or right
    result = {
        'all': len(all_info_source['all']),
        'all_lines': all_info_source['all'],
        'code': len(all_info_source['code']),
        'code_lines': all_info_source['code'],
    }
    only_left_lines = left['covered'] if left else None
    only_right_lines = right['covered'] if right else None
    both_lines = None
    if only_left_lines is not None and only_right_lines is not None:
        both_lines = only_left_lines & only_right_lines
        only_left_lines = only_left_lines - both_lines
        only_right_lines = only_right_lines - both_lines
    result['both'] = _count(both_lines)
    result['both_lines'] = both_lines
    result['left'] = _count(only_left_lines)
    result['left_lines'] = only_left_lines
    result['right'] = _count(only_right_lines)
    result['right_lines'] = only_right_lines
    return result
def compare_coverage_files(left, right):
    """Diff two {file: lines} mappings file-by-file over their filename union."""
    result = {}
    for name in sorted(left.keys() | right.keys()):
        l_info = line_info(left.get(name))
        r_info = line_info(right.get(name))
        result[name] = {
            'left_summary': line_coverage_summary(l_info),
            'right_summary': line_coverage_summary(r_info),
            'coverage': compare_line_coverage(l_info, r_info),
        }
    return result
def compare_coverage_reports(left, right):
    """Validate two gcovr JSON reports and diff their per-file line coverage.

    Raises when the format versions differ or when either report lists the
    same file twice.
    """
    if left['gcovr/format_version'] != right['gcovr/format_version']:
        raise Exception('different report formats')
    left_files = {it['file']: it['lines'] for it in left['files']}
    right_files = {it['file']: it['lines'] for it in right['files']}
    if len(left_files) != len(left['files']):
        raise Exception('left: non unique files')
    if len(right_files) != len(right['files']):
        # BUGFIX: this message previously said 'left:' for the right report.
        raise Exception('right: non unique files')
    return compare_coverage_files(left_files, right_files)
def main():
    # Load both coverage reports, diff them, and write the result to
    # "<LEFT>-<RIGHT>.diff.json" next to the inputs.
    with open(LEFT_FILE) as f:
        left_data = json.load(f)
    with open(RIGHT_FILE) as f:
        right_data = json.load(f)
    result = compare_coverage_reports(left_data, right_data)
    result_report = {
        'left': LEFT_FILE,
        'right': RIGHT_FILE,
        'coverage_diff': result
    }
    result_file_name = f'{LEFT_FILE}-{RIGHT_FILE}.diff.json'
    # default=list serializes the set objects embedded in the diff.
    with open(result_file_name, 'w') as f:
        json.dump(result_report, f, default=list)
if __name__ == '__main__':
    main()
def api_v4(self):
    """Wire up the full Cloudflare v4 API command tree on this client.

    Each helper attaches one endpoint family; /user and /zones are attached
    first because later helpers hang sub-trees off self.user and self.zones.
    """
    # The API commands for /user/
    user(self)
    user_load_balancers(self)
    user_virtual_dns(self)
    # The API commands for /zones/
    zones(self)
    zones_settings(self)
    zones_analytics(self)
    zones_firewall(self)
    zones_rate_limits(self)
    zones_amp(self)
    # The API commands for /railguns/
    railguns(self)
    # The API commands for /organizations/
    organizations(self)
    organizations_virtual_dns(self)
    # The API commands for /certificates/
    certificates(self)
    # The API commands for /ips/
    ips(self)
    # The API commands for /zones/:zone_id/argo
    zones_argo(self)
    # The API commands for /zones/:zone_id/dnssec
    zones_dnssec(self)
    # The API commands for /zones/:zone_id/ssl
    zones_ssl(self)
    # The API commands for CLB /zones/:zone_id/load_balancers & /user/load_balancers
    zones_load_balancers(self)
    zones_dns_analytics(self)
def user(self):
    """Attach the /user API command tree to the client."""
    base = self._base
    self.user = self._add_with_auth(base, "user")
    u = self.user
    u.billing = self._add_unused(base, "user/billing")
    u.billing.history = self._add_with_auth(base, "user/billing/history")
    u.billing.profile = self._add_with_auth(base, "user/billing/profile")
    u.billing.subscriptions = self._add_unused(base, "user/billing/subscriptions")
    u.billing.subscriptions.apps = self._add_with_auth(base, "user/billing/subscriptions/apps")
    u.billing.subscriptions.zones = self._add_with_auth(base, "user/billing/subscriptions/zones")
    u.firewall = self._add_unused(base, "user/firewall")
    u.firewall.access_rules = self._add_unused(base, "user/firewall/access_rules")
    u.firewall.access_rules.rules = self._add_with_auth(base, "user/firewall/access_rules/rules")
    u.organizations = self._add_with_auth(base, "user/organizations")
    u.invites = self._add_with_auth(base, "user/invites")
    u.subscriptions = self._add_with_auth(base, "user/subscriptions")
def zones(self):
    """Attach the /zones API command tree to the client."""
    base = self._base
    self.zones = self._add_with_auth(base, "zones")
    z = self.zones
    z.activation_check = self._add_with_auth(base, "zones", "activation_check")
    z.available_plans = self._add_with_auth(base, "zones", "available_plans")
    z.available_rate_plans = self._add_with_auth(base, "zones", "available_rate_plans")
    z.custom_certificates = self._add_with_auth(base, "zones", "custom_certificates")
    z.custom_certificates.prioritize = self._add_with_auth(base, "zones", "custom_certificates/prioritize")
    z.custom_pages = self._add_with_auth(base, "zones", "custom_pages")
    z.dns_records = self._add_with_auth(base, "zones", "dns_records")
    z.keyless_certificates = self._add_with_auth(base, "zones", "keyless_certificates")
    z.pagerules = self._add_with_auth(base, "zones", "pagerules")
    z.purge_cache = self._add_with_auth(base, "zones", "purge_cache")
    z.railguns = self._add_with_auth(base, "zones", "railguns")
    z.railguns.diagnose = self._add_with_auth(base, "zones", "railguns", "diagnose")
    z.subscription = self._add_with_auth(base, "zones", "subscription")
    z.subscriptions = self._add_with_auth(base, "zones", "subscriptions")
    z.dns_records.export = self._add_with_auth(base, "zones", "dns_records/export")
    # "import" is a Python keyword, so this one must go through setattr().
    setattr(z.dns_records, "import", self._add_with_auth(base, "zones", "dns_records/import"))
    z.custom_hostnames = self._add_with_auth(base, "zones", "custom_hostnames")
def zones_settings(self):
    """Attach the /zones/:id/settings command tree.

    Every settings endpoint shares the shape zones/:id/settings/<name>, so
    they are registered in one data-driven loop (original order preserved).
    """
    base = self._base
    self.zones.settings = self._add_with_auth(base, "zones", "settings")
    branch = self.zones.settings
    for name in (
        "advanced_ddos", "always_online", "always_use_https",
        "browser_cache_ttl", "browser_check", "cache_level",
        "challenge_ttl", "development_mode", "email_obfuscation",
        "hotlink_protection", "ip_geolocation", "ipv6", "minify",
        "mirage", "mobile_redirect", "origin_error_page_pass_thru",
        "polish", "prefetch_preload", "response_buffering",
        "rocket_loader", "security_header", "security_level",
        "server_side_exclude", "sort_query_string_for_cache", "ssl",
        "tls_client_auth", "true_client_ip_header", "tls_1_2_only",
        "tls_1_3", "websockets", "waf", "webp", "http2", "pseudo_ipv4",
        "opportunistic_encryption", "automatic_https_rewrites",
    ):
        setattr(branch, name, self._add_with_auth(base, "zones", "settings/" + name))
def zones_analytics(self):
    """Attach the /zones/:id/analytics commands."""
    base = self._base
    z = self.zones
    z.analytics = self._add_unused(base, "zones", "analytics")
    z.analytics.colos = self._add_with_auth(base, "zones", "analytics/colos")
    z.analytics.dashboard = self._add_with_auth(base, "zones", "analytics/dashboard")
def zones_firewall(self):
    """Attach the /zones/:id/firewall command tree.

    BUGFIX: the "firewall" node was previously registered with ``branch``
    (the zones attribute holder) as its base instead of ``base`` (the API
    root), unlike every other endpoint in this module.
    """
    base = self._base
    branch = self.zones
    setattr(branch, "firewall",
            self._add_unused(base, "zones", "firewall"))
    branch = self.zones.firewall
    setattr(branch, "access_rules",
            self._add_unused(base, "zones", "firewall/access_rules"))
    setattr(branch, "waf",
            self._add_unused(base, "zones", "firewall/waf"))
    branch = self.zones.firewall.waf
    setattr(branch, "packages",
            self._add_with_auth(base, "zones", "firewall/waf/packages"))
    branch = self.zones.firewall.waf.packages
    setattr(branch, "groups",
            self._add_with_auth(base, "zones", "firewall/waf/packages", "groups"))
    setattr(branch, "rules",
            self._add_with_auth(base, "zones", "firewall/waf/packages", "rules"))
    branch = self.zones.firewall.access_rules
    setattr(branch, "rules",
            self._add_with_auth(base, "zones", "firewall/access_rules/rules"))
    branch = self.zones.firewall
    setattr(branch, "lockdowns",
            self._add_with_auth(base, "zones", "firewall/lockdowns"))
    setattr(branch, "ua_rules",
            self._add_with_auth(base, "zones", "firewall/ua_rules"))
def zones_rate_limits(self):
    """Attach the /zones/:id/rate_limits command."""
    self.zones.rate_limits = self._add_with_auth(self._base, "zones", "rate_limits")
def zones_dns_analytics(self):
    """Attach the /zones/:id/dns_analytics commands."""
    base = self._base
    z = self.zones
    z.dns_analytics = self._add_unused(base, "zones", "dns_analytics")
    z.dns_analytics.report = self._add_with_auth(base, "zones", "dns_analytics/report")
    z.dns_analytics.report.bytime = self._add_with_auth(base, "zones", "dns_analytics/report/bytime")
def zones_amp(self):
    """Attach the /zones/:id/amp commands."""
    base = self._base
    self.zones.amp = self._add_unused(base, "zones", "amp")
    self.zones.amp.viewer = self._add_with_auth(base, "zones", "amp/viewer")
def railguns(self):
    """Attach the /railguns commands."""
    base = self._base
    self.railguns = self._add_with_auth(base, "railguns")
    self.railguns.zones = self._add_with_auth(base, "railguns", "zones")
def organizations(self):
    """Attach the /organizations API command tree."""
    base = self._base
    self.organizations = self._add_with_auth(base, "organizations")
    org = self.organizations
    org.members = self._add_with_auth(base, "organizations", "members")
    org.invite = self._add_with_auth(base, "organizations", "invite")
    org.invites = self._add_with_auth(base, "organizations", "invites")
    org.railguns = self._add_with_auth(base, "organizations", "railguns")
    org.railguns.zones = self._add_with_auth(base, "organizations", "railguns", "zones")
    org.roles = self._add_with_auth(base, "organizations", "roles")
    org.firewall = self._add_unused(base, "organizations", "firewall")
    org.firewall.access_rules = self._add_unused(base, "organizations", "firewall/access_rules")
    org.firewall.access_rules.rules = self._add_with_auth(base, "organizations", "firewall/access_rules/rules")
    org.load_balancers = self._add_with_auth(base, "organizations", "load_balancers")
    org.load_balancers.monitors = self._add_with_auth(base, "organizations", "load_balancers/monitors")
    org.load_balancers.pools = self._add_with_auth(base, "organizations", "load_balancers/pools")
def certificates(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "certificates",
self._add_with_cert_auth(base, "certificates"))
def ips(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "ips",
self._add_noauth(base, "ips"))
def zones_argo(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "argo",
self._add_unused(base, "zones", "argo"))
branch = self.zones.argo
setattr(branch, "tiered_caching",
self._add_with_auth(base, "zones", "argo/tiered_caching"))
setattr(branch, "smart_routing",
self._add_with_auth(base, "zones", "argo/smart_routing"))
def zones_dnssec(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "dnssec",
self._add_with_auth(base, "zones", "dnssec"))
def zones_ssl(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "ssl",
self._add_unused(base, "zones", "ssl"))
branch = self.zones.ssl
setattr(branch, "analyze",
self._add_with_auth(base, "zones", "ssl/analyze"))
setattr(branch, "certificate_packs",
self._add_with_auth(base, "zones", "ssl/certificate_packs"))
setattr(branch, "verification",
self._add_with_auth(base, "zones", "ssl/verification"))
def zones_load_balancers(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "load_balancers",
self._add_with_auth(base, "zones", "load_balancers"))
def user_load_balancers(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.user
setattr(branch, "load_balancers",
self._add_unused(base, "user/load_balancers"))
branch = self.user.load_balancers
setattr(branch, "monitors",
self._add_with_auth(base, "user/load_balancers/monitors"))
setattr(branch, "pools",
self._add_with_auth(base, "user/load_balancers/pools"))
def user_virtual_dns(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.user
setattr(branch, "virtual_dns",
self._add_with_auth(base, "user/virtual_dns"))
branch = self.user.virtual_dns
setattr(branch, "dns_analytics",
self._add_unused(base, "user/virtual_dns", "dns_analytics"))
branch = self.user.virtual_dns.dns_analytics
setattr(branch, "report",
self._add_with_auth(base, "user/virtual_dns", "dns_analytics/report"))
branch = self.user.virtual_dns.dns_analytics.report
setattr(branch, "bytime",
self._add_with_auth(base, "user/virtual_dns", "dns_analytics/report/bytime"))
def organizations_virtual_dns(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.organizations
setattr(branch, "virtual_dns",
self._add_with_auth(base, "organizations", "virtual_dns"))
branch = self.organizations.virtual_dns
setattr(branch, "dns_analytics",
self._add_unused(base, "organizations", "virtual_dns", "dns_analytics"))
branch = self.organizations.virtual_dns.dns_analytics
setattr(branch, "report",
self._add_with_auth(base, "organizations", "virtual_dns", "dns_analytics/report"))
branch = self.organizations.virtual_dns.dns_analytics.report
setattr(branch, "bytime",
self._add_with_auth(base, "organizations", "virtual_dns", "dns_analytics/report/bytime")) | proxySTAR_V3/certbot/venv/lib/python2.7/site-packages/CloudFlare/api_v4.py |
def api_v4(self):
""" API core commands for Cloudflare API"""
# The API commands for /user/
user(self)
user_load_balancers(self)
user_virtual_dns(self)
# The API commands for /zones/
zones(self)
zones_settings(self)
zones_analytics(self)
zones_firewall(self)
zones_rate_limits(self)
zones_amp(self)
# The API commands for /railguns/
railguns(self)
# The API commands for /organizations/
organizations(self)
organizations_virtual_dns(self)
# The API commands for /certificates/
certificates(self)
# The API commands for /ips/
ips(self)
# The API commands for /zones/:zone_id/argo
zones_argo(self)
# The API commands for /zones/:zone_id/dnssec
zones_dnssec(self)
# The API commands for /zones/:zone_id/ssl
zones_ssl(self)
# The API commands for CLB /zones/:zone_id/load_balancers & /user/load_balancers
zones_load_balancers(self)
zones_dns_analytics(self)
def user(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "user",
self._add_with_auth(base, "user"))
branch = self.user
setattr(branch, "billing",
self._add_unused(base, "user/billing"))
branch = self.user.billing
setattr(branch, "history",
self._add_with_auth(base, "user/billing/history"))
setattr(branch, "profile",
self._add_with_auth(base, "user/billing/profile"))
setattr(branch, "subscriptions",
self._add_unused(base, "user/billing/subscriptions"))
branch = self.user.billing.subscriptions
setattr(branch, "apps",
self._add_with_auth(base, "user/billing/subscriptions/apps"))
setattr(branch, "zones",
self._add_with_auth(base, "user/billing/subscriptions/zones"))
branch = self.user
setattr(branch, "firewall",
self._add_unused(base, "user/firewall"))
branch = self.user.firewall
setattr(branch, "access_rules",
self._add_unused(base, "user/firewall/access_rules"))
branch = self.user.firewall.access_rules
setattr(branch, "rules",
self._add_with_auth(base, "user/firewall/access_rules/rules"))
branch = self.user
setattr(branch, "organizations",
self._add_with_auth(base, "user/organizations"))
setattr(branch, "invites",
self._add_with_auth(base, "user/invites"))
setattr(branch, "subscriptions",
self._add_with_auth(base, "user/subscriptions"))
def zones(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "zones",
self._add_with_auth(base, "zones"))
branch = self.zones
setattr(branch, "activation_check",
self._add_with_auth(base, "zones", "activation_check"))
setattr(branch, "available_plans",
self._add_with_auth(base, "zones", "available_plans"))
setattr(branch, "available_rate_plans",
self._add_with_auth(base, "zones", "available_rate_plans"))
setattr(branch, "custom_certificates",
self._add_with_auth(base, "zones", "custom_certificates"))
branch = self.zones.custom_certificates
setattr(branch, "prioritize",
self._add_with_auth(base, "zones", "custom_certificates/prioritize"))
branch = self.zones
setattr(branch, "custom_pages",
self._add_with_auth(base, "zones", "custom_pages"))
setattr(branch, "dns_records",
self._add_with_auth(base, "zones", "dns_records"))
setattr(branch, "keyless_certificates",
self._add_with_auth(base, "zones", "keyless_certificates"))
setattr(branch, "pagerules",
self._add_with_auth(base, "zones", "pagerules"))
setattr(branch, "purge_cache",
self._add_with_auth(base, "zones", "purge_cache"))
setattr(branch, "railguns",
self._add_with_auth(base, "zones", "railguns"))
branch = self.zones.railguns
setattr(branch, "diagnose",
self._add_with_auth(base, "zones", "railguns", "diagnose"))
branch = self.zones
setattr(branch, "subscription",
self._add_with_auth(base, "zones", "subscription"))
setattr(branch, "subscriptions",
self._add_with_auth(base, "zones", "subscriptions"))
branch = self.zones.dns_records
setattr(branch, "export",
self._add_with_auth(base, "zones", "dns_records/export"))
setattr(branch, "import",
self._add_with_auth(base, "zones", "dns_records/import"))
branch = self.zones
setattr(branch, "custom_hostnames",
self._add_with_auth(base, "zones", "custom_hostnames"))
def zones_settings(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "settings",
self._add_with_auth(base, "zones", "settings"))
branch = self.zones.settings
setattr(branch, "advanced_ddos",
self._add_with_auth(base, "zones", "settings/advanced_ddos"))
setattr(branch, "always_online",
self._add_with_auth(base, "zones", "settings/always_online"))
setattr(branch, "always_use_https",
self._add_with_auth(base, "zones", "settings/always_use_https"))
setattr(branch, "browser_cache_ttl",
self._add_with_auth(base, "zones", "settings/browser_cache_ttl"))
setattr(branch, "browser_check",
self._add_with_auth(base, "zones", "settings/browser_check"))
setattr(branch, "cache_level",
self._add_with_auth(base, "zones", "settings/cache_level"))
setattr(branch, "challenge_ttl",
self._add_with_auth(base, "zones", "settings/challenge_ttl"))
setattr(branch, "development_mode",
self._add_with_auth(base, "zones", "settings/development_mode"))
setattr(branch, "email_obfuscation",
self._add_with_auth(base, "zones", "settings/email_obfuscation"))
setattr(branch, "hotlink_protection",
self._add_with_auth(base, "zones", "settings/hotlink_protection"))
setattr(branch, "ip_geolocation",
self._add_with_auth(base, "zones", "settings/ip_geolocation"))
setattr(branch, "ipv6",
self._add_with_auth(base, "zones", "settings/ipv6"))
setattr(branch, "minify",
self._add_with_auth(base, "zones", "settings/minify"))
setattr(branch, "mirage",
self._add_with_auth(base, "zones", "settings/mirage"))
setattr(branch, "mobile_redirect",
self._add_with_auth(base, "zones", "settings/mobile_redirect"))
setattr(branch, "origin_error_page_pass_thru",
self._add_with_auth(base, "zones", "settings/origin_error_page_pass_thru"))
setattr(branch, "polish",
self._add_with_auth(base, "zones", "settings/polish"))
setattr(branch, "prefetch_preload",
self._add_with_auth(base, "zones", "settings/prefetch_preload"))
setattr(branch, "response_buffering",
self._add_with_auth(base, "zones", "settings/response_buffering"))
setattr(branch, "rocket_loader",
self._add_with_auth(base, "zones", "settings/rocket_loader"))
setattr(branch, "security_header",
self._add_with_auth(base, "zones", "settings/security_header"))
setattr(branch, "security_level",
self._add_with_auth(base, "zones", "settings/security_level"))
setattr(branch, "server_side_exclude",
self._add_with_auth(base, "zones", "settings/server_side_exclude"))
setattr(branch, "sort_query_string_for_cache",
self._add_with_auth(base, "zones", "settings/sort_query_string_for_cache"))
setattr(branch, "ssl",
self._add_with_auth(base, "zones", "settings/ssl"))
setattr(branch, "tls_client_auth",
self._add_with_auth(base, "zones", "settings/tls_client_auth"))
setattr(branch, "true_client_ip_header",
self._add_with_auth(base, "zones", "settings/true_client_ip_header"))
setattr(branch, "tls_1_2_only",
self._add_with_auth(base, "zones", "settings/tls_1_2_only"))
setattr(branch, "tls_1_3",
self._add_with_auth(base, "zones", "settings/tls_1_3"))
# setattr(branch, "tlsadd_auth",
# self._add_with_auth(base, "zones", "settings/tlsadd_auth"))
# setattr(branch, "trueadd_ip_header",
# self._add_with_auth(base, "zones", "settings/trueadd_ip_header"))
setattr(branch, "websockets",
self._add_with_auth(base, "zones", "settings/websockets"))
setattr(branch, "waf",
self._add_with_auth(base, "zones", "settings/waf"))
setattr(branch, "webp",
self._add_with_auth(base, "zones", "settings/webp"))
setattr(branch, "http2",
self._add_with_auth(base, "zones", "settings/http2"))
setattr(branch, "pseudo_ipv4",
self._add_with_auth(base, "zones", "settings/pseudo_ipv4"))
setattr(branch, "opportunistic_encryption",
self._add_with_auth(base, "zones", "settings/opportunistic_encryption"))
setattr(branch, "automatic_https_rewrites",
self._add_with_auth(base, "zones", "settings/automatic_https_rewrites"))
def zones_analytics(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "analytics",
self._add_unused(base, "zones", "analytics"))
branch = self.zones.analytics
setattr(branch, "colos",
self._add_with_auth(base, "zones", "analytics/colos"))
setattr(branch, "dashboard",
self._add_with_auth(base, "zones", "analytics/dashboard"))
def zones_firewall(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "firewall",
self._add_unused(branch, "zones", "firewall"))
branch = self.zones.firewall
setattr(branch, "access_rules",
self._add_unused(base, "zones", "firewall/access_rules"))
setattr(branch, "waf",
self._add_unused(base, "zones", "firewall/waf"))
branch = self.zones.firewall.waf
setattr(branch, "packages",
self._add_with_auth(base, "zones", "firewall/waf/packages"))
branch = self.zones.firewall.waf.packages
setattr(branch, "groups",
self._add_with_auth(base, "zones", "firewall/waf/packages", "groups"))
setattr(branch, "rules",
self._add_with_auth(base, "zones", "firewall/waf/packages", "rules"))
branch = self.zones.firewall.access_rules
setattr(branch, "rules",
self._add_with_auth(base, "zones", "firewall/access_rules/rules"))
branch = self.zones.firewall
setattr(branch, "lockdowns",
self._add_with_auth(base, "zones", "firewall/lockdowns"))
setattr(branch, "ua_rules",
self._add_with_auth(base, "zones", "firewall/ua_rules"))
def zones_rate_limits(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "rate_limits",
self._add_with_auth(base, "zones", "rate_limits"))
def zones_dns_analytics(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "dns_analytics",
self._add_unused(base, "zones", "dns_analytics"))
branch = self.zones.dns_analytics
setattr(branch, "report",
self._add_with_auth(base, "zones", "dns_analytics/report"))
branch = self.zones.dns_analytics.report
setattr(branch, "bytime",
self._add_with_auth(base, "zones", "dns_analytics/report/bytime"))
def zones_amp(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "amp",
self._add_unused(base, "zones", "amp"))
branch = self.zones.amp
setattr(branch, "viewer",
self._add_with_auth(base, "zones", "amp/viewer"))
def railguns(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "railguns",
self._add_with_auth(base, "railguns"))
branch = self.railguns
setattr(branch, "zones",
self._add_with_auth(base, "railguns", "zones"))
def organizations(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "organizations",
self._add_with_auth(base, "organizations"))
branch = self.organizations
setattr(branch, "members",
self._add_with_auth(base, "organizations", "members"))
setattr(branch, "invite",
self._add_with_auth(base, "organizations", "invite"))
setattr(branch, "invites",
self._add_with_auth(base, "organizations", "invites"))
setattr(branch, "railguns",
self._add_with_auth(base, "organizations", "railguns"))
branch = self.organizations.railguns
setattr(branch, "zones",
self._add_with_auth(base, "organizations", "railguns", "zones"))
branch = self.organizations
setattr(branch, "roles",
self._add_with_auth(base, "organizations", "roles"))
setattr(branch, "firewall",
self._add_unused(base, "organizations", "firewall"))
branch = self.organizations.firewall
setattr(branch, "access_rules",
self._add_unused(base, "organizations", "firewall/access_rules"))
branch = self.organizations.firewall.access_rules
setattr(branch, "rules",
self._add_with_auth(base, "organizations", "firewall/access_rules/rules"))
branch = self.organizations
setattr(branch, "load_balancers",
self._add_with_auth(base, "organizations", "load_balancers"))
branch = self.organizations.load_balancers
setattr(branch, "monitors",
self._add_with_auth(base, "organizations", "load_balancers/monitors"))
setattr(branch, "pools",
self._add_with_auth(base, "organizations", "load_balancers/pools"))
def certificates(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "certificates",
self._add_with_cert_auth(base, "certificates"))
def ips(self):
""" API core commands for Cloudflare API"""
base = self._base
setattr(self, "ips",
self._add_noauth(base, "ips"))
def zones_argo(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "argo",
self._add_unused(base, "zones", "argo"))
branch = self.zones.argo
setattr(branch, "tiered_caching",
self._add_with_auth(base, "zones", "argo/tiered_caching"))
setattr(branch, "smart_routing",
self._add_with_auth(base, "zones", "argo/smart_routing"))
def zones_dnssec(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "dnssec",
self._add_with_auth(base, "zones", "dnssec"))
def zones_ssl(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "ssl",
self._add_unused(base, "zones", "ssl"))
branch = self.zones.ssl
setattr(branch, "analyze",
self._add_with_auth(base, "zones", "ssl/analyze"))
setattr(branch, "certificate_packs",
self._add_with_auth(base, "zones", "ssl/certificate_packs"))
setattr(branch, "verification",
self._add_with_auth(base, "zones", "ssl/verification"))
def zones_load_balancers(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.zones
setattr(branch, "load_balancers",
self._add_with_auth(base, "zones", "load_balancers"))
def user_load_balancers(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.user
setattr(branch, "load_balancers",
self._add_unused(base, "user/load_balancers"))
branch = self.user.load_balancers
setattr(branch, "monitors",
self._add_with_auth(base, "user/load_balancers/monitors"))
setattr(branch, "pools",
self._add_with_auth(base, "user/load_balancers/pools"))
def user_virtual_dns(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.user
setattr(branch, "virtual_dns",
self._add_with_auth(base, "user/virtual_dns"))
branch = self.user.virtual_dns
setattr(branch, "dns_analytics",
self._add_unused(base, "user/virtual_dns", "dns_analytics"))
branch = self.user.virtual_dns.dns_analytics
setattr(branch, "report",
self._add_with_auth(base, "user/virtual_dns", "dns_analytics/report"))
branch = self.user.virtual_dns.dns_analytics.report
setattr(branch, "bytime",
self._add_with_auth(base, "user/virtual_dns", "dns_analytics/report/bytime"))
def organizations_virtual_dns(self):
""" API core commands for Cloudflare API"""
base = self._base
branch = self.organizations
setattr(branch, "virtual_dns",
self._add_with_auth(base, "organizations", "virtual_dns"))
branch = self.organizations.virtual_dns
setattr(branch, "dns_analytics",
self._add_unused(base, "organizations", "virtual_dns", "dns_analytics"))
branch = self.organizations.virtual_dns.dns_analytics
setattr(branch, "report",
self._add_with_auth(base, "organizations", "virtual_dns", "dns_analytics/report"))
branch = self.organizations.virtual_dns.dns_analytics.report
setattr(branch, "bytime",
self._add_with_auth(base, "organizations", "virtual_dns", "dns_analytics/report/bytime")) | 0.53607 | 0.101589 |
import shutil
from PIL import Image
from PIL.ExifTags import TAGS
def square(old_path, new_path, side):
"""
剪切图片为正方形
side: 边长
"""
try:
img = Image.open(old_path).convert('RGB')
except IOError:
raise IOError(u'图片格式异常,无法处理。')
w, h = img.size
if w != h or w > side:
r = w if w < h else h
offset = int(abs(w-h)/2)
area = (offset, 0, offset+r, r) if w > h else (0, offset, r, offset+r)
img = img.transform((r, r), Image.EXTENT, area)
img.thumbnail((side, side), Image.ANTIALIAS)
img.save(new_path, "JPEG", quality=85)
elif old_path != new_path:
img.save(new_path, "JPEG", quality=85)
def zoom(old_path, new_path, long=0, short=0, percent=0, size=()):
"""
按原比例缩放图片
指定缩放长边或短边,或通过比例缩放,或直接指定宽高
"""
if percent:
zoom_percent(old_path, new_path, percent)
return
img = Image.open(old_path)
w, h = img.size
if size:
img.resize(size, Image.ANTIALIAS).save(new_path, "JPEG", quality=85)
return
percent = 1
if long and not short:
r = w if w > h else h
percent = long/float(r)
if short and not long:
r = w if w < h else h
percent = short/float(r)
#如图片缩放和原图差距很小则不处理
if 1 > (1-percent) > 0.1:
zoom_percent(old_path, new_path, percent)
elif new_path != old_path:
shutil.copy2(old_path, new_path)
def zoom_percent(old_path, new_path, percent):
"""
按比例缩放图片 - 指定百分比
"""
img = Image.open(old_path)
w, h = img.size
img.resize(
(int(w*percent), int(h*percent)),
Image.ANTIALIAS
).save(new_path, "JPEG", quality=85)
def get_exif(img_path):
ret = {}
i = Image.open(img_path)
info = i._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
return ret
def get_genTime(img_path):
info = get_exif(img_path)
if info:
return info.get('DateTimeOriginal') or info.get('DateTime')
else:
return None
def img_area_select(path, box, percent=1, to_path=None):
img = Image.open(path)
if percent != 1:
w, h = img.size
img = img.resize(
(int(w*percent), int(h*percent)),
Image.ANTIALIAS
)
img.crop(box).save(to_path or path)
def img_check_origin_size(img_path, size):
img = Image.open(img_path)
w, h = img.size
r = w if w > h else h
if r > size*1.2:
percent = size/float(r)
img.resize((int(w*percent), int(h*percent)), Image.ANTIALIAS).save(img_path, "JPEG", quality=80)
def img_resize(img_path, size, to_path):
img = Image.open(img_path)
w, h = img.size
r = w if w > h else h
if r < size:
img.save(to_path, "JPEG", quality=80)
else:
percent = size/float(r)
img.resize(
(int(w*percent), int(h*percent)),
Image.ANTIALIAS
).save(
to_path, "JPEG", quality=80
) | sharper/util/imgtool.py | import shutil
from PIL import Image
from PIL.ExifTags import TAGS
def square(old_path, new_path, side):
"""
剪切图片为正方形
side: 边长
"""
try:
img = Image.open(old_path).convert('RGB')
except IOError:
raise IOError(u'图片格式异常,无法处理。')
w, h = img.size
if w != h or w > side:
r = w if w < h else h
offset = int(abs(w-h)/2)
area = (offset, 0, offset+r, r) if w > h else (0, offset, r, offset+r)
img = img.transform((r, r), Image.EXTENT, area)
img.thumbnail((side, side), Image.ANTIALIAS)
img.save(new_path, "JPEG", quality=85)
elif old_path != new_path:
img.save(new_path, "JPEG", quality=85)
def zoom(old_path, new_path, long=0, short=0, percent=0, size=()):
"""
按原比例缩放图片
指定缩放长边或短边,或通过比例缩放,或直接指定宽高
"""
if percent:
zoom_percent(old_path, new_path, percent)
return
img = Image.open(old_path)
w, h = img.size
if size:
img.resize(size, Image.ANTIALIAS).save(new_path, "JPEG", quality=85)
return
percent = 1
if long and not short:
r = w if w > h else h
percent = long/float(r)
if short and not long:
r = w if w < h else h
percent = short/float(r)
#如图片缩放和原图差距很小则不处理
if 1 > (1-percent) > 0.1:
zoom_percent(old_path, new_path, percent)
elif new_path != old_path:
shutil.copy2(old_path, new_path)
def zoom_percent(old_path, new_path, percent):
"""
按比例缩放图片 - 指定百分比
"""
img = Image.open(old_path)
w, h = img.size
img.resize(
(int(w*percent), int(h*percent)),
Image.ANTIALIAS
).save(new_path, "JPEG", quality=85)
def get_exif(img_path):
ret = {}
i = Image.open(img_path)
info = i._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
return ret
def get_genTime(img_path):
info = get_exif(img_path)
if info:
return info.get('DateTimeOriginal') or info.get('DateTime')
else:
return None
def img_area_select(path, box, percent=1, to_path=None):
img = Image.open(path)
if percent != 1:
w, h = img.size
img = img.resize(
(int(w*percent), int(h*percent)),
Image.ANTIALIAS
)
img.crop(box).save(to_path or path)
def img_check_origin_size(img_path, size):
img = Image.open(img_path)
w, h = img.size
r = w if w > h else h
if r > size*1.2:
percent = size/float(r)
img.resize((int(w*percent), int(h*percent)), Image.ANTIALIAS).save(img_path, "JPEG", quality=80)
def img_resize(img_path, size, to_path):
img = Image.open(img_path)
w, h = img.size
r = w if w > h else h
if r < size:
img.save(to_path, "JPEG", quality=80)
else:
percent = size/float(r)
img.resize(
(int(w*percent), int(h*percent)),
Image.ANTIALIAS
).save(
to_path, "JPEG", quality=80
) | 0.275617 | 0.323313 |
from rest_framework import serializers
from django.contrib.sites.shortcuts import get_current_site
from urllib.parse import urlsplit
from posts.models import Post
from comments.api.serializers import CommentListSerializers
from comments.models import Comments
class PostListSerializer(serializers.ModelSerializer):
author = serializers.SerializerMethodField()
avatar = serializers.SerializerMethodField()
detail = serializers.HyperlinkedIdentityField(
view_name='posts-api:post-detail-api'
)
delete = serializers.HyperlinkedIdentityField(
view_name='posts-api:post-delete-api'
)
class Meta:
model = Post
fields = ('id', 'avatar', 'delete', 'detail', 'title',
'author', 'image', 'content',)
def get_author(self, obj):
return str(obj.author.username)
def get_avatar(self, obj):
request = self.context['request']
current_site = get_current_site(request)
protocol = urlsplit(request.build_absolute_uri(None)).scheme
a = (protocol + "://", current_site.domain, obj.author.profile.image.url)
return ''.join(a)
class PostDetailSerializer(serializers.ModelSerializer):
avatar = serializers.SerializerMethodField()
comments = serializers.SerializerMethodField()
author = serializers.SerializerMethodField()
delete = serializers.HyperlinkedIdentityField(
view_name='posts-api:post-delete-api'
)
class Meta:
model = Post
fields = '__all__'
# exclude = ('id', )
def get_author(self, obj):
return obj.author.username
def get_avatar(self, obj):
request = self.context['request']
current_site = get_current_site(request)
protocol = urlsplit(request.build_absolute_uri(None)).scheme
a = (protocol + "://", current_site.domain, obj.author.profile.image.url)
return ''.join(a)
def get_comments(self, obj):
post_comments = obj.comments
comments = CommentListSerializers(
post_comments, many=True, context=self.context).data
return comments
class PostCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
exclude = ('id', 'read_time', 'slug', 'author') | src/posts/api/serializers.py | from rest_framework import serializers
from django.contrib.sites.shortcuts import get_current_site
from urllib.parse import urlsplit
from posts.models import Post
from comments.api.serializers import CommentListSerializers
from comments.models import Comments
class PostListSerializer(serializers.ModelSerializer):
author = serializers.SerializerMethodField()
avatar = serializers.SerializerMethodField()
detail = serializers.HyperlinkedIdentityField(
view_name='posts-api:post-detail-api'
)
delete = serializers.HyperlinkedIdentityField(
view_name='posts-api:post-delete-api'
)
class Meta:
model = Post
fields = ('id', 'avatar', 'delete', 'detail', 'title',
'author', 'image', 'content',)
def get_author(self, obj):
return str(obj.author.username)
def get_avatar(self, obj):
request = self.context['request']
current_site = get_current_site(request)
protocol = urlsplit(request.build_absolute_uri(None)).scheme
a = (protocol + "://", current_site.domain, obj.author.profile.image.url)
return ''.join(a)
class PostDetailSerializer(serializers.ModelSerializer):
avatar = serializers.SerializerMethodField()
comments = serializers.SerializerMethodField()
author = serializers.SerializerMethodField()
delete = serializers.HyperlinkedIdentityField(
view_name='posts-api:post-delete-api'
)
class Meta:
model = Post
fields = '__all__'
# exclude = ('id', )
def get_author(self, obj):
return obj.author.username
def get_avatar(self, obj):
request = self.context['request']
current_site = get_current_site(request)
protocol = urlsplit(request.build_absolute_uri(None)).scheme
a = (protocol + "://", current_site.domain, obj.author.profile.image.url)
return ''.join(a)
def get_comments(self, obj):
post_comments = obj.comments
comments = CommentListSerializers(
post_comments, many=True, context=self.context).data
return comments
class PostCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
exclude = ('id', 'read_time', 'slug', 'author') | 0.595375 | 0.075312 |
import bpy
import os
import sys
import math
from mathutils import Vector, Quaternion, Matrix
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
from . import mh
from .error import MHError, handleMHError
from . import utils
from .utils import round, setObjectMode
#----------------------------------------------------------
# saveMhpFile(context, filepath):
# loadMhpFile(context, filepath):
#----------------------------------------------------------
def saveMhpFile(context, filepath):
ob = context.object
if ob.type == 'ARMATURE':
rig = ob
else:
rig = ob.parent
scn = context.scene
if rig and rig.type == 'ARMATURE':
roots = rigRoots(rig)
if len(roots) > 1:
raise MHError("Armature %s has multiple roots: %s" % (rig.name, roots))
(pname, ext) = os.path.splitext(filepath)
mhppath = pname + ".mhp"
fp = open(mhppath, "w", encoding="utf-8", newline="\n")
root = rig.pose.bones[roots[0]]
writeMhpBones(fp, root, None)
fp.close()
print(("Mhp file %s saved" % mhppath))
def writeMhpBones(fp, pb, log):
b = pb.bone
if pb.parent:
mat = b.matrix_local.inverted() * b.parent.matrix_local * pb.parent.matrix.inverted() * pb.matrix
else:
mat = b.matrix_local.inverted() * pb.matrix
#maty = mat[1].copy()
#matz = mat[2].copy()
#mat[1] = matz
#mat[2] = -maty
diff = mat - Matrix()
nonzero = False
for i in range(4):
if abs(diff[i].length) > 5e-3:
nonzero = True
break
if nonzero:
fp.write("%s\tmatrix" % pb.name)
for i in range(4):
row = mat[i]
fp.write("\t%s\t%s\t%s\t%s" % (round(row[0]), round(row[1]), round(row[2]), round(row[3])))
fp.write("\n")
"""
t,q,s = mat.decompose()
magn = math.sqrt(q.x*q.x + q.y*q.y + q.z*q.z)
if magn > 1e-5:
fp.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (pb.name, string, round(q.w), round(q.x), round(q.y), round(q.z)))
s -= Vector((1,1,1))
if s.length > 1e-3 and isMuscleBone(pb):
fp.write("%s\t%s\t%s\t%s\t%s\n" % (pb.name, "scale", round(s[0]), round(s[1]), round(s[2])))
#log.write("%s %s\n%s\n" % (pb.name, s, m))
"""
for child in pb.children:
writeMhpBones(fp, child, log)
def isMuscleBone(pb):
for cns in pb.constraints:
if (cns.type == 'STRETCH_TO' or
cns.type == 'TRANSFORM' or
cns.type == 'TRACK_TO' or
cns.type == 'COPY_ROTATION'):
return True
return False
def loadMhpFile(context, filepath):
ob = context.object
if ob.type == 'ARMATURE':
rig = ob
else:
rig = ob.parent
unit = Matrix()
for pb in rig.pose.bones:
pb.matrix_basis = unit
scn = context.scene
if rig and rig.type == 'ARMATURE':
(pname, ext) = os.path.splitext(filepath)
mhppath = pname + ".mhp"
fp = open(mhppath, "rU")
for line in fp:
words = line.split()
if len(words) < 4:
continue
try:
pb = rig.pose.bones[words[0]]
except KeyError:
continue
if isMuscleBone(pb):
pass
elif words[1] == "quat":
q = Quaternion((float(words[2]), float(words[3]), float(words[4]), float(words[5])))
mat = q.to_matrix().to_4x4()
pb.matrix_basis = mat
elif words[1] == "gquat":
q = Quaternion((float(words[2]), float(words[3]), float(words[4]), float(words[5])))
mat = q.to_matrix().to_4x4()
maty = mat[1].copy()
matz = mat[2].copy()
mat[1] = -matz
mat[2] = maty
pb.matrix_basis = pb.bone.matrix_local.inverted() * mat
elif words[1] == "matrix":
rows = []
n = 2
for i in range(4):
rows.append((float(words[n]), float(words[n+1]), float(words[n+2]), float(words[n+3])))
n += 4
mat = Matrix(rows)
if pb.parent:
pb.matrix_basis = mat
else:
maty = mat[1].copy()
matz = mat[2].copy()
mat[1] = -matz
mat[2] = maty
pb.matrix_basis = pb.bone.matrix_local.inverted() * mat
elif words[1] == "scale":
pass
else:
raise MHError("Unknown line in mcp file:\n%s" % line)
fp.close()
print(("Mhp file %s loaded" % mhppath))
class VIEW3D_OT_LoadMhpButton(bpy.types.Operator):
bl_idname = "mh.load_mhp"
bl_label = "Load MHP File"
bl_description = "Load a pose in MHP format"
bl_options = {'UNDO'}
filename_ext = ".mhp"
filter_glob = StringProperty(default="*.mhp", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for mhp file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
from .maketarget import makeBaseObj
setObjectMode(context)
try:
loadMhpFile(context, self.properties.filepath)
makeBaseObj(context)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class VIEW3D_OT_SaveasMhpFileButton(bpy.types.Operator, ExportHelper):
bl_idname = "mh.saveas_mhp"
bl_label = "Save MHP File"
bl_description = "Save current pose in MHP format"
bl_options = {'UNDO'}
filename_ext = ".mhp"
filter_glob = StringProperty(default="*.mhp", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for mhp file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
setObjectMode(context)
try:
saveMhpFile(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
#----------------------------------------------------------
# saveBvhFile(context, filepath):
# loadBvhFile(context, filepath):
#----------------------------------------------------------
import io_anim_bvh
from io_anim_bvh import export_bvh, import_bvh
def saveBvhFile(context, filepath):
ob = context.object
rig = ob.parent
scn = context.scene
if rig and rig.type == 'ARMATURE':
roots = rigRoots(rig)
if len(roots) > 1:
raise MHError("Armature %s has multiple roots: %s" % (rig.name, roots))
scn.objects.active = rig
(pname, ext) = os.path.splitext(filepath)
bvhpath = pname + ".bvh"
export_bvh.write_armature(context, bvhpath,
frame_start = scn.frame_current,
frame_end = scn.frame_current,
global_scale = 1.0,
rotate_mode = scn.MhExportRotateMode,
root_transform_only = True
)
scn.objects.active = ob
print(("Saved %s" % bvhpath))
return True
else:
return False
def rigRoots(rig):
roots = []
for bone in rig.data.bones:
if not bone.parent:
roots.append(bone.name)
return roots
def loadBvhFile(context, filepath):
ob = context.object
rig = ob.parent
scn = context.scene
if rig and rig.type == 'ARMATURE':
(pname, ext) = os.path.splitext(filepath)
bvhpath = pname + ".bvh"
bvh_nodes = import_bvh.read_bvh(context, bvhpath,
rotate_mode=scn.MhImportRotateMode,
global_scale=1.0)
frame_orig = context.scene.frame_current
bvh_name = bpy.path.display_name_from_filepath(bvhpath)
import_bvh.bvh_node_dict2armature(context, bvh_name, bvh_nodes,
rotate_mode = scn.MhImportRotateMode,
frame_start = scn.frame_current,
IMPORT_LOOP = False,
global_matrix = rig.matrix_world,
)
context.scene.frame_set(frame_orig)
tmp = context.object
bpy.ops.object.mode_set(mode='POSE')
scn.objects.active = rig
bpy.ops.object.mode_set(mode='POSE')
copyPose(tmp, rig)
scn.objects.active = ob
scn.objects.unlink(tmp)
del tmp
print(("Loaded %s" % bvhpath))
return True
else:
return False
def copyPose(src, trg):
for name,srcBone in list(src.pose.bones.items()):
trgBone = trg.pose.bones[srcBone.name]
s = srcBone.matrix_basis
t = trgBone.matrix_basis.copy()
for i in range(3):
for j in range(3):
t[i][j] = s[i][j]
trgBone.matrix_basis = t
class VIEW3D_OT_LoadBvhButton(bpy.types.Operator):
bl_idname = "mh.load_bvh"
bl_label = "Load BVH File"
bl_description = "Load a pose in BVH format"
bl_options = {'UNDO'}
filename_ext = ".bvh"
filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for bvh file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
try:
setObjectMode(context)
loadBvhFile(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class VIEW3D_OT_SaveasBvhFileButton(bpy.types.Operator, ExportHelper):
bl_idname = "mh.saveas_bvh"
bl_label = "Save BVH File"
bl_description = "Save current pose in BVH format"
bl_options = {'UNDO'}
filename_ext = ".bvh"
filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for bvh file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
try:
setObjectMode(context)
saveBvhFile(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
#----------------------------------------------------------
# Convert weights
#----------------------------------------------------------
def readWeights(filepath, nVerts):
weights = {}
for n in range(nVerts):
weights[n] = []
bone = None
fp = open(filepath, "rU")
for line in fp:
words = line.split()
if len(words) < 2:
pass
elif words[0] == "#":
if words[1] == "weights":
bone = words[2]
else:
bone = None
elif bone:
vn = int(words[0])
if vn < mh.NBodyVerts:
weights[vn].append( (bone, float(words[1])) )
fp.close()
normedWeights = {}
for vn,data in list(weights.items()):
wsum = 0.0
for bone,w in data:
wsum += w
ndata = []
for bone,w in data:
ndata.append((bone,w/wsum))
normedWeights[vn] = ndata
return normedWeights
def defineMatrices(rig):
mats = {}
for pb in rig.pose.bones:
mats[pb.name] = pb.matrix * pb.bone.matrix_local.inverted()
return mats
def getPoseLocs(mats, restLocs, weights, nVerts):
locs = {}
for n in range(nVerts):
if weights[n]:
mat = getMatrix(mats, weights[n])
locs[n] = mat * restLocs[n]
else:
locs[n] = restLocs[n]
return locs
def getRestLocs(mats, poseLocs, weights, nVerts):
locs = {}
for n in range(nVerts):
if weights[n]:
mat = getMatrix(mats, weights[n])
locs[n] = mat.inverted() * poseLocs[n]
else:
locs[n] = poseLocs[n]
return locs
def getMatrix(mats, weight):
mat = Matrix()
mat.zero()
for bname,w in weight:
mat += w * mats[bname]
return mat
def getShapeLocs(ob, nVerts):
locs = {}
filename = "test"
for n in range(nVerts):
locs[n] = Vector((0,0,0))
for skey in ob.data.shape_keys.key_blocks:
if skey.name == "Basis":
continue
filename = skey.name
for n,v in enumerate(skey.data):
bv = ob.data.vertices[n]
vec = v.co - bv.co
locs[n] += skey.value*vec
return locs, filename
def addLocs(locs1, locs2, nVerts):
locs = {}
for n in range(nVerts):
locs[n] = locs1[n] + locs2[n]
return locs
def subLocs(locs1, locs2, nVerts):
locs = {}
for n in range(nVerts):
locs[n] = locs1[n] - locs2[n]
return locs
def saveNewTarget(filepath, locs, nVerts):
fp = open(filepath, "w", encoding="utf-8", newline="\n")
locList = list(locs.items())
locList.sort()
for (n, dr) in locList:
if dr.length > Epsilon:
fp.write("%d %s %s %s\n" % (n, round(dr[0]), round(dr[2]), round(-dr[1])))
fp.close()
return
class VIEW3D_OT_ConvertRigButton(bpy.types.Operator):
bl_idname = "mh.convert_rig"
bl_label = "Convert to rig"
bl_description = ""
bl_options = {'UNDO'}
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
setObjectMode(context)
scn = context.scene
ob = context.object
rig = ob.parent
nVerts = len(ob.data.vertices)
oldWeights = readWeights(os.path.join(scn.MhProgramPath, "data/rigs", scn.MhSourceRig+".rig"), nVerts)
newWeights = readWeights(os.path.join(scn.MhProgramPath, "data/rigs",scn.MhTargetRig+".rig"), nVerts)
mats = defineMatrices(rig)
restLocs = {}
for n in range(nVerts):
restLocs[n] = ob.data.vertices[n].co
oldShapeDiffs, filename = getShapeLocs(ob, nVerts)
oldRestLocs = addLocs(restLocs, oldShapeDiffs, nVerts)
globalLocs = getPoseLocs(mats, oldRestLocs, oldWeights, nVerts)
newRestLocs = getRestLocs(mats, globalLocs, newWeights, nVerts)
newShapeDiffs = subLocs(newRestLocs, restLocs, nVerts)
saveNewTarget(os.path.join(scn.MhProgramPath, "data/poses", scn.MhPoseTargetDir, filename + ".target"), newShapeDiffs, nVerts)
return{'FINISHED'}
#----------------------------------------------------------
# Write matrices (for debug)
#----------------------------------------------------------
def writeMatrices(context, filepath):
rig = context.object
fp = open(filepath, "w", encoding="utf-8", newline="\n")
for pb in rig.pose.bones:
fp.write(
"\n%s\n" % pb.name +
"%s\n" % pb.matrix_basis +
"%s\n" % pb.matrix)
fp.close()
class VIEW3D_OT_WriteMatricesButton(bpy.types.Operator, ExportHelper):
bl_idname = "mh.write_matrices"
bl_label = "Write Matrices"
bl_description = "Write Matrices"
bl_options = {'UNDO'}
filename_ext = ".txt"
filter_glob = StringProperty(default="*.txt", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for txt file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
try:
setObjectMode(context)
writeMatrices(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
#----------------------------------------------------------
# Init
#----------------------------------------------------------
def init():
bpy.types.Scene.MhSourceRig = StringProperty(default = "rigid")
bpy.types.Scene.MhTargetRig = StringProperty(default = "soft1")
bpy.types.Scene.MhPoseTargetDir = StringProperty(default = "dance1-soft1")
bpy.types.Scene.MhImportRotateMode = EnumProperty(
name="Rotation",
description="Rotation conversion",
items=(('QUATERNION', "Quaternion",
"Convert rotations to quaternions"),
('NATIVE', "Euler (Native)", ("Use the rotation order "
"defined in the BVH file")),
('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
),
default='NATIVE',
)
bpy.types.Scene.MhExportRotateMode = EnumProperty(
name="Rotation",
description="Rotation conversion",
items=(('NATIVE', "Euler (Native)",
"Use the rotation order defined in the BVH file"),
('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
),
default='ZYX',
)
print("pose.py reloaded") | makehuman-master/blendertools/maketarget/pose.py | import bpy
import os
import sys
import math
from mathutils import Vector, Quaternion, Matrix
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
from . import mh
from .error import MHError, handleMHError
from . import utils
from .utils import round, setObjectMode
#----------------------------------------------------------
# saveMhpFile(context, filepath):
# loadMhpFile(context, filepath):
#----------------------------------------------------------
def saveMhpFile(context, filepath):
ob = context.object
if ob.type == 'ARMATURE':
rig = ob
else:
rig = ob.parent
scn = context.scene
if rig and rig.type == 'ARMATURE':
roots = rigRoots(rig)
if len(roots) > 1:
raise MHError("Armature %s has multiple roots: %s" % (rig.name, roots))
(pname, ext) = os.path.splitext(filepath)
mhppath = pname + ".mhp"
fp = open(mhppath, "w", encoding="utf-8", newline="\n")
root = rig.pose.bones[roots[0]]
writeMhpBones(fp, root, None)
fp.close()
print(("Mhp file %s saved" % mhppath))
def writeMhpBones(fp, pb, log):
b = pb.bone
if pb.parent:
mat = b.matrix_local.inverted() * b.parent.matrix_local * pb.parent.matrix.inverted() * pb.matrix
else:
mat = b.matrix_local.inverted() * pb.matrix
#maty = mat[1].copy()
#matz = mat[2].copy()
#mat[1] = matz
#mat[2] = -maty
diff = mat - Matrix()
nonzero = False
for i in range(4):
if abs(diff[i].length) > 5e-3:
nonzero = True
break
if nonzero:
fp.write("%s\tmatrix" % pb.name)
for i in range(4):
row = mat[i]
fp.write("\t%s\t%s\t%s\t%s" % (round(row[0]), round(row[1]), round(row[2]), round(row[3])))
fp.write("\n")
"""
t,q,s = mat.decompose()
magn = math.sqrt(q.x*q.x + q.y*q.y + q.z*q.z)
if magn > 1e-5:
fp.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (pb.name, string, round(q.w), round(q.x), round(q.y), round(q.z)))
s -= Vector((1,1,1))
if s.length > 1e-3 and isMuscleBone(pb):
fp.write("%s\t%s\t%s\t%s\t%s\n" % (pb.name, "scale", round(s[0]), round(s[1]), round(s[2])))
#log.write("%s %s\n%s\n" % (pb.name, s, m))
"""
for child in pb.children:
writeMhpBones(fp, child, log)
def isMuscleBone(pb):
for cns in pb.constraints:
if (cns.type == 'STRETCH_TO' or
cns.type == 'TRANSFORM' or
cns.type == 'TRACK_TO' or
cns.type == 'COPY_ROTATION'):
return True
return False
def loadMhpFile(context, filepath):
ob = context.object
if ob.type == 'ARMATURE':
rig = ob
else:
rig = ob.parent
unit = Matrix()
for pb in rig.pose.bones:
pb.matrix_basis = unit
scn = context.scene
if rig and rig.type == 'ARMATURE':
(pname, ext) = os.path.splitext(filepath)
mhppath = pname + ".mhp"
fp = open(mhppath, "rU")
for line in fp:
words = line.split()
if len(words) < 4:
continue
try:
pb = rig.pose.bones[words[0]]
except KeyError:
continue
if isMuscleBone(pb):
pass
elif words[1] == "quat":
q = Quaternion((float(words[2]), float(words[3]), float(words[4]), float(words[5])))
mat = q.to_matrix().to_4x4()
pb.matrix_basis = mat
elif words[1] == "gquat":
q = Quaternion((float(words[2]), float(words[3]), float(words[4]), float(words[5])))
mat = q.to_matrix().to_4x4()
maty = mat[1].copy()
matz = mat[2].copy()
mat[1] = -matz
mat[2] = maty
pb.matrix_basis = pb.bone.matrix_local.inverted() * mat
elif words[1] == "matrix":
rows = []
n = 2
for i in range(4):
rows.append((float(words[n]), float(words[n+1]), float(words[n+2]), float(words[n+3])))
n += 4
mat = Matrix(rows)
if pb.parent:
pb.matrix_basis = mat
else:
maty = mat[1].copy()
matz = mat[2].copy()
mat[1] = -matz
mat[2] = maty
pb.matrix_basis = pb.bone.matrix_local.inverted() * mat
elif words[1] == "scale":
pass
else:
raise MHError("Unknown line in mcp file:\n%s" % line)
fp.close()
print(("Mhp file %s loaded" % mhppath))
class VIEW3D_OT_LoadMhpButton(bpy.types.Operator):
bl_idname = "mh.load_mhp"
bl_label = "Load MHP File"
bl_description = "Load a pose in MHP format"
bl_options = {'UNDO'}
filename_ext = ".mhp"
filter_glob = StringProperty(default="*.mhp", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for mhp file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
from .maketarget import makeBaseObj
setObjectMode(context)
try:
loadMhpFile(context, self.properties.filepath)
makeBaseObj(context)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class VIEW3D_OT_SaveasMhpFileButton(bpy.types.Operator, ExportHelper):
bl_idname = "mh.saveas_mhp"
bl_label = "Save MHP File"
bl_description = "Save current pose in MHP format"
bl_options = {'UNDO'}
filename_ext = ".mhp"
filter_glob = StringProperty(default="*.mhp", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for mhp file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
setObjectMode(context)
try:
saveMhpFile(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
#----------------------------------------------------------
# saveBvhFile(context, filepath):
# loadBvhFile(context, filepath):
#----------------------------------------------------------
import io_anim_bvh
from io_anim_bvh import export_bvh, import_bvh
def saveBvhFile(context, filepath):
ob = context.object
rig = ob.parent
scn = context.scene
if rig and rig.type == 'ARMATURE':
roots = rigRoots(rig)
if len(roots) > 1:
raise MHError("Armature %s has multiple roots: %s" % (rig.name, roots))
scn.objects.active = rig
(pname, ext) = os.path.splitext(filepath)
bvhpath = pname + ".bvh"
export_bvh.write_armature(context, bvhpath,
frame_start = scn.frame_current,
frame_end = scn.frame_current,
global_scale = 1.0,
rotate_mode = scn.MhExportRotateMode,
root_transform_only = True
)
scn.objects.active = ob
print(("Saved %s" % bvhpath))
return True
else:
return False
def rigRoots(rig):
roots = []
for bone in rig.data.bones:
if not bone.parent:
roots.append(bone.name)
return roots
def loadBvhFile(context, filepath):
ob = context.object
rig = ob.parent
scn = context.scene
if rig and rig.type == 'ARMATURE':
(pname, ext) = os.path.splitext(filepath)
bvhpath = pname + ".bvh"
bvh_nodes = import_bvh.read_bvh(context, bvhpath,
rotate_mode=scn.MhImportRotateMode,
global_scale=1.0)
frame_orig = context.scene.frame_current
bvh_name = bpy.path.display_name_from_filepath(bvhpath)
import_bvh.bvh_node_dict2armature(context, bvh_name, bvh_nodes,
rotate_mode = scn.MhImportRotateMode,
frame_start = scn.frame_current,
IMPORT_LOOP = False,
global_matrix = rig.matrix_world,
)
context.scene.frame_set(frame_orig)
tmp = context.object
bpy.ops.object.mode_set(mode='POSE')
scn.objects.active = rig
bpy.ops.object.mode_set(mode='POSE')
copyPose(tmp, rig)
scn.objects.active = ob
scn.objects.unlink(tmp)
del tmp
print(("Loaded %s" % bvhpath))
return True
else:
return False
def copyPose(src, trg):
for name,srcBone in list(src.pose.bones.items()):
trgBone = trg.pose.bones[srcBone.name]
s = srcBone.matrix_basis
t = trgBone.matrix_basis.copy()
for i in range(3):
for j in range(3):
t[i][j] = s[i][j]
trgBone.matrix_basis = t
class VIEW3D_OT_LoadBvhButton(bpy.types.Operator):
bl_idname = "mh.load_bvh"
bl_label = "Load BVH File"
bl_description = "Load a pose in BVH format"
bl_options = {'UNDO'}
filename_ext = ".bvh"
filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for bvh file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
try:
setObjectMode(context)
loadBvhFile(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
class VIEW3D_OT_SaveasBvhFileButton(bpy.types.Operator, ExportHelper):
bl_idname = "mh.saveas_bvh"
bl_label = "Save BVH File"
bl_description = "Save current pose in BVH format"
bl_options = {'UNDO'}
filename_ext = ".bvh"
filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for bvh file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
try:
setObjectMode(context)
saveBvhFile(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
#----------------------------------------------------------
# Convert weights
#----------------------------------------------------------
def readWeights(filepath, nVerts):
weights = {}
for n in range(nVerts):
weights[n] = []
bone = None
fp = open(filepath, "rU")
for line in fp:
words = line.split()
if len(words) < 2:
pass
elif words[0] == "#":
if words[1] == "weights":
bone = words[2]
else:
bone = None
elif bone:
vn = int(words[0])
if vn < mh.NBodyVerts:
weights[vn].append( (bone, float(words[1])) )
fp.close()
normedWeights = {}
for vn,data in list(weights.items()):
wsum = 0.0
for bone,w in data:
wsum += w
ndata = []
for bone,w in data:
ndata.append((bone,w/wsum))
normedWeights[vn] = ndata
return normedWeights
def defineMatrices(rig):
mats = {}
for pb in rig.pose.bones:
mats[pb.name] = pb.matrix * pb.bone.matrix_local.inverted()
return mats
def getPoseLocs(mats, restLocs, weights, nVerts):
locs = {}
for n in range(nVerts):
if weights[n]:
mat = getMatrix(mats, weights[n])
locs[n] = mat * restLocs[n]
else:
locs[n] = restLocs[n]
return locs
def getRestLocs(mats, poseLocs, weights, nVerts):
locs = {}
for n in range(nVerts):
if weights[n]:
mat = getMatrix(mats, weights[n])
locs[n] = mat.inverted() * poseLocs[n]
else:
locs[n] = poseLocs[n]
return locs
def getMatrix(mats, weight):
mat = Matrix()
mat.zero()
for bname,w in weight:
mat += w * mats[bname]
return mat
def getShapeLocs(ob, nVerts):
locs = {}
filename = "test"
for n in range(nVerts):
locs[n] = Vector((0,0,0))
for skey in ob.data.shape_keys.key_blocks:
if skey.name == "Basis":
continue
filename = skey.name
for n,v in enumerate(skey.data):
bv = ob.data.vertices[n]
vec = v.co - bv.co
locs[n] += skey.value*vec
return locs, filename
def addLocs(locs1, locs2, nVerts):
locs = {}
for n in range(nVerts):
locs[n] = locs1[n] + locs2[n]
return locs
def subLocs(locs1, locs2, nVerts):
locs = {}
for n in range(nVerts):
locs[n] = locs1[n] - locs2[n]
return locs
def saveNewTarget(filepath, locs, nVerts):
fp = open(filepath, "w", encoding="utf-8", newline="\n")
locList = list(locs.items())
locList.sort()
for (n, dr) in locList:
if dr.length > Epsilon:
fp.write("%d %s %s %s\n" % (n, round(dr[0]), round(dr[2]), round(-dr[1])))
fp.close()
return
class VIEW3D_OT_ConvertRigButton(bpy.types.Operator):
bl_idname = "mh.convert_rig"
bl_label = "Convert to rig"
bl_description = ""
bl_options = {'UNDO'}
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
setObjectMode(context)
scn = context.scene
ob = context.object
rig = ob.parent
nVerts = len(ob.data.vertices)
oldWeights = readWeights(os.path.join(scn.MhProgramPath, "data/rigs", scn.MhSourceRig+".rig"), nVerts)
newWeights = readWeights(os.path.join(scn.MhProgramPath, "data/rigs",scn.MhTargetRig+".rig"), nVerts)
mats = defineMatrices(rig)
restLocs = {}
for n in range(nVerts):
restLocs[n] = ob.data.vertices[n].co
oldShapeDiffs, filename = getShapeLocs(ob, nVerts)
oldRestLocs = addLocs(restLocs, oldShapeDiffs, nVerts)
globalLocs = getPoseLocs(mats, oldRestLocs, oldWeights, nVerts)
newRestLocs = getRestLocs(mats, globalLocs, newWeights, nVerts)
newShapeDiffs = subLocs(newRestLocs, restLocs, nVerts)
saveNewTarget(os.path.join(scn.MhProgramPath, "data/poses", scn.MhPoseTargetDir, filename + ".target"), newShapeDiffs, nVerts)
return{'FINISHED'}
#----------------------------------------------------------
# Write matrices (for debug)
#----------------------------------------------------------
def writeMatrices(context, filepath):
rig = context.object
fp = open(filepath, "w", encoding="utf-8", newline="\n")
for pb in rig.pose.bones:
fp.write(
"\n%s\n" % pb.name +
"%s\n" % pb.matrix_basis +
"%s\n" % pb.matrix)
fp.close()
class VIEW3D_OT_WriteMatricesButton(bpy.types.Operator, ExportHelper):
bl_idname = "mh.write_matrices"
bl_label = "Write Matrices"
bl_description = "Write Matrices"
bl_options = {'UNDO'}
filename_ext = ".txt"
filter_glob = StringProperty(default="*.txt", options={'HIDDEN'})
filepath = bpy.props.StringProperty(
name="File Path",
description="File path used for txt file",
maxlen= 1024, default= "")
@classmethod
def poll(self, context):
return context.object
def execute(self, context):
try:
setObjectMode(context)
writeMatrices(context, self.properties.filepath)
except MHError:
handleMHError(context)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
#----------------------------------------------------------
# Init
#----------------------------------------------------------
def init():
bpy.types.Scene.MhSourceRig = StringProperty(default = "rigid")
bpy.types.Scene.MhTargetRig = StringProperty(default = "soft1")
bpy.types.Scene.MhPoseTargetDir = StringProperty(default = "dance1-soft1")
bpy.types.Scene.MhImportRotateMode = EnumProperty(
name="Rotation",
description="Rotation conversion",
items=(('QUATERNION', "Quaternion",
"Convert rotations to quaternions"),
('NATIVE', "Euler (Native)", ("Use the rotation order "
"defined in the BVH file")),
('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
),
default='NATIVE',
)
bpy.types.Scene.MhExportRotateMode = EnumProperty(
name="Rotation",
description="Rotation conversion",
items=(('NATIVE', "Euler (Native)",
"Use the rotation order defined in the BVH file"),
('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
),
default='ZYX',
)
print("pose.py reloaded") | 0.217254 | 0.224374 |
from misago.acl.testutils import override_acl
from misago.categories.models import Category
from misago.conf import settings
from misago.threads import testutils
from misago.threads.checksums import update_post_checksum
from misago.threads.events import record_event
from misago.threads.moderation import threads as threads_moderation
from misago.threads.moderation import hide_post
from misago.users.testutils import AuthenticatedUserTestCase
class MockRequest(object):
    """Minimal stand-in for a Django request.

    Exposes only the attributes the moderation API reads:
    ``user`` and ``user_ip``.
    """

    def __init__(self, user):
        # fixed loopback IP is good enough for moderation audit fields
        self.user_ip = '127.0.0.1'
        self.user = user
class ThreadViewTestCase(AuthenticatedUserTestCase):
    """Base case for thread view tests.

    Creates a thread in the default category and provides a helper for
    overriding the authenticated user's ACL for that category.
    """

    def setUp(self):
        super(ThreadViewTestCase, self).setUp()

        # every test runs against a fresh thread in the first category
        self.category = Category.objects.get(slug='first-category')
        self.thread = testutils.post_thread(category=self.category)

    def override_acl(self, acl=None):
        """Reset the user's category ACL to restrictive defaults.

        The defaults allow seeing and browsing the category and all of
        its threads, but nothing else; ``acl`` entries, when given, are
        applied on top of those defaults.
        """
        defaults = {
            'can_see': 1,
            'can_browse': 1,
            'can_see_all_threads': 1,
            'can_see_own_threads': 0,
            'can_hide_threads': 0,
            'can_approve_content': 0,
            'can_edit_posts': 0,
            'can_hide_posts': 0,
            'can_hide_own_posts': 0,
            'can_close_threads': 0,
            'post_edit_time': 0,
            'can_hide_events': 0,
        }

        final_acl = self.user.acl_cache['categories'][self.category.pk]
        final_acl.update(defaults)
        if acl:
            final_acl.update(acl)

        # module-level override_acl from misago.acl.testutils
        override_acl(self.user, {
            'categories': {
                self.category.pk: final_acl,
            },
        })
class ThreadVisibilityTests(ThreadViewTestCase):
    def test_thread_displays(self):
        """thread view has no showstoppers"""
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)

    def test_view_shows_owner_thread(self):
        """view handles "owned threads" only"""
        # with can_see_all_threads off, someone else's thread 404s
        self.override_acl({'can_see_all_threads': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)

        # once the user is the starter, the same ACL shows the thread
        self.thread.starter = self.user
        self.thread.save()
        self.override_acl({'can_see_all_threads': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)

    def test_view_validates_category_permissions(self):
        """view validates category visibility"""
        # lacking either see or browse permission on the category 404s
        self.override_acl({'can_see': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)

        self.override_acl({'can_browse': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)

    def test_view_shows_unapproved_thread(self):
        """view handles unapproved thread"""
        # unapproved thread is hidden from users without approval perm
        self.override_acl({'can_approve_content': 0})
        self.thread.is_unapproved = True
        self.thread.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)

        # grant permission to see unapproved content
        self.override_acl({'can_approve_content': 1})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)

        # make test user thread's owner and remove permission to see unapproved
        # user should be able to see thread as its author anyway
        self.thread.starter = self.user
        self.thread.save()
        self.override_acl({'can_approve_content': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)

    def test_view_shows_hidden_thread(self):
        """view handles hidden thread"""
        self.override_acl({'can_hide_threads': 0})
        self.thread.is_hidden = True
        self.thread.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)

        # threads owners are not exempt from hidden threads check
        self.thread.starter = self.user
        self.thread.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)

        # grant permission to see hidden content
        self.override_acl({'can_hide_threads': 1})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)
class ThreadPostsVisibilityTests(ThreadViewTestCase):
    """Tests for visibility of individual posts within the thread view."""

    def test_post_renders(self):
        """post renders"""
        post = testutils.reply_thread(self.thread, poster=self.user)

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())

    def test_invalid_post_renders(self):
        """invalid post renders"""
        post = testutils.reply_thread(self.thread, poster=self.user)

        # change parsed content without updating the checksum so the
        # post fails its integrity check and renders as invalid
        post.parsed = 'fiddled post content'
        post.save()

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post's contents cannot be displayed.")
        self.assertNotContains(response, post.parsed)

    def test_hidden_post_visibility(self):
        """hidden post renders correctly"""
        post = testutils.reply_thread(self.thread, message="Hello, I'm hidden post!")
        hide_post(self.user, post)

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertNotContains(response, post.parsed)

        # posts authors are not exempt from seeing hidden posts content
        # FIX: was `post.posted_by`, which is not the poster FK and was
        # silently ignored on save(); `poster` matches the field used in
        # test_unapproved_post_visibility below
        post.poster = self.user
        post.save()

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertNotContains(response, post.parsed)

        # permission to hide own posts isn't enough to see post content
        self.override_acl({'can_hide_own_posts': 1})

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertNotContains(response, post.parsed)

        # post's content is displayed after permission to see posts is granted
        self.override_acl({'can_hide_posts': 1})

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(
            response, "This post is hidden. Only users with permission may see its contents."
        )
        self.assertNotContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertContains(response, post.parsed)

    def test_unapproved_post_visibility(self):
        """unapproved post renders for its author and users with perm to approve content"""
        post = testutils.reply_thread(self.thread, is_unapproved=True)

        # post is hidden because we aren't its author nor user with permission to approve
        response = self.client.get(self.thread.get_absolute_url())
        self.assertNotContains(response, post.get_absolute_url())

        # post displays because we have permission to approve unapproved content
        self.override_acl({'can_approve_content': 1})

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is unapproved.")
        self.assertContains(response, post.parsed)

        # post displays because we are its author
        post.poster = self.user
        post.save()
        self.override_acl({'can_approve_content': 0})

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is unapproved.")
        self.assertContains(response, post.parsed)
class ThreadEventVisibilityTests(ThreadViewTestCase):
    """Tests for rendering of moderation events on the thread view."""

    def test_thread_events_render(self):
        """different thread events render"""
        # (moderation action, message the rendered event must contain)
        TEST_ACTIONS = [
            (threads_moderation.pin_thread_globally, "Thread has been pinned globally."),
            (threads_moderation.pin_thread_locally, "Thread has been pinned locally."),
            (threads_moderation.unpin_thread, "Thread has been unpinned."),
            (threads_moderation.approve_thread, "Thread has been approved."),
            (threads_moderation.close_thread, "Thread has been closed."),
            (threads_moderation.open_thread, "Thread has been opened."),
            (threads_moderation.hide_thread, "Thread has been made hidden."),
            (threads_moderation.unhide_thread, "Thread has been revealed."),
        ]
        # thread starts unapproved so approve_thread records an event too
        self.thread.is_unapproved = True
        self.thread.save()
        for action, message in TEST_ACTIONS:
            # ACL override has to be re-applied before every request
            self.override_acl({'can_approve_content': 1, 'can_hide_threads': 1})
            # wipe events from previous iteration so index [0] is this action's event
            self.thread.post_set.filter(is_event=True).delete()
            action(MockRequest(self.user), self.thread)
            event = self.thread.post_set.filter(is_event=True)[0]
            # event renders
            response = self.client.get(self.thread.get_absolute_url())
            self.assertContains(response, event.get_absolute_url())
            self.assertContains(response, message)
            # hidden events don't render without permission
            hide_post(self.user, event)
            self.override_acl({'can_approve_content': 1, 'can_hide_threads': 1})
            response = self.client.get(self.thread.get_absolute_url())
            self.assertNotContains(response, event.get_absolute_url())
            self.assertNotContains(response, message)
            # hidden event renders with permission
            # NOTE(review): event is already hidden at this point, so this second
            # hide_post looks like a no-op -- confirm intent
            hide_post(self.user, event)
            self.override_acl({
                'can_approve_content': 1,
                'can_hide_threads': 1,
                'can_hide_events': 1,
            })
            response = self.client.get(self.thread.get_absolute_url())
            self.assertContains(response, event.get_absolute_url())
            self.assertContains(response, message)
            self.assertContains(response, "Hidden by")
            # Event is only loaded if thread has events flag
            self.thread.has_events = False
            self.thread.save()
            self.override_acl({
                'can_approve_content': 1,
                'can_hide_threads': 1,
                'can_hide_events': 1,
            })
            response = self.client.get(self.thread.get_absolute_url())
            self.assertNotContains(response, event.get_absolute_url())

    def test_events_limit(self):
        """forum will trim oldest events if there's more than allowed by config"""
        events_limit = settings.MISAGO_EVENTS_PER_PAGE
        events = []
        # record 5 more events than the page limit
        for _ in range(events_limit + 5):
            event = record_event(MockRequest(self.user), self.thread, 'closed')
            events.append(event)
        # test that only events within limits were rendered
        response = self.client.get(self.thread.get_absolute_url())
        # newest events_limit events render...
        for event in events[5:]:
            self.assertContains(response, event.get_absolute_url())
        # ...while the 5 oldest are trimmed
        for event in events[:5]:
            self.assertNotContains(response, event.get_absolute_url())

    def test_events_dont_take_space(self):
        """events dont take space away from posts"""
        posts_limit = settings.MISAGO_POSTS_PER_PAGE
        events_limit = settings.MISAGO_EVENTS_PER_PAGE
        events = []
        for _ in range(events_limit + 5):
            event = record_event(MockRequest(self.user), self.thread, 'closed')
            events.append(event)
        # first post already exists, so posts_limit - 1 replies fill page one
        posts = []
        for _ in range(posts_limit - 1):
            post = testutils.reply_thread(self.thread)
            posts.append(post)
        # test that all events and posts within limits were rendered
        response = self.client.get(self.thread.get_absolute_url())
        for event in events[5:]:
            self.assertContains(response, event.get_absolute_url())
        for post in posts:
            self.assertContains(response, post.get_absolute_url())
        # add second page to thread with more events
        for _ in range(posts_limit):
            post = testutils.reply_thread(self.thread)
        for _ in range(events_limit):
            event = record_event(MockRequest(self.user), self.thread, 'closed')
            events.append(event)
        # see first page
        response = self.client.get(self.thread.get_absolute_url())
        for event in events[5:events_limit]:
            self.assertContains(response, event.get_absolute_url())
        for post in posts[:posts_limit - 1]:
            self.assertContains(response, post.get_absolute_url())
        # see second page
        response = self.client.get('%s2/' % self.thread.get_absolute_url())
        for event in events[5 + events_limit:]:
            self.assertContains(response, event.get_absolute_url())
        for post in posts[posts_limit - 1:]:
            self.assertContains(response, post.get_absolute_url())

    def test_changed_thread_title_event_renders(self):
        """changed thread title event renders"""
        threads_moderation.change_thread_title(
            MockRequest(self.user), self.thread, "Lorem renamed ipsum!"
        )
        event = self.thread.post_set.filter(is_event=True)[0]
        self.assertEqual(event.event_type, 'changed_title')
        # event renders
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, "title has been changed from")
        self.assertContains(response, self.thread.title)

    def test_thread_move_event_renders(self):
        """moved thread event renders"""
        # park the thread in the parent category first so the move is real
        self.thread.category = self.thread.category.parent
        self.thread.save()
        threads_moderation.move_thread(MockRequest(self.user), self.thread, self.category)
        event = self.thread.post_set.filter(is_event=True)[0]
        self.assertEqual(event.event_type, 'moved')
        # event renders
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, "Thread has been moved from")

    def test_thread_merged_event_renders(self):
        """merged thread event renders"""
        other_thread = testutils.post_thread(category=self.category)
        threads_moderation.merge_thread(MockRequest(self.user), self.thread, other_thread)
        event = self.thread.post_set.filter(is_event=True)[0]
        self.assertEqual(event.event_type, 'merged')
        # event renders
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, "thread has been merged into this thread")
class ThreadAttachmentsViewTests(ThreadViewTestCase):
    """Tests for rendering post attachments on the thread view."""

    def mock_attachment_cache(self, data):
        """Return a fake attachments_cache entry: defaults overridden by *data*."""
        json = {
            'url': {},
            'size': 16914,
            'filename': 'Archiwum.zip',
            'filetype': 'ZIP',
            'is_image': False,
            'uploaded_on': '2016-10-22T21:17:40.408710Z',
            'uploader_name': 'BobBoberson',
        }
        json.update(data)
        return json

    def test_attachments_display(self):
        """thread posts show list of attachments below them"""
        post = self.thread.first_post
        # two plain files and one image (the image has a thumb url)
        post.attachments_cache = [
            self.mock_attachment_cache({
                'url': {
                    'index': '/attachment/loremipsum-123/',
                    'thumb': None,
                    'uploader': '/user/bobboberson-123/',
                },
                'filename': 'Archiwum-1.zip',
            }),
            self.mock_attachment_cache({
                'url': {
                    'index': '/attachment/loremipsum-223/',
                    'thumb': '/attachment/thumb/loremipsum-223/',
                    'uploader': '/user/bobboberson-223/',
                },
                'is_image': True,
                'filename': 'Archiwum-2.zip',
            }),
            self.mock_attachment_cache({
                'url': {
                    'index': '/attachment/loremipsum-323/',
                    'thumb': None,
                    'uploader': '/user/bobboberson-323/',
                },
                'filename': 'Archiwum-3.zip',
            }),
        ]
        post.save()
        # attachments render
        response = self.client.get(self.thread.get_absolute_url())
        for attachment in post.attachments_cache:
            self.assertContains(response, attachment['filename'])
            self.assertContains(response, attachment['uploader_name'])
            self.assertContains(response, attachment['url']['index'])
            self.assertContains(response, attachment['url']['uploader'])
            if attachment['url']['thumb']:
                self.assertContains(response, attachment['url']['thumb'])
class ThreadPollViewTests(ThreadViewTestCase):
    """Smoke tests for rendering a poll on the thread view."""

    def test_poll_voted_display(self):
        """view has no showstoppers when displaying voted poll"""
        poll = testutils.post_poll(self.thread, self.user)
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, '4 votes')
        # a voted poll shows results, not the vote form
        self.assertNotContains(response, 'Save your vote')

    def test_poll_unvoted_display(self):
        """view has no showstoppers when displaying poll vote form"""
        poll = testutils.post_poll(self.thread, self.user)
        # delete the test user's votes so the vote form is shown
        poll.pollvote_set.all().delete()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, 'Save your vote')

    def test_poll_anonymous_view(self):
        """view has no showstoppers when displaying poll to anon user"""
        poll = testutils.post_poll(self.thread, self.user)
        self.logout_user()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, '4 votes')
        # anons can't vote, so no vote form
        self.assertNotContains(response, 'Save your vote')
class ThreadLikedPostsViewTests(ThreadViewTestCase):
    """Tests for serializing post like state into the thread view."""

    def test_liked_posts_display(self):
        """view has no showstoppers on displaying posts with likes"""
        testutils.like_post(self.thread.first_post, self.user)
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, '"is_liked": true')

    def test_liked_posts_no_permission(self):
        """
        view has no showstoppers on displaying posts with likes without perm
        """
        testutils.like_post(self.thread.first_post, self.user)
        self.override_acl({'can_see_posts_likes': 0})
        response = self.client.get(self.thread.get_absolute_url())
        # like state serializes as null when the user may not see likes
        self.assertNotContains(response, '"is_liked": true')
        self.assertNotContains(response, '"is_liked": false')
        self.assertContains(response, '"is_liked": null')
class ThreadAnonViewTests(ThreadViewTestCase):
    """Kitchen-sink smoke test of the thread view for anonymous users."""

    def test_anonymous_user_view_no_showstoppers_display(self):
        """kitchensink thread view has no showstoppers for anons"""
        # set up a thread with a poll, events (one hidden) and posts (one unapproved)
        poll = testutils.post_poll(self.thread, self.user)
        event = record_event(MockRequest(self.user), self.thread, 'closed')
        hidden_event = record_event(MockRequest(self.user), self.thread, 'opened')
        hide_post(self.user, hidden_event)
        unapproved_post = testutils.reply_thread(self.thread, is_unapproved=True)
        post = testutils.reply_thread(self.thread)
        self.logout_user()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        # anons never see hidden events or unapproved posts
        self.assertNotContains(response, hidden_event.get_absolute_url())
        self.assertNotContains(response, unapproved_post.get_absolute_url())
class ThreadUnicodeSupportTests(ThreadViewTestCase):
    def test_category_name(self):
        """unicode in category name causes no showstopper"""
        self.category.name = u'Łódź'
        # slug stays ASCII; only the display name is non-ASCII
        self.category.slug = 'Lodz'
        self.category.save()
        self.override_acl()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 200)
    def test_thread_title(self):
        """unicode in thread title causes no showstopper"""
        self.thread.title = u'Łódź'
        # slug stays ASCII; only the title is non-ASCII
        self.thread.slug = 'Lodz'
        self.thread.save()
        self.override_acl()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 200)
    def test_post_content(self):
        """unicode in post content causes no showstopper"""
        self.thread.first_post.original = u'Łódź'
        self.thread.first_post.parsed = u'<p>Łódź</p>'
        # keep the checksum valid so the post isn't flagged as tampered
        update_post_checksum(self.thread.first_post)
        self.thread.first_post.save()
        self.override_acl()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 200)
def test_user_rank(self):
"""unicode in user rank causes no showstopper"""
self.user.title = u'Łódź'
self.user.rank.name = u'Łódź'
self.user.rank.title = u'Łódź'
self.user.rank.save()
self.user.save()
self.override_acl()
response = self.client.get(self.thread.get_absolute_url())
self.assertEqual(response.status_code, 200) | misago/threads/tests/test_threadview.py | from misago.acl.testutils import override_acl
from misago.categories.models import Category
from misago.conf import settings
from misago.threads import testutils
from misago.threads.checksums import update_post_checksum
from misago.threads.events import record_event
from misago.threads.moderation import threads as threads_moderation
from misago.threads.moderation import hide_post
from misago.users.testutils import AuthenticatedUserTestCase
class MockRequest(object):
    """Minimal stand-in for a request object: exposes only `user` and `user_ip`."""

    def __init__(self, user):
        # moderation/event helpers read just these two attributes
        self.user_ip = '127.0.0.1'
        self.user = user
class ThreadViewTestCase(AuthenticatedUserTestCase):
    """Base case for thread-view tests: a signed-in user and one fresh thread."""

    def setUp(self):
        super(ThreadViewTestCase, self).setUp()
        # every test runs against a new thread in the default first category
        self.category = Category.objects.get(slug='first-category')
        self.thread = testutils.post_thread(category=self.category)

    def override_acl(self, acl=None):
        """Replace the test user's ACL for ``self.category``.

        Starts from a restrictive baseline (may see and browse, nothing else)
        and applies the optional *acl* overrides on top of it.
        """
        category_acl = self.user.acl_cache['categories'][self.category.pk]
        category_acl.update({
            'can_see': 1,
            'can_browse': 1,
            'can_see_all_threads': 1,
            'can_see_own_threads': 0,
            'can_hide_threads': 0,
            'can_approve_content': 0,
            'can_edit_posts': 0,
            'can_hide_posts': 0,
            'can_hide_own_posts': 0,
            'can_close_threads': 0,
            'post_edit_time': 0,
            'can_hide_events': 0,
        })
        if acl:
            category_acl.update(acl)
        override_acl(self.user, {
            'categories': {
                self.category.pk: category_acl,
            },
        })
class ThreadVisibilityTests(ThreadViewTestCase):
    """Tests for who may open the thread view at all."""

    def test_thread_displays(self):
        """thread view has no showstoppers"""
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)

    def test_view_shows_owner_thread(self):
        """view handles "owned threads" only"""
        # with can_see_all_threads off, other users' threads 404
        self.override_acl({'can_see_all_threads': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)
        # the user's own thread stays visible
        self.thread.starter = self.user
        self.thread.save()
        self.override_acl({'can_see_all_threads': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)

    def test_view_validates_category_permissions(self):
        """view validates category visibility"""
        # 404 (not 403) so the thread's existence isn't leaked
        self.override_acl({'can_see': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)
        self.override_acl({'can_browse': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)

    def test_view_shows_unapproved_thread(self):
        """view handles unapproved thread"""
        self.override_acl({'can_approve_content': 0})
        self.thread.is_unapproved = True
        self.thread.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)
        # grant permission to see unapproved content
        self.override_acl({'can_approve_content': 1})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)
        # make test user thread's owner and remove permission to see unapproved
        # user should be able to see thread as its author anyway
        self.thread.starter = self.user
        self.thread.save()
        self.override_acl({'can_approve_content': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)

    def test_view_shows_hidden_thread(self):
        """view handles hidden thread"""
        self.override_acl({'can_hide_threads': 0})
        self.thread.is_hidden = True
        self.thread.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)
        # threads owners are not exempt from hidden threads check
        self.thread.starter = self.user
        self.thread.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 404)
        # grant permission to see hidden content
        self.override_acl({'can_hide_threads': 1})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.thread.title)
class ThreadPostsVisibilityTests(ThreadViewTestCase):
    """Tests for which posts (and which post contents) render in the thread view."""

    def test_post_renders(self):
        """post renders"""
        post = testutils.reply_thread(self.thread, poster=self.user)
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())

    def test_invalid_post_renders(self):
        """invalid post renders"""
        # fiddling with parsed content without updating the checksum makes
        # the post fail its integrity check, hiding its contents
        post = testutils.reply_thread(self.thread, poster=self.user)
        post.parsed = 'fiddled post content'
        post.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post's contents cannot be displayed.")
        self.assertNotContains(response, post.parsed)

    def test_hidden_post_visibility(self):
        """hidden post renders correctly"""
        post = testutils.reply_thread(self.thread, message="Hello, I'm hidden post!")
        hide_post(self.user, post)
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertNotContains(response, post.parsed)
        # posts authors are not exempt from seeing hidden posts content
        # BUGFIX: was `post.posted_by = self.user`, which is not the author
        # field (see `poster` in test_unapproved_post_visibility) and left
        # this author-exemption check vacuous
        post.poster = self.user
        post.save()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertNotContains(response, post.parsed)
        # permission to hide own posts isn't enough to see post content
        self.override_acl({'can_hide_own_posts': 1})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertNotContains(response, post.parsed)
        # post's content is displayed after permission to see posts is granted
        self.override_acl({'can_hide_posts': 1})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(
            response, "This post is hidden. Only users with permission may see its contents."
        )
        self.assertNotContains(response, "This post is hidden. You cannot not see its contents.")
        self.assertContains(response, post.parsed)

    def test_unapproved_post_visibility(self):
        """unapproved post renders for its author and users with perm to approve content"""
        post = testutils.reply_thread(self.thread, is_unapproved=True)
        # post is hidden because we aren't its author nor a user with permission to approve
        response = self.client.get(self.thread.get_absolute_url())
        self.assertNotContains(response, post.get_absolute_url())
        # post displays because we have permission to approve unapproved content
        self.override_acl({'can_approve_content': 1})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is unapproved.")
        self.assertContains(response, post.parsed)
        # post displays because we are its author
        post.poster = self.user
        post.save()
        self.override_acl({'can_approve_content': 0})
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        self.assertContains(response, "This post is unapproved.")
        self.assertContains(response, post.parsed)
class ThreadEventVisibilityTests(ThreadViewTestCase):
    """Tests for rendering of moderation events on the thread view."""

    def test_thread_events_render(self):
        """different thread events render"""
        # (moderation action, message the rendered event must contain)
        TEST_ACTIONS = [
            (threads_moderation.pin_thread_globally, "Thread has been pinned globally."),
            (threads_moderation.pin_thread_locally, "Thread has been pinned locally."),
            (threads_moderation.unpin_thread, "Thread has been unpinned."),
            (threads_moderation.approve_thread, "Thread has been approved."),
            (threads_moderation.close_thread, "Thread has been closed."),
            (threads_moderation.open_thread, "Thread has been opened."),
            (threads_moderation.hide_thread, "Thread has been made hidden."),
            (threads_moderation.unhide_thread, "Thread has been revealed."),
        ]
        # thread starts unapproved so approve_thread records an event too
        self.thread.is_unapproved = True
        self.thread.save()
        for action, message in TEST_ACTIONS:
            # ACL override has to be re-applied before every request
            self.override_acl({'can_approve_content': 1, 'can_hide_threads': 1})
            # wipe events from previous iteration so index [0] is this action's event
            self.thread.post_set.filter(is_event=True).delete()
            action(MockRequest(self.user), self.thread)
            event = self.thread.post_set.filter(is_event=True)[0]
            # event renders
            response = self.client.get(self.thread.get_absolute_url())
            self.assertContains(response, event.get_absolute_url())
            self.assertContains(response, message)
            # hidden events don't render without permission
            hide_post(self.user, event)
            self.override_acl({'can_approve_content': 1, 'can_hide_threads': 1})
            response = self.client.get(self.thread.get_absolute_url())
            self.assertNotContains(response, event.get_absolute_url())
            self.assertNotContains(response, message)
            # hidden event renders with permission
            # NOTE(review): event is already hidden at this point, so this second
            # hide_post looks like a no-op -- confirm intent
            hide_post(self.user, event)
            self.override_acl({
                'can_approve_content': 1,
                'can_hide_threads': 1,
                'can_hide_events': 1,
            })
            response = self.client.get(self.thread.get_absolute_url())
            self.assertContains(response, event.get_absolute_url())
            self.assertContains(response, message)
            self.assertContains(response, "Hidden by")
            # Event is only loaded if thread has events flag
            self.thread.has_events = False
            self.thread.save()
            self.override_acl({
                'can_approve_content': 1,
                'can_hide_threads': 1,
                'can_hide_events': 1,
            })
            response = self.client.get(self.thread.get_absolute_url())
            self.assertNotContains(response, event.get_absolute_url())

    def test_events_limit(self):
        """forum will trim oldest events if there's more than allowed by config"""
        events_limit = settings.MISAGO_EVENTS_PER_PAGE
        events = []
        # record 5 more events than the page limit
        for _ in range(events_limit + 5):
            event = record_event(MockRequest(self.user), self.thread, 'closed')
            events.append(event)
        # test that only events within limits were rendered
        response = self.client.get(self.thread.get_absolute_url())
        # newest events_limit events render...
        for event in events[5:]:
            self.assertContains(response, event.get_absolute_url())
        # ...while the 5 oldest are trimmed
        for event in events[:5]:
            self.assertNotContains(response, event.get_absolute_url())

    def test_events_dont_take_space(self):
        """events dont take space away from posts"""
        posts_limit = settings.MISAGO_POSTS_PER_PAGE
        events_limit = settings.MISAGO_EVENTS_PER_PAGE
        events = []
        for _ in range(events_limit + 5):
            event = record_event(MockRequest(self.user), self.thread, 'closed')
            events.append(event)
        # first post already exists, so posts_limit - 1 replies fill page one
        posts = []
        for _ in range(posts_limit - 1):
            post = testutils.reply_thread(self.thread)
            posts.append(post)
        # test that all events and posts within limits were rendered
        response = self.client.get(self.thread.get_absolute_url())
        for event in events[5:]:
            self.assertContains(response, event.get_absolute_url())
        for post in posts:
            self.assertContains(response, post.get_absolute_url())
        # add second page to thread with more events
        for _ in range(posts_limit):
            post = testutils.reply_thread(self.thread)
        for _ in range(events_limit):
            event = record_event(MockRequest(self.user), self.thread, 'closed')
            events.append(event)
        # see first page
        response = self.client.get(self.thread.get_absolute_url())
        for event in events[5:events_limit]:
            self.assertContains(response, event.get_absolute_url())
        for post in posts[:posts_limit - 1]:
            self.assertContains(response, post.get_absolute_url())
        # see second page
        response = self.client.get('%s2/' % self.thread.get_absolute_url())
        for event in events[5 + events_limit:]:
            self.assertContains(response, event.get_absolute_url())
        for post in posts[posts_limit - 1:]:
            self.assertContains(response, post.get_absolute_url())

    def test_changed_thread_title_event_renders(self):
        """changed thread title event renders"""
        threads_moderation.change_thread_title(
            MockRequest(self.user), self.thread, "Lorem renamed ipsum!"
        )
        event = self.thread.post_set.filter(is_event=True)[0]
        self.assertEqual(event.event_type, 'changed_title')
        # event renders
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, "title has been changed from")
        self.assertContains(response, self.thread.title)

    def test_thread_move_event_renders(self):
        """moved thread event renders"""
        # park the thread in the parent category first so the move is real
        self.thread.category = self.thread.category.parent
        self.thread.save()
        threads_moderation.move_thread(MockRequest(self.user), self.thread, self.category)
        event = self.thread.post_set.filter(is_event=True)[0]
        self.assertEqual(event.event_type, 'moved')
        # event renders
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, "Thread has been moved from")

    def test_thread_merged_event_renders(self):
        """merged thread event renders"""
        other_thread = testutils.post_thread(category=self.category)
        threads_moderation.merge_thread(MockRequest(self.user), self.thread, other_thread)
        event = self.thread.post_set.filter(is_event=True)[0]
        self.assertEqual(event.event_type, 'merged')
        # event renders
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, "thread has been merged into this thread")
class ThreadAttachmentsViewTests(ThreadViewTestCase):
    """Tests for rendering post attachments on the thread view."""

    def mock_attachment_cache(self, data):
        """Return a fake attachments_cache entry: defaults overridden by *data*."""
        json = {
            'url': {},
            'size': 16914,
            'filename': 'Archiwum.zip',
            'filetype': 'ZIP',
            'is_image': False,
            'uploaded_on': '2016-10-22T21:17:40.408710Z',
            'uploader_name': 'BobBoberson',
        }
        json.update(data)
        return json

    def test_attachments_display(self):
        """thread posts show list of attachments below them"""
        post = self.thread.first_post
        # two plain files and one image (the image has a thumb url)
        post.attachments_cache = [
            self.mock_attachment_cache({
                'url': {
                    'index': '/attachment/loremipsum-123/',
                    'thumb': None,
                    'uploader': '/user/bobboberson-123/',
                },
                'filename': 'Archiwum-1.zip',
            }),
            self.mock_attachment_cache({
                'url': {
                    'index': '/attachment/loremipsum-223/',
                    'thumb': '/attachment/thumb/loremipsum-223/',
                    'uploader': '/user/bobboberson-223/',
                },
                'is_image': True,
                'filename': 'Archiwum-2.zip',
            }),
            self.mock_attachment_cache({
                'url': {
                    'index': '/attachment/loremipsum-323/',
                    'thumb': None,
                    'uploader': '/user/bobboberson-323/',
                },
                'filename': 'Archiwum-3.zip',
            }),
        ]
        post.save()
        # attachments render
        response = self.client.get(self.thread.get_absolute_url())
        for attachment in post.attachments_cache:
            self.assertContains(response, attachment['filename'])
            self.assertContains(response, attachment['uploader_name'])
            self.assertContains(response, attachment['url']['index'])
            self.assertContains(response, attachment['url']['uploader'])
            if attachment['url']['thumb']:
                self.assertContains(response, attachment['url']['thumb'])
class ThreadPollViewTests(ThreadViewTestCase):
    """Smoke tests for rendering a poll on the thread view."""

    def test_poll_voted_display(self):
        """view has no showstoppers when displaying voted poll"""
        poll = testutils.post_poll(self.thread, self.user)
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, '4 votes')
        # a voted poll shows results, not the vote form
        self.assertNotContains(response, 'Save your vote')

    def test_poll_unvoted_display(self):
        """view has no showstoppers when displaying poll vote form"""
        poll = testutils.post_poll(self.thread, self.user)
        # delete the test user's votes so the vote form is shown
        poll.pollvote_set.all().delete()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, 'Save your vote')

    def test_poll_anonymous_view(self):
        """view has no showstoppers when displaying poll to anon user"""
        poll = testutils.post_poll(self.thread, self.user)
        self.logout_user()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, '4 votes')
        # anons can't vote, so no vote form
        self.assertNotContains(response, 'Save your vote')
class ThreadLikedPostsViewTests(ThreadViewTestCase):
    """Tests for serializing post like state into the thread view."""

    def test_liked_posts_display(self):
        """view has no showstoppers on displaying posts with likes"""
        testutils.like_post(self.thread.first_post, self.user)
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, '"is_liked": true')

    def test_liked_posts_no_permission(self):
        """
        view has no showstoppers on displaying posts with likes without perm
        """
        testutils.like_post(self.thread.first_post, self.user)
        self.override_acl({'can_see_posts_likes': 0})
        response = self.client.get(self.thread.get_absolute_url())
        # like state serializes as null when the user may not see likes
        self.assertNotContains(response, '"is_liked": true')
        self.assertNotContains(response, '"is_liked": false')
        self.assertContains(response, '"is_liked": null')
class ThreadAnonViewTests(ThreadViewTestCase):
    """Kitchen-sink smoke test of the thread view for anonymous users."""

    def test_anonymous_user_view_no_showstoppers_display(self):
        """kitchensink thread view has no showstoppers for anons"""
        # set up a thread with a poll, events (one hidden) and posts (one unapproved)
        poll = testutils.post_poll(self.thread, self.user)
        event = record_event(MockRequest(self.user), self.thread, 'closed')
        hidden_event = record_event(MockRequest(self.user), self.thread, 'opened')
        hide_post(self.user, hidden_event)
        unapproved_post = testutils.reply_thread(self.thread, is_unapproved=True)
        post = testutils.reply_thread(self.thread)
        self.logout_user()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, poll.question)
        self.assertContains(response, event.get_absolute_url())
        self.assertContains(response, post.get_absolute_url())
        # anons never see hidden events or unapproved posts
        self.assertNotContains(response, hidden_event.get_absolute_url())
        self.assertNotContains(response, unapproved_post.get_absolute_url())
class ThreadUnicodeSupportTests(ThreadViewTestCase):
    def test_category_name(self):
        """unicode in category name causes no showstopper"""
        self.category.name = u'Łódź'
        # slug stays ASCII; only the display name is non-ASCII
        self.category.slug = 'Lodz'
        self.category.save()
        self.override_acl()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 200)
    def test_thread_title(self):
        """unicode in thread title causes no showstopper"""
        self.thread.title = u'Łódź'
        # slug stays ASCII; only the title is non-ASCII
        self.thread.slug = 'Lodz'
        self.thread.save()
        self.override_acl()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 200)
    def test_post_content(self):
        """unicode in post content causes no showstopper"""
        self.thread.first_post.original = u'Łódź'
        self.thread.first_post.parsed = u'<p>Łódź</p>'
        # keep the checksum valid so the post isn't flagged as tampered
        update_post_checksum(self.thread.first_post)
        self.thread.first_post.save()
        self.override_acl()
        response = self.client.get(self.thread.get_absolute_url())
        self.assertEqual(response.status_code, 200)
def test_user_rank(self):
"""unicode in user rank causes no showstopper"""
self.user.title = u'Łódź'
self.user.rank.name = u'Łódź'
self.user.rank.title = u'Łódź'
self.user.rank.save()
self.user.save()
self.override_acl()
response = self.client.get(self.thread.get_absolute_url())
self.assertEqual(response.status_code, 200) | 0.765067 | 0.239549 |
import arrow
from purepage.ext import r, db, abort
class Admin:
"""
后台管理
$shared:
user:
id?str: 用户ID
role?str: 角色
email?email&optional: 邮箱
github?url&optional: Github地址
avatar?url&default="http://purepage.org/static/avatar-default.png": 头像
date_create?datetime&optional: 创建时间
date_modify?datetime&optional: 修改时间
timestamp?int&optional: 安全时间戳
lastlogin_date?datetime&optional: 最近登录时间
lastlogin_ip?ipv4&optional: 最近登录IP
lastlogin_ua?str&optional: 最近登录设备UserAgent
""" # noqa
def put(self, id, role, email):
"""
修改帐号信息
$input:
id?str: 用户ID
role?str: 角色
email?email: 邮箱
$output: @message
"""
if role == "root":
abort(403, "PermissionDeny", "不能设为root帐号")
db.run(
r.table("user").get(id).update({
"role": role,
"email": email,
"date_modify": arrow.utcnow().datetime,
"timestamp": arrow.utcnow().timestamp
})
)
return {"message": "OK"}
    def get(self, account):
        """
        查找帐号
        $input:
            account?str: 用户名或邮箱
        $output: @user
        $error:
            404.NotFound: 用户不存在
        """
        # primary-key lookup first, then fall back to the secondary email index
        user = db.run(r.table("user").get(account))
        if not user:
            user = db.first(r.table("user").get_all(account, index="email"))
        if not user:
            abort(404, "NotFound", "用户不存在")
        return user
    def get_list(self, page, per_page):
        """
        查看所有用户
        $input: @pagging
        $output:
            - @user
        """
        # pagination is delegated to the shared db helper
        return db.pagging(r.table("user"), page, per_page)
def delete(self, id):
"""
删除帐号
$input:
id?str: ID
$output: @message
"""
user = db.run(r.table("user").get(id))
if user and user["role"] == "root":
abort(403, "PermissionDeny", "root帐号无法删除")
db.run(r.table("user").get(id).delete())
return {"message": "OK"} | api/purepage/views/admin.py | import arrow
from purepage.ext import r, db, abort
class Admin:
"""
后台管理
$shared:
user:
id?str: 用户ID
role?str: 角色
email?email&optional: 邮箱
github?url&optional: Github地址
avatar?url&default="http://purepage.org/static/avatar-default.png": 头像
date_create?datetime&optional: 创建时间
date_modify?datetime&optional: 修改时间
timestamp?int&optional: 安全时间戳
lastlogin_date?datetime&optional: 最近登录时间
lastlogin_ip?ipv4&optional: 最近登录IP
lastlogin_ua?str&optional: 最近登录设备UserAgent
""" # noqa
def put(self, id, role, email):
"""
修改帐号信息
$input:
id?str: 用户ID
role?str: 角色
email?email: 邮箱
$output: @message
"""
if role == "root":
abort(403, "PermissionDeny", "不能设为root帐号")
db.run(
r.table("user").get(id).update({
"role": role,
"email": email,
"date_modify": arrow.utcnow().datetime,
"timestamp": arrow.utcnow().timestamp
})
)
return {"message": "OK"}
def get(self, account):
"""
查找帐号
$input:
account?str: 用户名或邮箱
$output: @user
$error:
404.NotFound: 用户不存在
"""
user = db.run(r.table("user").get(account))
if not user:
user = db.first(r.table("user").get_all(account, index="email"))
if not user:
abort(404, "NotFound", "用户不存在")
return user
def get_list(self, page, per_page):
"""
查看所有用户
$input: @pagging
$output:
- @user
"""
return db.pagging(r.table("user"), page, per_page)
def delete(self, id):
"""
删除帐号
$input:
id?str: ID
$output: @message
"""
user = db.run(r.table("user").get(id))
if user and user["role"] == "root":
abort(403, "PermissionDeny", "root帐号无法删除")
db.run(r.table("user").get(id).delete())
return {"message": "OK"} | 0.192463 | 0.175079 |
import torch
import torch.nn as nn
from typing import Dict
class ApexAgent(torch.jit.ScriptModule):
__constants__ = ["multi_step", "gamma"]
def __init__(self, net_cons, multi_step, gamma):
super().__init__()
self.net_cons = net_cons
self.multi_step = multi_step
self.gamma = gamma
self.online_net = net_cons()
self.target_net = net_cons()
@classmethod
def clone(cls, model, device):
cloned = cls(model.net_cons, model.multi_step, model.gamma)
cloned.load_state_dict(model.state_dict())
return cloned.to(device)
def sync_target_with_online(self):
self.target_net.load_state_dict(self.online_net.state_dict())
@torch.jit.script_method
def td_err(
self,
obs: Dict[str, torch.Tensor],
action: Dict[str, torch.Tensor],
reward: torch.Tensor,
bootstrap: torch.Tensor,
next_obs: Dict[str, torch.Tensor],
) -> torch.Tensor:
online_q = self.online_net(obs)
online_qa = online_q.gather(1, action["a"].unsqueeze(1)).squeeze(1)
online_next_a = self.greedy_act(next_obs)
bootstrap_q = self.target_net(next_obs)
bootstrap_qa = bootstrap_q.gather(1, online_next_a.unsqueeze(1)).squeeze(1)
target = reward + bootstrap * (self.gamma ** self.multi_step) * bootstrap_qa
return target.detach() - online_qa
@torch.jit.script_method
def greedy_act(self, obs: Dict[str, torch.Tensor]) -> torch.Tensor:
legal_move = obs["legal_move"]
q = self.online_net(obs).detach()
legal_q = (1 + q - q.min()) * legal_move
# legal_q > 0 for legal_move and maintain correct orders
greedy_action = legal_q.argmax(1)
return greedy_action
@torch.jit.script_method
def act(self, obs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
greedy_action = self.greedy_act(obs)
eps = obs["eps"].squeeze(1)
random_action = obs["legal_move"].multinomial(1).squeeze(1)
rand = torch.rand(greedy_action.size(0), device=greedy_action.device)
rand = (rand < eps).long()
action = (greedy_action * (1 - rand) + random_action * rand).long()
return {"a": action.detach().cpu()}
@torch.jit.script_method
def compute_priority(
self,
obs: Dict[str, torch.Tensor],
action: Dict[str, torch.Tensor],
reward: torch.Tensor,
terminal: torch.Tensor,
bootstrap: torch.Tensor,
next_obs: Dict[str, torch.Tensor],
) -> torch.Tensor:
err = self.td_err(obs, action, reward, bootstrap, next_obs)
return err.detach().abs().cpu()
def loss(self, batch):
"""
returns the loss and priority
"""
err = self.td_err(
batch.obs, batch.action, batch.reward, batch.bootstrap, batch.next_obs
)
loss = nn.functional.smooth_l1_loss(
err, torch.zeros_like(err), reduction="none"
)
priority = err.detach().abs().cpu()
return loss, priority | pyrela/apex.py |
import torch
import torch.nn as nn
from typing import Dict
class ApexAgent(torch.jit.ScriptModule):
__constants__ = ["multi_step", "gamma"]
def __init__(self, net_cons, multi_step, gamma):
super().__init__()
self.net_cons = net_cons
self.multi_step = multi_step
self.gamma = gamma
self.online_net = net_cons()
self.target_net = net_cons()
@classmethod
def clone(cls, model, device):
cloned = cls(model.net_cons, model.multi_step, model.gamma)
cloned.load_state_dict(model.state_dict())
return cloned.to(device)
def sync_target_with_online(self):
self.target_net.load_state_dict(self.online_net.state_dict())
@torch.jit.script_method
def td_err(
self,
obs: Dict[str, torch.Tensor],
action: Dict[str, torch.Tensor],
reward: torch.Tensor,
bootstrap: torch.Tensor,
next_obs: Dict[str, torch.Tensor],
) -> torch.Tensor:
online_q = self.online_net(obs)
online_qa = online_q.gather(1, action["a"].unsqueeze(1)).squeeze(1)
online_next_a = self.greedy_act(next_obs)
bootstrap_q = self.target_net(next_obs)
bootstrap_qa = bootstrap_q.gather(1, online_next_a.unsqueeze(1)).squeeze(1)
target = reward + bootstrap * (self.gamma ** self.multi_step) * bootstrap_qa
return target.detach() - online_qa
@torch.jit.script_method
def greedy_act(self, obs: Dict[str, torch.Tensor]) -> torch.Tensor:
legal_move = obs["legal_move"]
q = self.online_net(obs).detach()
legal_q = (1 + q - q.min()) * legal_move
# legal_q > 0 for legal_move and maintain correct orders
greedy_action = legal_q.argmax(1)
return greedy_action
@torch.jit.script_method
def act(self, obs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
greedy_action = self.greedy_act(obs)
eps = obs["eps"].squeeze(1)
random_action = obs["legal_move"].multinomial(1).squeeze(1)
rand = torch.rand(greedy_action.size(0), device=greedy_action.device)
rand = (rand < eps).long()
action = (greedy_action * (1 - rand) + random_action * rand).long()
return {"a": action.detach().cpu()}
@torch.jit.script_method
def compute_priority(
self,
obs: Dict[str, torch.Tensor],
action: Dict[str, torch.Tensor],
reward: torch.Tensor,
terminal: torch.Tensor,
bootstrap: torch.Tensor,
next_obs: Dict[str, torch.Tensor],
) -> torch.Tensor:
err = self.td_err(obs, action, reward, bootstrap, next_obs)
return err.detach().abs().cpu()
def loss(self, batch):
"""
returns the loss and priority
"""
err = self.td_err(
batch.obs, batch.action, batch.reward, batch.bootstrap, batch.next_obs
)
loss = nn.functional.smooth_l1_loss(
err, torch.zeros_like(err), reduction="none"
)
priority = err.detach().abs().cpu()
return loss, priority | 0.934954 | 0.503052 |
import sys
import os
sys.path.append("src")
args = sys.argv
if len(args) <= 1 :
print("Please specify a command.")
print("Usage recap : ")
print(" sample datapath tempdir nbr_samples")
print("filter datapath tempdir targetpath fieldname")
print("compute_institutions datapath institutionpath")
elif args[1] == "test" :
import WDM_test as test
if args[2] == "all" :
test.testAll()
elif args[2] == "filter" :
test.testFilter()
elif args[2] == "institutions" :
test.testInstitutions()
elif args[2] == "artToAuth" :
test.testArtToAuth()
elif args[2] == "citationCount" :
test.testCitationCount()
else :
print("Unknown test")
elif args[1] == "sample" :
from preliminarySort import getfields
datapath = args[2]
temp_dir = args[3]
nbr_samples = int(args[4])
print(getfields(nbr_samples, datapath, temp_dir))
elif args[1] == "filter" :
from preliminarySort import filter_data
from utilitary import clean_folder
datapath = args[2]
temp_path = args[3]
new_datapath = args[4]
fieldname = args[5]
clean_folder(new_datapath)
filter_data(fieldname, datapath, new_datapath, temp_path)
elif args[1] == "compute_institutions" :
from getInstitutions import putInstitutionsOnDisk as compInsti
from utilitary import clean_folder
datapath = args[2]
institupath = args[3]
compInsti(datapath, institupath)
elif args[1] == "artToAuth" :
from buildArtToAuth import putTableOnDisk
datapath = args[2]
targetpath = args[3]
putTableOnDisk(datapath, targetpath)
elif args[1] == "citation_count" :
from buildArtToAuth import getTableFromDisk
from citationCount import putTableOnDisk
datapath = args[2]
arttapath = args[3]
targetpath = args[4]
artToAuth = getTableFromDisk(arttapath)
timePeriods = [1990, 2000, 2010]
putTableOnDisk(datapath, targetpath, artToAuth, timePeriods)
elif args[1] == "citation_rings" :
from ringsCitations import putListsOnDisk
from citationCount import getTableFromDisk
citacountpath = args[2]
targetpath = args[3]
citationCount = getTableFromDisk(citacountpath)
putListsOnDisk(citationCount, targetpath)
elif args[1] == "self_citation" :
from selfCitation import printStatsFromTables
from citationCount import getTableFromDisk
citacountpath = args[2]
citationCount = getTableFromDisk(citacountpath)
printStatsFromTables(citationCount)
elif args[1] == "parasite" :
from main_Parasite import __main_parasite__ as parasite
datapath = args[2]
maxP = int(args[3])
parasite(datapath, maxP)
else :
print("Unknown command") | main.py | import sys
import os
sys.path.append("src")
args = sys.argv
if len(args) <= 1 :
print("Please specify a command.")
print("Usage recap : ")
print(" sample datapath tempdir nbr_samples")
print("filter datapath tempdir targetpath fieldname")
print("compute_institutions datapath institutionpath")
elif args[1] == "test" :
import WDM_test as test
if args[2] == "all" :
test.testAll()
elif args[2] == "filter" :
test.testFilter()
elif args[2] == "institutions" :
test.testInstitutions()
elif args[2] == "artToAuth" :
test.testArtToAuth()
elif args[2] == "citationCount" :
test.testCitationCount()
else :
print("Unknown test")
elif args[1] == "sample" :
from preliminarySort import getfields
datapath = args[2]
temp_dir = args[3]
nbr_samples = int(args[4])
print(getfields(nbr_samples, datapath, temp_dir))
elif args[1] == "filter" :
from preliminarySort import filter_data
from utilitary import clean_folder
datapath = args[2]
temp_path = args[3]
new_datapath = args[4]
fieldname = args[5]
clean_folder(new_datapath)
filter_data(fieldname, datapath, new_datapath, temp_path)
elif args[1] == "compute_institutions" :
from getInstitutions import putInstitutionsOnDisk as compInsti
from utilitary import clean_folder
datapath = args[2]
institupath = args[3]
compInsti(datapath, institupath)
elif args[1] == "artToAuth" :
from buildArtToAuth import putTableOnDisk
datapath = args[2]
targetpath = args[3]
putTableOnDisk(datapath, targetpath)
elif args[1] == "citation_count" :
from buildArtToAuth import getTableFromDisk
from citationCount import putTableOnDisk
datapath = args[2]
arttapath = args[3]
targetpath = args[4]
artToAuth = getTableFromDisk(arttapath)
timePeriods = [1990, 2000, 2010]
putTableOnDisk(datapath, targetpath, artToAuth, timePeriods)
elif args[1] == "citation_rings" :
from ringsCitations import putListsOnDisk
from citationCount import getTableFromDisk
citacountpath = args[2]
targetpath = args[3]
citationCount = getTableFromDisk(citacountpath)
putListsOnDisk(citationCount, targetpath)
elif args[1] == "self_citation" :
from selfCitation import printStatsFromTables
from citationCount import getTableFromDisk
citacountpath = args[2]
citationCount = getTableFromDisk(citacountpath)
printStatsFromTables(citationCount)
elif args[1] == "parasite" :
from main_Parasite import __main_parasite__ as parasite
datapath = args[2]
maxP = int(args[3])
parasite(datapath, maxP)
else :
print("Unknown command") | 0.087367 | 0.397237 |
import yaml
import os
import re
import train, test
from model import model_lstm
import logging
import torch
import torch.distributed as dist
import torch.nn as nn
# General config
def load_config(path):
''' Loads config file.
Args:
path (str): path to config file
default_path (bool): whether to use default path
'''
# Load configuration from file itself
with open(path, 'r') as f:
cfg_special = yaml.load(f, Loader=yaml.Loader)
return cfg_special
def update_recursive(dict1, dict2):
''' Update two config dictionaries recursively.
Args:
dict1 (dict): first dictionary to be updated
dict2 (dict): second dictionary which entries should be used
'''
for k, v in dict2.items():
if k not in dict1:
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v
def build_model(args, cfg):
device = torch.device(
f"cuda:{args.local_rank}" if torch.cuda.is_available() else "cpu"
)
with open(cfg['model']['model_yaml'], 'r') as f:
cfg_special = yaml.load(f, Loader=yaml.Loader)
update_recursive(cfg, cfg_special)
model_name = cfg['model']['model_name']
if model_name == 'resnet_lstm':
model = model_lstm.ResNetLSTMSeqNet(cfg)
## multi gpu?
if cfg['train']['use_multi_gpu']:
logging.info(f"torch.cuda.device_count() {torch.cuda.device_count()}")
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
dist.barrier()
# SyncBN
model = nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device).cuda(args.local_rank)
network = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
broadcast_buffers=False, find_unused_parameters=True)
total_params = network.module.get_num_params()
else:
network = model.to(device)
total_params = network.get_num_params()
logging.info(f'Network "{model_name}" loaded to device {device}, device num {torch.cuda.device_count()}')
logging.info(f"Total number of parameters: {total_params}")
return network
def tryint(s):
try:
return int(s)
except ValueError:
return s
def str2int(v_str):
return [tryint(sub_str) for sub_str in re.split('([0-9]+)', v_str)]
def GetBestModel(path):
names = sorted(os.listdir(path+"/"), key=str2int)
files=[]
for name in names:
if os.path.isfile(os.path.join(os.path.abspath(path), name)):
files.append(name)
# files.sort()
model = os.path.join(os.path.abspath(path), files[-1])
logging.info(f"load model: {model}")
return model
def build_trainer(args, cfg, model, **kwargs):
start_epoch = 0
optim = torch.optim.Adam if cfg['train']['optimizer']['method'] == 'Adam' else torch.optim.SGD
optimizer = optim(model.parameters(), cfg['train']['optimizer']['learning_rate'],
weight_decay=cfg['train']['optimizer']['weight_decay'])
if cfg['train']['use_pretrain_model']:
checkpoint = torch.load(GetBestModel(os.path.join(cfg['train']['out_dir'], "checkpoints")))
start_epoch = checkpoint.get("epoch", 0)
if cfg['train']['use_multi_gpu']:
model.module.load_state_dict(checkpoint.get("model_state_dict"))
else:
model.load_state_dict(checkpoint.get("model_state_dict"))
optimizer.load_state_dict(checkpoint.get("optimizer_state_dict"))
logging.info(f"Continue from epoch {start_epoch}")
return train.trainer(args, cfg, model=model, optimizer=optimizer, start_epoch=start_epoch)
def build_tester(args, cfg, model, **kwargs):
device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu"
)
checkpoint = torch.load(GetBestModel(os.path.join(cfg['train']['out_dir'], "checkpoints")), map_location=device)
if cfg['train']['use_multi_gpu']:
model.module.load_state_dict(checkpoint.get("model_state_dict"))
else:
model.load_state_dict(checkpoint.get("model_state_dict"))
model.eval()
tester = test.tester(args, cfg, model)
return tester | config/configer.py | import yaml
import os
import re
import train, test
from model import model_lstm
import logging
import torch
import torch.distributed as dist
import torch.nn as nn
# General config
def load_config(path):
''' Loads config file.
Args:
path (str): path to config file
default_path (bool): whether to use default path
'''
# Load configuration from file itself
with open(path, 'r') as f:
cfg_special = yaml.load(f, Loader=yaml.Loader)
return cfg_special
def update_recursive(dict1, dict2):
''' Update two config dictionaries recursively.
Args:
dict1 (dict): first dictionary to be updated
dict2 (dict): second dictionary which entries should be used
'''
for k, v in dict2.items():
if k not in dict1:
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v
def build_model(args, cfg):
device = torch.device(
f"cuda:{args.local_rank}" if torch.cuda.is_available() else "cpu"
)
with open(cfg['model']['model_yaml'], 'r') as f:
cfg_special = yaml.load(f, Loader=yaml.Loader)
update_recursive(cfg, cfg_special)
model_name = cfg['model']['model_name']
if model_name == 'resnet_lstm':
model = model_lstm.ResNetLSTMSeqNet(cfg)
## multi gpu?
if cfg['train']['use_multi_gpu']:
logging.info(f"torch.cuda.device_count() {torch.cuda.device_count()}")
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
dist.barrier()
# SyncBN
model = nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device).cuda(args.local_rank)
network = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
broadcast_buffers=False, find_unused_parameters=True)
total_params = network.module.get_num_params()
else:
network = model.to(device)
total_params = network.get_num_params()
logging.info(f'Network "{model_name}" loaded to device {device}, device num {torch.cuda.device_count()}')
logging.info(f"Total number of parameters: {total_params}")
return network
def tryint(s):
try:
return int(s)
except ValueError:
return s
def str2int(v_str):
return [tryint(sub_str) for sub_str in re.split('([0-9]+)', v_str)]
def GetBestModel(path):
names = sorted(os.listdir(path+"/"), key=str2int)
files=[]
for name in names:
if os.path.isfile(os.path.join(os.path.abspath(path), name)):
files.append(name)
# files.sort()
model = os.path.join(os.path.abspath(path), files[-1])
logging.info(f"load model: {model}")
return model
def build_trainer(args, cfg, model, **kwargs):
start_epoch = 0
optim = torch.optim.Adam if cfg['train']['optimizer']['method'] == 'Adam' else torch.optim.SGD
optimizer = optim(model.parameters(), cfg['train']['optimizer']['learning_rate'],
weight_decay=cfg['train']['optimizer']['weight_decay'])
if cfg['train']['use_pretrain_model']:
checkpoint = torch.load(GetBestModel(os.path.join(cfg['train']['out_dir'], "checkpoints")))
start_epoch = checkpoint.get("epoch", 0)
if cfg['train']['use_multi_gpu']:
model.module.load_state_dict(checkpoint.get("model_state_dict"))
else:
model.load_state_dict(checkpoint.get("model_state_dict"))
optimizer.load_state_dict(checkpoint.get("optimizer_state_dict"))
logging.info(f"Continue from epoch {start_epoch}")
return train.trainer(args, cfg, model=model, optimizer=optimizer, start_epoch=start_epoch)
def build_tester(args, cfg, model, **kwargs):
device = torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu"
)
checkpoint = torch.load(GetBestModel(os.path.join(cfg['train']['out_dir'], "checkpoints")), map_location=device)
if cfg['train']['use_multi_gpu']:
model.module.load_state_dict(checkpoint.get("model_state_dict"))
else:
model.load_state_dict(checkpoint.get("model_state_dict"))
model.eval()
tester = test.tester(args, cfg, model)
return tester | 0.661814 | 0.172276 |
import h5py
import numpy as np
class Dataset(object):
    """Read-only handler for Lorenz trajectory data stored in an HDF5 file.

    Collects every dataset whose name contains the requested view string,
    across all groups/subgroups of the file, and serves shuffled fixed-length
    subsequences of them.
    """

    def __init__(self, dataset_path, view='pca', view_dims=(0,), noise=0):
        """
        Create a new dataset handler for Lorenz data.
        :param dataset_path: The path to the HDF5 dataset.
        :param view: Which view to use, 'pca' or 'original'. 'pca' contains the PCA projected view on the data, where
                     the order of the basis vectors where ordered by the eigenvalues (so the first dimension correspond
                     to the leading eigenvalue.
        :param view_dims: The dimensions to view the data in, by default only the leading dimension is used, resulting
                          in a 1D principal view of the data. The tuple (0,1,2) would instead give the full data.
        :param noise: The amount of noise to add to the observation, this corresponds to the standard deviation of the
                      Gaussian noise added to each sample.
        """
        self.view = view
        self.view_dims = view_dims
        self.noise = noise
        self.dataset_path = dataset_path
        # Open read-only explicitly: relying on h5py's default mode is
        # deprecated and could otherwise create/modify the file on disk.
        self.store = h5py.File(dataset_path, 'r')
        self.group_names = list(self.store.keys())
        self.datasets = []
        # sorted() gives a deterministic dataset order across runs.
        for group_name, group in sorted(self.store.items()):
            for dataset_group_name, dataset_group in sorted(group.items()):
                group_datasets = [dataset for name, dataset in dataset_group.items() if view in name]
                if not group_datasets:
                    print("No datasets matching the view {} in group {}/{}".format(view, group_name, dataset_group_name))
                self.datasets.extend(group_datasets)
        self.dataset_shapes = [dataset.shape for dataset in self.datasets]

    def close(self):
        """Release the underlying HDF5 file handle."""
        self.store.close()

    def get_n_dims(self):
        """Return the number of observed dimensions (length of ``view_dims``)."""
        return len(self.view_dims)

    def random_iterator(self, batch_size, sequence_length, n_batches=None, rng=None):
        """Yield shuffled batches of shape (batch_size, sequence_length, n_dims).

        :param batch_size: Number of subsequences per batch.
        :param sequence_length: Length of each subsequence; sequences are cut
                                into non-overlapping windows of this length.
        :param n_batches: Optional cap on the number of batches yielded.
        :param rng: Optional numpy RandomState for reproducible shuffling/noise.
        """
        if rng is None:
            rng = np.random.RandomState()
        # Enumerate every (dataset, sequence, window) triple available.
        batch_indices = []
        for i, shape in enumerate(self.dataset_shapes):
            n, l, c = shape  # Number of sequences, length of sequences and number of channels
            for j in range(n):
                for k in range(l // sequence_length):
                    batch_indices.append((i, j, k))
        rng.shuffle(batch_indices)

        if n_batches is None:
            n_batches = len(batch_indices) // batch_size
        else:
            n_batches = min(len(batch_indices) // batch_size, n_batches)

        datasets = self.datasets
        for batch_i in range(n_batches):
            start = batch_i * batch_size
            end = start + batch_size
            indices = batch_indices[start: end]
            data = np.array([datasets[i][j, k*sequence_length: (k+1)*sequence_length][:, self.view_dims] for i, j, k in indices])
            if self.noise > 0:
                data += rng.normal(scale=self.noise, size=data.shape)
            yield data
import numpy as np
class Dataset(object):
    """Access layer for Lorenz trajectory data kept in an HDF5 file.

    Gathers every dataset whose name contains the chosen view string and
    serves randomly shuffled, fixed-length windows of the stored sequences.
    """

    def __init__(self, dataset_path, view='pca', view_dims=(0,), noise=0):
        """Open the HDF5 file and index all datasets matching *view*.

        :param dataset_path: Path of the HDF5 file to read.
        :param view: Name fragment selecting the stored view ('pca' or
                     'original'); for 'pca' the basis vectors are ordered by
                     eigenvalue, so dimension 0 is the leading component.
        :param view_dims: Which data dimensions to expose; the default (0,)
                          yields a 1D principal view, (0, 1, 2) the full data.
        :param noise: Standard deviation of Gaussian noise added per sample.
        """
        self.view = view
        self.view_dims = view_dims
        self.noise = noise
        self.dataset_path = dataset_path
        self.store = h5py.File(dataset_path)
        self.group_names = list(self.store.keys())
        self.datasets = []
        for outer_name, outer_group in sorted(self.store.items()):
            for inner_name, inner_group in sorted(outer_group.items()):
                matching = [ds for name, ds in inner_group.items() if view in name]
                if not matching:
                    print("No datasets matching the view {} in group {}/{}".format(view, outer_name, inner_name))
                self.datasets.extend(matching)
        self.dataset_shapes = [ds.shape for ds in self.datasets]

    def get_n_dims(self):
        """Number of observed dimensions (size of ``view_dims``)."""
        return len(self.view_dims)

    def random_iterator(self, batch_size, sequence_length, n_batches=None, rng=None):
        """Generate shuffled batches of non-overlapping subsequence windows.

        Each yielded array has shape (batch_size, sequence_length, n_dims),
        with optional Gaussian noise applied when ``self.noise > 0``.
        """
        if rng is None:
            rng = np.random.RandomState()
        # One (dataset, sequence, window) triple per available subsequence.
        chunk_ids = [
            (ds_idx, seq_idx, win_idx)
            for ds_idx, (n_seq, seq_len, _channels) in enumerate(self.dataset_shapes)
            for seq_idx in range(n_seq)
            for win_idx in range(seq_len // sequence_length)
        ]
        rng.shuffle(chunk_ids)

        limit = len(chunk_ids) // batch_size
        if n_batches is not None:
            limit = min(limit, n_batches)

        for b in range(limit):
            picked = chunk_ids[b * batch_size:(b + 1) * batch_size]
            batch = np.array([
                self.datasets[i][j, k * sequence_length:(k + 1) * sequence_length][:, self.view_dims]
                for i, j, k in picked
            ])
            if self.noise > 0:
                batch += rng.normal(scale=self.noise, size=batch.shape)
            yield batch
from collections import OrderedDict
import multiprocessing
import os
from typing import Tuple
from numba import jitclass, float64
import pandas as pd
from .util import compile_model, hash_string, DATA_DIR, CACHE_DIR, gev_cdf, random_gev
from .param import FloodPriorParameters
# NOTE(review): numba's `jitclass` moved to `numba.experimental.jitclass` in
# numba >= 0.49 — confirm the pinned numba version supports this import path.
@jitclass(
    OrderedDict(
        loc_base=float64,
        loc_trend=float64,
        coeff_var=float64,
        shape=float64,
        zero_time=float64,
        scale_min=float64,
    )
)
class FloodModel:
    """
    One sampled "state of the world": a fixed set of GEV storm-surge
    parameters with a linear time trend in the location parameter.
    """

    def __init__(
        self,
        loc_base: float,
        loc_trend: float,
        coeff_var: float,
        shape: float,
        zero_time: float,
        scale_min: float,
    ) -> None:
        """
        Build the model.

        loc_base: GEV location parameter at the reference year ``zero_time``
        loc_trend: linear change in the location parameter per year
        coeff_var: scale is modeled as ``coeff_var * location``
        shape: GEV shape parameter (held constant over time)
        zero_time: reference year the linear trend is anchored to
        scale_min: lower bound enforced on the scale parameter
        """
        self.loc_base = loc_base
        self.loc_trend = loc_trend
        self.coeff_var = coeff_var
        self.shape = shape
        self.zero_time = zero_time
        self.scale_min = scale_min

    def get_gev_param(self, year: float) -> Tuple[float, float, float]:
        """
        Get the location, scale, and shape parameter for a particular year.
        """
        year_adj = year - self.zero_time
        loc = self.loc_base + self.loc_trend * year_adj
        # Clamp the scale so it never collapses to (or below) zero when the
        # trending location is small or negative.
        scale = max(loc * self.coeff_var, self.scale_min)
        shape = self.shape
        return loc, scale, shape

    def simulate_flood(self, year: float) -> float:
        """
        Simulate a storm surge for a particular year (one GEV draw).
        """
        loc, scale, shape = self.get_gev_param(year=year)
        return random_gev(loc=loc, scale=scale, shape=shape)

    def calc_exceedance_prob(self, year: float, height: float) -> float:
        """
        Calculate the probability that the flood in `year` is greater than `height`.
        """
        loc, scale, shape = self.get_gev_param(year=year)
        return 1.0 - gev_cdf(x=height, loc=loc, scale=scale, shape=shape)
class FloodSimulationLibrary:
    """
    A large collection of posterior draws of the storm-surge GEV parameters,
    cached on disk and keyed by the prior parameters that produced them.
    """

    def __init__(self, param: FloodPriorParameters, **kwargs) -> None:
        """Load the simulation library from cache, or generate and cache it."""
        self.param = param
        cache_file = self.get_filename()
        try:
            self.simulations = pd.read_feather(cache_file)
        except IOError:
            # Cache miss: fit the Stan model and persist the draws.
            self.simulations = self.create_simulation_library(**kwargs)
            self.simulations.to_feather(cache_file)

    def get_filename(self) -> str:
        """Return a cache path uniquely derived from the prior parameters."""
        digest = hash_string(str(OrderedDict(self.param._asdict())))
        return os.path.join(CACHE_DIR, f"{digest}.feather")

    def create_simulation_library(
        self,
        n_sim: int = 100_000,
        n_chains: int = multiprocessing.cpu_count(),
        n_warmup: int = 2500,
    ) -> pd.DataFrame:
        """Fit the GEV trend model in Stan and return the posterior draws."""
        observations = pd.read_csv(
            os.path.join(DATA_DIR, "sea-level-annual.csv"), index_col="year"
        )
        stan_file = os.path.join(DATA_DIR, "gev-trend.stan")
        model = compile_model(filename=stan_file, model_name="stormsurge")
        stan_data = {
            "N": observations.shape[0],
            "y": observations["storm_surge"].values,
            "time": observations.index.values,
            # prior hyper-parameters
            "time_offset": self.param.zero_time,
            "loc_base_expected": self.param.loc_base_expected,
            "loc_base_std": self.param.loc_base_std,
            "loc_trend_expected": self.param.loc_trend_expected,
            "loc_trend_std": self.param.loc_trend_std,
            "coeff_var_expected": self.param.coeff_var_expected,
            "coeff_var_std": self.param.coeff_var_std,
            "sigma_min": self.param.sigma_min,
        }
        # Iterations per chain: warmup plus the draws needed from each chain.
        n_iter = n_sim / n_chains + n_warmup
        fitted = model.sampling(
            data=stan_data, chains=int(n_chains), iter=int(n_iter), warmup=int(n_warmup)
        )
        draws = fitted.extract(["loc_base", "loc_trend", "coeff_var", "shape"])
        return pd.DataFrame(draws)

    def build_model(self) -> FloodModel:
        """Draw one state of the world at random from the library."""
        picked = self.simulations.sample(1, axis=0)
        return FloodModel(
            zero_time=self.param.zero_time,
            loc_base=picked["loc_base"].iloc[0],
            loc_trend=picked["loc_trend"].iloc[0],
            coeff_var=picked["coeff_var"].iloc[0],
            shape=picked["shape"].iloc[0],
            scale_min=self.param.sigma_min,
        )
import multiprocessing
import os
from typing import Tuple
from numba import jitclass, float64
import pandas as pd
from .util import compile_model, hash_string, DATA_DIR, CACHE_DIR, gev_cdf, random_gev
from .param import FloodPriorParameters
# NOTE(review): `jitclass` lives at `numba.experimental.jitclass` in
# numba >= 0.49 — verify the installed numba version provides this name.
@jitclass(
    OrderedDict(
        loc_base=float64,
        loc_trend=float64,
        coeff_var=float64,
        shape=float64,
        zero_time=float64,
        scale_min=float64,
    )
)
class FloodModel:
    """
    A single simulated "state of the world": fixed GEV storm-surge parameters
    whose location parameter follows a linear trend in time.
    """

    def __init__(
        self,
        loc_base: float,
        loc_trend: float,
        coeff_var: float,
        shape: float,
        zero_time: float,
        scale_min: float,
    ) -> None:
        """
        Build the model.

        loc_base: GEV location at the reference year ``zero_time``
        loc_trend: yearly linear change in the location parameter
        coeff_var: the scale is computed as ``coeff_var * location``
        shape: GEV shape parameter (time-invariant)
        zero_time: year the trend is anchored to
        scale_min: floor applied to the scale parameter
        """
        self.loc_base = loc_base
        self.loc_trend = loc_trend
        self.coeff_var = coeff_var
        self.shape = shape
        self.zero_time = zero_time
        self.scale_min = scale_min

    def get_gev_param(self, year: float) -> Tuple[float, float, float]:
        """
        Get the location, scale, and shape parameter for a particular year.
        """
        year_adj = year - self.zero_time
        loc = self.loc_base + self.loc_trend * year_adj
        # Floor the scale so it stays positive even if the trending location
        # becomes small or negative.
        scale = max(loc * self.coeff_var, self.scale_min)
        shape = self.shape
        return loc, scale, shape

    def simulate_flood(self, year: float) -> float:
        """
        Simulate a storm surge for a particular year (single GEV sample).
        """
        loc, scale, shape = self.get_gev_param(year=year)
        return random_gev(loc=loc, scale=scale, shape=shape)

    def calc_exceedance_prob(self, year: float, height: float) -> float:
        """
        Calculate the probability that the flood in `year` is greater than `height`.
        """
        loc, scale, shape = self.get_gev_param(year=year)
        return 1.0 - gev_cdf(x=height, loc=loc, scale=scale, shape=shape)
class FloodSimulationLibrary:
    """
    Stores a large number of simulations of the flood (GEV) parameters,
    cached on disk under a filename derived from the prior parameters.
    """

    def __init__(self, param: FloodPriorParameters, **kwargs) -> None:
        """
        Build a library of simulation parameters.

        Loads a cached feather file if one exists for these priors; otherwise
        fits the Stan model (kwargs are forwarded to
        ``create_simulation_library``) and writes the cache.
        """
        self.param = param
        simulation_fname = self.get_filename()
        try:
            self.simulations = pd.read_feather(simulation_fname)
        except IOError:
            # Cache miss: generate the draws and persist them for next time.
            self.simulations = self.create_simulation_library(**kwargs)
            self.simulations.to_feather(simulation_fname)

    def get_filename(self) -> str:
        """
        Get a unique filename for the library where the resulting library
        can be stored (hash of the prior parameters).
        """
        simulation_hash = hash_string(OrderedDict(self.param._asdict()).__str__())
        return os.path.join(CACHE_DIR, f"{simulation_hash}.feather")

    def create_simulation_library(
        self,
        n_sim: int = 100_000,
        # NOTE(review): default evaluated once at import time, not per call.
        n_chains: int = multiprocessing.cpu_count(),
        n_warmup: int = 2500,
    ) -> pd.DataFrame:
        """
        Run the GEV trend model in Stan to create many simulations of
        the flood-model parameters; returns one row per posterior draw.
        """
        sea_level = pd.read_csv(
            os.path.join(DATA_DIR, "sea-level-annual.csv"), index_col="year"
        )
        stan_fname = os.path.join(DATA_DIR, "gev-trend.stan")
        # NOTE(review): `model.sampling(...)` suggests the PyStan 2.x API —
        # confirm against the pinned pystan version.
        model = compile_model(filename=stan_fname, model_name="stormsurge")
        stan_data = dict(
            N=sea_level.shape[0],
            y=sea_level["storm_surge"].values,
            time=sea_level.index.values,
            # PRIORS
            time_offset=self.param.zero_time,
            loc_base_expected=self.param.loc_base_expected,
            loc_base_std=self.param.loc_base_std,
            loc_trend_expected=self.param.loc_trend_expected,
            loc_trend_std=self.param.loc_trend_std,
            coeff_var_expected=self.param.coeff_var_expected,
            coeff_var_std=self.param.coeff_var_std,
            sigma_min=self.param.sigma_min,
        )
        # calculate the number of iterations needed per chain (warmup + draws)
        n_iter = n_sim / n_chains + n_warmup
        fitted = model.sampling(
            data=stan_data, chains=int(n_chains), iter=int(n_iter), warmup=int(n_warmup)
        )
        return pd.DataFrame(
            fitted.extract(["loc_base", "loc_trend", "coeff_var", "shape"])
        )

    def build_model(self) -> FloodModel:
        """
        Get a model with a randomly sampled state of the world
        (one posterior draw).
        """
        row = self.simulations.sample(1, axis=0)
        return FloodModel(
            zero_time=self.param.zero_time,
            loc_base=row["loc_base"].values[0],
            loc_trend=row["loc_trend"].values[0],
            coeff_var=row["coeff_var"].values[0],
            shape=row["shape"].values[0],
            scale_min=self.param.sigma_min,
        )
import os
import argparse
import ujson as json
# ---------------------------------------------------------------------------
# Command-line launcher: parses experiment hyper-parameters and shells out to
# t5.py with a single assembled command line.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# --- task / data selection ---
parser.add_argument(
    "--task",
    default=None,
    type=str,
    required=True,
    help="Task name",
)
parser.add_argument(
    "--data_dir",
    default=None,
    type=str,
    required=True,
    help="data directory",
)
# --- model selection ---
parser.add_argument(
    "--model_type",
    default="t5",
    type=str,
    help="Model type name",
)
parser.add_argument(
    "--model",
    default="t5-base",
    type=str,
    help="Model name",
)
parser.add_argument(
    "--seed",
    default=42,
    type=int,
    help="Seed",
)
# --- sequence-length limits ---
parser.add_argument(
    "--min_output_length",
    default=5,
    type=int,
    help="minimum output length during decoding",
)
parser.add_argument(
    "--max_turn_length",
    default=10,
    type=int,
    help="maximum turn length",
)
parser.add_argument(
    "--max_per_turn_length", default=1, type=int,
    help="The maximum length of input per turn.",
)
parser.add_argument(
    "--max_input_length",
    default=512,
    type=int,
    help="maximum input length at each step",
)
parser.add_argument(
    "--max_output_length",  # this is not the same as the max_output_length in t5.py
    default=56,
    type=int,
    help="maximum output length at each step",
)
parser.add_argument(
    "--dec_max_output_length",  # this is the same as the max_output_length in t5.py
    default=200,
    type=int,
    help="maximum output length during decoding",
)
# --- optimization hyper-parameters ---
parser.add_argument(
    "--train_batch_size",
    default=4,
    type=int,
    help="Train batch size",
)
parser.add_argument(
    "--eval_batch_size",
    default=4,
    type=int,
    help="Eval batch size",
)
parser.add_argument(
    "--gradient_accumulation_steps",
    default=32,
    type=int,
    help="Gradient accumulation steps",
)
parser.add_argument(
    "--num_train_epochs",
    default=70,
    type=int,
    help="Number of training epochs",
)
parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--memory_type", default="base", type=str, help="The type of model, base or ht5")
parser.add_argument("--scheduler", default="linear", type=str, help="Scheduler name")
parser.add_argument("--optimizer", default="adam", type=str, help="The optimizer to use")
parser.add_argument("--warmup_steps", default=0., type=float, help="Linear warmup over warmup_steps.")
# --- run-mode flags ---
parser.add_argument("--zero_shot", action="store_true", help="Whether to update parameter.")
parser.add_argument("--from_pretrain", action="store_true", help="Load from pretrained model")
parser.add_argument("--pretrained_checkpoint", default=None, type=str, help="Load from pretrained model")
parser.add_argument("--from_scratch", action="store_true", help="Load from pretrained model")
parser.add_argument("--test_only", action="store_true", help="Whether only test")
parser.add_argument("--two_ref", action="store_true", help="Evaluate with two references")
args = parser.parse_args()
data_dir = args.data_dir
# Output directory name encodes model type, task, seed, memory type and model.
output_dir = f"train/{args.model_type}_{args.task}_seed{args.seed}_{args.memory_type}_{args.model}"
# NOTE(review): the command is assembled by f-string interpolation and run
# through the shell via os.system — argument values containing spaces or shell
# metacharacters will break the command (or inject shell code), and the exit
# status is discarded. Consider subprocess.run([...], check=True) instead.
os.system(f"python3 t5.py --model_type {args.model_type} --model_name_or_path {args.model} "
          f"{f'--from_pretrain --pretrained_checkpoint {args.pretrained_checkpoint}' if args.from_pretrain else ''} "
          f"{'--from_scratch' if args.from_scratch else ''} "
          f"--task_name {args.task} {'' if args.test_only else '--do_train --do_eval'} --do_test "
          f"--data_dir {data_dir} "
          f"--data_file {data_dir}/train_{args.max_input_length}_{args.max_output_length}.t5 "
          f"--eval_file {data_dir}/train.target --source_file {data_dir}/train.source "
          f"--dev_data_file {data_dir}/dev_{args.max_input_length}_{args.max_output_length}.t5 "
          f"--dev_eval_file {data_dir}/dev.target --dev_source_file {data_dir}/dev.source "
          f"--test_data_file {data_dir}/test_{args.max_input_length}_{args.max_output_length}.t5 "
          f"--test_eval_file {data_dir}/test.target --test_source_file {data_dir}/test.source "
          f"{f'--test_eval_file1 {data_dir}/test.target1' if args.two_ref else ''} "
          f"--cache_dir train/cache --max_input_length {args.max_input_length} --do_lower_case "
          f"--min_output_length {args.min_output_length} --max_output_length {args.dec_max_output_length} "
          f"--max_turn_length {args.max_turn_length} "
          f"--per_gpu_eval_batch_size={args.eval_batch_size} --per_gpu_train_batch_size={args.train_batch_size} "
          f"--learning_rate {args.learning_rate} --warmup_steps {args.warmup_steps} "
          f"--scheduler {args.scheduler} --optimizer {args.optimizer} "
          f"--gradient_accumulation_steps {args.gradient_accumulation_steps} --num_train_epochs {args.num_train_epochs} "
          f"--output_dir {output_dir} --memory_type {args.memory_type} "
          f"--overwrite_output_dir --evaluate_during_training --seed {args.seed} ")
import argparse
import ujson as json
parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default=None,
type=str,
required=True,
help="Task name",
)
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="data directory",
)
parser.add_argument(
"--model_type",
default="t5",
type=str,
help="Model type name",
)
parser.add_argument(
"--model",
default="t5-base",
type=str,
help="Model name",
)
parser.add_argument(
"--seed",
default=42,
type=int,
help="Seed",
)
parser.add_argument(
"--min_output_length",
default=5,
type=int,
help="minimum output length during decoding",
)
parser.add_argument(
"--max_turn_length",
default=10,
type=int,
help="maximum turn length",
)
parser.add_argument(
"--max_per_turn_length", default=1, type=int,
help="The maximum length of input per turn.",
)
parser.add_argument(
"--max_input_length",
default=512,
type=int,
help="maximum input length at each step",
)
parser.add_argument(
"--max_output_length", # this is not the same as the max_output_length in t5.py
default=56,
type=int,
help="maximum output length at each step",
)
parser.add_argument(
"--dec_max_output_length", # this is the same as the max_output_length in t5.py
default=200,
type=int,
help="maximum output length during decoding",
)
parser.add_argument(
"--train_batch_size",
default=4,
type=int,
help="Train batch size",
)
parser.add_argument(
"--eval_batch_size",
default=4,
type=int,
help="Eval batch size",
)
parser.add_argument(
"--gradient_accumulation_steps",
default=32,
type=int,
help="Gradient accumulation steps",
)
parser.add_argument(
"--num_train_epochs",
default=70,
type=int,
help="Number of training epochs",
)
parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--memory_type", default="base", type=str, help="The type of model, base or ht5")
parser.add_argument("--scheduler", default="linear", type=str, help="Scheduler name")
parser.add_argument("--optimizer", default="adam", type=str, help="The optimizer to use")
parser.add_argument("--warmup_steps", default=0., type=float, help="Linear warmup over warmup_steps.")
parser.add_argument("--zero_shot", action="store_true", help="Whether to update parameter.")
parser.add_argument("--from_pretrain", action="store_true", help="Load from pretrained model")
parser.add_argument("--pretrained_checkpoint", default=None, type=str, help="Load from pretrained model")
parser.add_argument("--from_scratch", action="store_true", help="Load from pretrained model")
parser.add_argument("--test_only", action="store_true", help="Whether only test")
parser.add_argument("--two_ref", action="store_true", help="Evaluate with two references")
args = parser.parse_args()
data_dir = args.data_dir
output_dir = f"train/{args.model_type}_{args.task}_seed{args.seed}_{args.memory_type}_{args.model}"
os.system(f"python3 t5.py --model_type {args.model_type} --model_name_or_path {args.model} "
f"{f'--from_pretrain --pretrained_checkpoint {args.pretrained_checkpoint}' if args.from_pretrain else ''} "
f"{'--from_scratch' if args.from_scratch else ''} "
f"--task_name {args.task} {'' if args.test_only else '--do_train --do_eval'} --do_test "
f"--data_dir {data_dir} "
f"--data_file {data_dir}/train_{args.max_input_length}_{args.max_output_length}.t5 "
f"--eval_file {data_dir}/train.target --source_file {data_dir}/train.source "
f"--dev_data_file {data_dir}/dev_{args.max_input_length}_{args.max_output_length}.t5 "
f"--dev_eval_file {data_dir}/dev.target --dev_source_file {data_dir}/dev.source "
f"--test_data_file {data_dir}/test_{args.max_input_length}_{args.max_output_length}.t5 "
f"--test_eval_file {data_dir}/test.target --test_source_file {data_dir}/test.source "
f"{f'--test_eval_file1 {data_dir}/test.target1' if args.two_ref else ''} "
f"--cache_dir train/cache --max_input_length {args.max_input_length} --do_lower_case "
f"--min_output_length {args.min_output_length} --max_output_length {args.dec_max_output_length} "
f"--max_turn_length {args.max_turn_length} "
f"--per_gpu_eval_batch_size={args.eval_batch_size} --per_gpu_train_batch_size={args.train_batch_size} "
f"--learning_rate {args.learning_rate} --warmup_steps {args.warmup_steps} "
f"--scheduler {args.scheduler} --optimizer {args.optimizer} "
f"--gradient_accumulation_steps {args.gradient_accumulation_steps} --num_train_epochs {args.num_train_epochs} "
f"--output_dir {output_dir} --memory_type {args.memory_type} "
f"--overwrite_output_dir --evaluate_during_training --seed {args.seed} ") | 0.592431 | 0.075653 |
import os
import pickle
import tempfile
import unittest
import base64
import sys
import pcapfile.test.fixture as fixture
from pcapfile import savefile
def create_pcap():
"""
Create a capture file from the test fixtures.
"""
tfile = tempfile.NamedTemporaryFile()
if sys.version_info[0] >= 3: # python3
capture = pickle.loads(base64.b64decode(fixture.TESTPCAP3))
else: # python2 unsupported pickle protocol: 3
capture = pickle.loads(fixture.TESTPCAP2.decode('base64'))
with open(tfile.name, 'wb') as f:
f.write(capture)
return tfile
class TestCase(unittest.TestCase):
"""
Test case for the savefile code.
"""
capfile = None
def init_capfile(self, layers=0):
"""Initialise the capture file."""
tfile = create_pcap()
self.capfile = savefile.load_savefile(tfile, layers=layers)
tfile.close()
if os.path.exists(tfile.name):
os.unlink(tfile.name)
@classmethod
def setUpClass(cls):
"""
Print an intro to identify this test suite when running multiple tests.
"""
print('[+] loading basic tests')
def setUp(self):
"""
Set up a default capture file.
"""
# only need to initialise capfile on the first time, while being able
# load it with additional decoding layers.
if not self.capfile:
self.init_capfile()
def test_network_load(self):
"""
Test that the code that loads network layer packets from the
top level works.
"""
self.init_capfile(layers=2)
for packet in self.capfile.packets:
for field in ['src', 'dst', 'v', 'hl', 'tos', 'ttl']:
ipkt = packet.packet.payload
self.assertTrue(hasattr(ipkt, field), 'invalid packet!')
def test_frame_load(self):
"""
Ensure that ethernet frames load from the top level.
"""
self.init_capfile(layers=1)
for packet in self.capfile.packets:
for field in ['src', 'dst', 'type', 'payload']:
self.assertTrue(hasattr(packet.packet, field),
'invalid frame!')
def test_packet_valid(self):
"""
Make sure raw packets load properly.
"""
packet = self.capfile.packets[0].raw()
self.assertEqual(int(bytearray(packet)[14]), 69, 'invalid packet')
for packet in self.capfile.packets:
for field in ['capture_len', 'timestamp', 'timestamp_us',
'packet', 'header', 'packet_len']:
self.assertTrue(hasattr(packet, field), 'invalid packet!')
def test_header_valid(self):
"""
Test to ensure the header validation code works.
"""
header = self.capfile.header
self.assertEqual(header.major, 2, 'invalid major version!')
self.assertEqual(header.minor, 4, 'invalid minor version!')
def test_basic_import(self):
"""
Validate basic parameters of a simple savefile load.
"""
self.assertTrue(self.capfile.valid, 'invalid capture file')
self.assertEqual(len(self.capfile.packets), 23,
'wrong number of packets!')
self.assertEqual(self.capfile.__length__(), 23,
'__length__ not reporting correct number of packets')
def test_lazy_import(self):
"""
Test the lazy packet parsing against the regular implementation.
"""
# Load the savefile again, but create an iterator for the
# packets instead of reading them all into memory at once.
tfile = create_pcap()
capfile_gen = savefile.load_savefile(tfile, lazy=True)
# Create a list of packets using the iterator. This way the
# length can be checked before comparing any content.
packets = list(capfile_gen.packets)
tfile.close()
if os.path.exists(tfile.name):
os.unlink(tfile.name)
self.assertEqual(len(packets), len(self.capfile.packets),
'lazy parsing gives different number of packets!')
# Compare the relevant parts of the packets.
fields = ['timestamp', 'timestamp_us', 'capture_len',
'packet_len', 'packet']
for act, ref in zip(packets, capfile_gen.packets):
for field in fields:
self.assertEqual(getattr(act, field), getattr(ref, field),
'lazy parsing gives different data!') | pcapfile/test/savefile_test.py | import os
import pickle
import tempfile
import unittest
import base64
import sys
import pcapfile.test.fixture as fixture
from pcapfile import savefile
def create_pcap():
"""
Create a capture file from the test fixtures.
"""
tfile = tempfile.NamedTemporaryFile()
if sys.version_info[0] >= 3: # python3
capture = pickle.loads(base64.b64decode(fixture.TESTPCAP3))
else: # python2 unsupported pickle protocol: 3
capture = pickle.loads(fixture.TESTPCAP2.decode('base64'))
with open(tfile.name, 'wb') as f:
f.write(capture)
return tfile
class TestCase(unittest.TestCase):
"""
Test case for the savefile code.
"""
capfile = None
def init_capfile(self, layers=0):
"""Initialise the capture file."""
tfile = create_pcap()
self.capfile = savefile.load_savefile(tfile, layers=layers)
tfile.close()
if os.path.exists(tfile.name):
os.unlink(tfile.name)
@classmethod
def setUpClass(cls):
"""
Print an intro to identify this test suite when running multiple tests.
"""
print('[+] loading basic tests')
def setUp(self):
"""
Set up a default capture file.
"""
# only need to initialise capfile on the first time, while being able
# load it with additional decoding layers.
if not self.capfile:
self.init_capfile()
def test_network_load(self):
"""
Test that the code that loads network layer packets from the
top level works.
"""
self.init_capfile(layers=2)
for packet in self.capfile.packets:
for field in ['src', 'dst', 'v', 'hl', 'tos', 'ttl']:
ipkt = packet.packet.payload
self.assertTrue(hasattr(ipkt, field), 'invalid packet!')
def test_frame_load(self):
"""
Ensure that ethernet frames load from the top level.
"""
self.init_capfile(layers=1)
for packet in self.capfile.packets:
for field in ['src', 'dst', 'type', 'payload']:
self.assertTrue(hasattr(packet.packet, field),
'invalid frame!')
def test_packet_valid(self):
"""
Make sure raw packets load properly.
"""
packet = self.capfile.packets[0].raw()
self.assertEqual(int(bytearray(packet)[14]), 69, 'invalid packet')
for packet in self.capfile.packets:
for field in ['capture_len', 'timestamp', 'timestamp_us',
'packet', 'header', 'packet_len']:
self.assertTrue(hasattr(packet, field), 'invalid packet!')
def test_header_valid(self):
"""
Test to ensure the header validation code works.
"""
header = self.capfile.header
self.assertEqual(header.major, 2, 'invalid major version!')
self.assertEqual(header.minor, 4, 'invalid minor version!')
def test_basic_import(self):
"""
Validate basic parameters of a simple savefile load.
"""
self.assertTrue(self.capfile.valid, 'invalid capture file')
self.assertEqual(len(self.capfile.packets), 23,
'wrong number of packets!')
self.assertEqual(self.capfile.__length__(), 23,
'__length__ not reporting correct number of packets')
def test_lazy_import(self):
"""
Test the lazy packet parsing against the regular implementation.
"""
# Load the savefile again, but create an iterator for the
# packets instead of reading them all into memory at once.
tfile = create_pcap()
capfile_gen = savefile.load_savefile(tfile, lazy=True)
# Create a list of packets using the iterator. This way the
# length can be checked before comparing any content.
packets = list(capfile_gen.packets)
tfile.close()
if os.path.exists(tfile.name):
os.unlink(tfile.name)
self.assertEqual(len(packets), len(self.capfile.packets),
'lazy parsing gives different number of packets!')
# Compare the relevant parts of the packets.
fields = ['timestamp', 'timestamp_us', 'capture_len',
'packet_len', 'packet']
for act, ref in zip(packets, capfile_gen.packets):
for field in fields:
self.assertEqual(getattr(act, field), getattr(ref, field),
'lazy parsing gives different data!') | 0.581184 | 0.383295 |
import torch
import torch.nn.functional as F
import torch.utils.data
import torchvision.datasets
import time
batch_size = 64
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Training MNIST Model on', device)
print("=" * 60)
train_dataset = torchvision.datasets.MNIST(root='../data',
train=True,
transform=torchvision.transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='../data',
train=False,
transform=torchvision.transforms.ToTensor(),
download=True)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
# 컨볼루션 layer 수 늘리기, FC layer 수 늘리기
self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=3)
self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=3)
self.conv3 = torch.nn.Conv2d(20, 30, kernel_size=3)
self.maxpool = torch.nn.MaxPool2d(2)
self.fc1 = torch.nn.Linear(30, 40)
self.fc2 = torch.nn.Linear(40, 20)
self.fc3 = torch.nn.Linear(20, 10)
def forward(self, x):
in_size = x.size(0)
x = self.conv1(x)
x = self.maxpool(x)
x = F.relu(x)
x = self.conv2(x)
x = self.maxpool(x)
x = F.relu(x)
x = self.conv3(x)
x = self.maxpool(x)
x = F.relu(x)
x = x.view(in_size, -1)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return F.log_softmax(x, dim=1)
model = Net()
model.to(device)
criterion = torch.nn.NLLLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
model.train()
for batch_idx, (x_data, y_data) in enumerate(train_loader):
x_data, y_data = x_data.to(device), y_data.to(device)
y_pred = model(x_data)
loss = criterion(y_pred, y_data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} | Batch: {}/{} ({:.0f}%) | Loss: {:.6f}'.format(
epoch, batch_idx * len(x_data), len(train_loader.dataset),
100.0 * batch_idx / len(train_loader), loss.item()))
def test():
model.eval()
loss = 0
correct = 0
for x_data, y_data in test_loader:
x_data, y_data = x_data.to(device), y_data.to(device)
y_pred = model(x_data)
loss += criterion(y_pred, y_data).item()
pred = torch.max(y_pred, 1)[1]
correct += pred.eq(y_data.view_as(pred)).cpu().sum()
loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)'.format(
loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset)))
if __name__ == '__main__':
since = time.time()
for epoch in range(1, 10):
epoch_start = time.time()
train(epoch)
min, sec = divmod(time.time() - epoch_start, 60)
print('Training time: {:.0f}m {:.0f}s'.format(min, sec))
test()
min, sec = divmod(time.time() - epoch_start, 60)
print('Testing time: {:.0f}m {:.0f}s'.format(min, sec))
print("=" * 60)
min, sec = divmod(time.time() - since, 60)
print('Total time: {:.0f}m {:.0f}s'.format(min, sec)) | PyTorch Zero To All S1/10-2Basic CNN Exercise.py | import torch
import torch.nn.functional as F
import torch.utils.data
import torchvision.datasets
import time
batch_size = 64
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Training MNIST Model on', device)
print("=" * 60)
train_dataset = torchvision.datasets.MNIST(root='../data',
train=True,
transform=torchvision.transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='../data',
train=False,
transform=torchvision.transforms.ToTensor(),
download=True)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
# 컨볼루션 layer 수 늘리기, FC layer 수 늘리기
self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=3)
self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=3)
self.conv3 = torch.nn.Conv2d(20, 30, kernel_size=3)
self.maxpool = torch.nn.MaxPool2d(2)
self.fc1 = torch.nn.Linear(30, 40)
self.fc2 = torch.nn.Linear(40, 20)
self.fc3 = torch.nn.Linear(20, 10)
def forward(self, x):
in_size = x.size(0)
x = self.conv1(x)
x = self.maxpool(x)
x = F.relu(x)
x = self.conv2(x)
x = self.maxpool(x)
x = F.relu(x)
x = self.conv3(x)
x = self.maxpool(x)
x = F.relu(x)
x = x.view(in_size, -1)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return F.log_softmax(x, dim=1)
model = Net()
model.to(device)
criterion = torch.nn.NLLLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
model.train()
for batch_idx, (x_data, y_data) in enumerate(train_loader):
x_data, y_data = x_data.to(device), y_data.to(device)
y_pred = model(x_data)
loss = criterion(y_pred, y_data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % 10 == 0:
print('Train Epoch: {} | Batch: {}/{} ({:.0f}%) | Loss: {:.6f}'.format(
epoch, batch_idx * len(x_data), len(train_loader.dataset),
100.0 * batch_idx / len(train_loader), loss.item()))
def test():
model.eval()
loss = 0
correct = 0
for x_data, y_data in test_loader:
x_data, y_data = x_data.to(device), y_data.to(device)
y_pred = model(x_data)
loss += criterion(y_pred, y_data).item()
pred = torch.max(y_pred, 1)[1]
correct += pred.eq(y_data.view_as(pred)).cpu().sum()
loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)'.format(
loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset)))
if __name__ == '__main__':
since = time.time()
for epoch in range(1, 10):
epoch_start = time.time()
train(epoch)
min, sec = divmod(time.time() - epoch_start, 60)
print('Training time: {:.0f}m {:.0f}s'.format(min, sec))
test()
min, sec = divmod(time.time() - epoch_start, 60)
print('Testing time: {:.0f}m {:.0f}s'.format(min, sec))
print("=" * 60)
min, sec = divmod(time.time() - since, 60)
print('Total time: {:.0f}m {:.0f}s'.format(min, sec)) | 0.906423 | 0.680215 |
"""Tests for modeling_strategy_descriptor."""
from absl.testing import absltest
from typing import Dict
from typing import Type
import numpy as np
from wfa_planning_evaluation_framework.models.goerg_model import (
GoergModel,
)
from wfa_planning_evaluation_framework.models.reach_curve import (
ReachCurve,
)
from wfa_planning_evaluation_framework.models.reach_surface import (
ReachSurface,
)
from wfa_planning_evaluation_framework.models.pairwise_union_reach_surface import (
PairwiseUnionReachSurface,
)
from wfa_planning_evaluation_framework.simulator.modeling_strategy import (
ModelingStrategy,
)
from wfa_planning_evaluation_framework.driver.modeling_strategy_descriptor import (
MODELING_STRATEGIES,
ModelingStrategyDescriptor,
)
class FakeModelingStrategy(ModelingStrategy):
def __init__(
self,
single_pub_model: Type[ReachCurve],
single_pub_model_kwargs: Dict,
multi_pub_model: Type[ReachSurface],
multi_pub_model_kwargs: Dict,
x: int,
):
self.name = "fake"
self.x = 1
super().__init__(
single_pub_model,
single_pub_model_kwargs,
multi_pub_model,
multi_pub_model_kwargs,
)
class ModelingStrategyDescriptorTest(absltest.TestCase):
def test_modeling_strategy_descriptor(self):
MODELING_STRATEGIES["fake"] = FakeModelingStrategy
desc = ModelingStrategyDescriptor(
"fake", {"x": 1}, "goerg", {}, "pairwise_union", {}
)
strategy = desc.instantiate_strategy()
self.assertEqual(strategy.name, "fake")
self.assertEqual(strategy.x, 1)
self.assertEqual(strategy._single_pub_model, GoergModel)
self.assertEqual(strategy._single_pub_model_kwargs, {})
self.assertEqual(strategy._multi_pub_model, PairwiseUnionReachSurface)
self.assertEqual(strategy._multi_pub_model_kwargs, {})
self.assertEqual(str(desc), "fake(x=1),goerg,pairwise_union")
if __name__ == "__main__":
absltest.main() | src/driver/tests/modeling_strategy_descriptor_test.py | """Tests for modeling_strategy_descriptor."""
from absl.testing import absltest
from typing import Dict
from typing import Type
import numpy as np
from wfa_planning_evaluation_framework.models.goerg_model import (
GoergModel,
)
from wfa_planning_evaluation_framework.models.reach_curve import (
ReachCurve,
)
from wfa_planning_evaluation_framework.models.reach_surface import (
ReachSurface,
)
from wfa_planning_evaluation_framework.models.pairwise_union_reach_surface import (
PairwiseUnionReachSurface,
)
from wfa_planning_evaluation_framework.simulator.modeling_strategy import (
ModelingStrategy,
)
from wfa_planning_evaluation_framework.driver.modeling_strategy_descriptor import (
MODELING_STRATEGIES,
ModelingStrategyDescriptor,
)
class FakeModelingStrategy(ModelingStrategy):
def __init__(
self,
single_pub_model: Type[ReachCurve],
single_pub_model_kwargs: Dict,
multi_pub_model: Type[ReachSurface],
multi_pub_model_kwargs: Dict,
x: int,
):
self.name = "fake"
self.x = 1
super().__init__(
single_pub_model,
single_pub_model_kwargs,
multi_pub_model,
multi_pub_model_kwargs,
)
class ModelingStrategyDescriptorTest(absltest.TestCase):
def test_modeling_strategy_descriptor(self):
MODELING_STRATEGIES["fake"] = FakeModelingStrategy
desc = ModelingStrategyDescriptor(
"fake", {"x": 1}, "goerg", {}, "pairwise_union", {}
)
strategy = desc.instantiate_strategy()
self.assertEqual(strategy.name, "fake")
self.assertEqual(strategy.x, 1)
self.assertEqual(strategy._single_pub_model, GoergModel)
self.assertEqual(strategy._single_pub_model_kwargs, {})
self.assertEqual(strategy._multi_pub_model, PairwiseUnionReachSurface)
self.assertEqual(strategy._multi_pub_model_kwargs, {})
self.assertEqual(str(desc), "fake(x=1),goerg,pairwise_union")
if __name__ == "__main__":
absltest.main() | 0.900169 | 0.316422 |
"""The abstract robot class."""
import abc
from typing import Optional, Sequence
# Action names for robots operating kinematically.
LINEAR_VELOCITY = "linear_velocity"
ANGULAR_VELOCITY = "angular_velocity"
class RobotBase(metaclass=abc.ABCMeta):
"""The base class for all robots used in the mobility team."""
@abc.abstractmethod
def reset(
self,
base_position: Optional[Sequence[float]] = None,
base_orientation_quaternion: Optional[Sequence[float]] = None) -> None:
"""Resets the states (e.g. pose and sensor readings) of the robot.
This is called at the start of each episode by the environment.
Args:
base_position: Robot base position after reset. If None, robot stay where
it was after reset. For robot that does not support reset with position
change, a ValueError should be raised.
base_orientation_quaternion: Robot base orientation after reset. If None,
robot stays in pre-reset orientation. For robot that does not support
reset with orientation change, a ValueError should be raised.
"""
pass
@abc.abstractmethod
def terminate(self):
"""Shuts down the robot."""
pass
@abc.abstractmethod
def pre_control_step(self, action):
"""Processes the input action before the action repeat loop.
We assume that an action sent to the real robot is sticky, i.e. it will be
executed until a new action is received after some time. To simulate this,
we introduced the action_repeat parameter, to reflect how many time steps it
takes for the policy to generate a new action. That is, for each control
step, the simulation contains an inner loop:
robot.pre_control_step(action) # Smooth or interpolate the action
for i in range(action_repeat):
robot.apply_action(action)
bullet.stepSimulation(time_step) # Step the sim for one time step
robot.receive_observation() # Update the sensor observations
robot.post_control_step() # Update some internal variables.
Args:
action: Data type depends on the robot. Can be desired motor
position/torques for legged robots, or desired velocity/angular velocity
for wheeled robots.
"""
pass
@abc.abstractmethod
def apply_action(self, action):
"""Applies the action to the robot."""
pass
@abc.abstractmethod
def receive_observation(self):
"""Updates the robot sensor readings."""
pass
@abc.abstractmethod
def post_control_step(self):
"""Updates some internal variables such as step counters."""
pass
@property
def action_space(self):
"""The action spec of the robot."""
raise NotImplementedError("action_space is not implemented")
@property
@abc.abstractmethod
def action_names(self):
"""Name of each action in the action_space.
This is a structure of strings with the same shape as the action space,
where each string describes the corresponding element of the action space
(for example, a kinematic robot might return ("linear_velocity",
"angular_velocity")). Used for logging in the safety layer.
"""
@property
def sensors(self):
"""Returns the sensors on this robot.
Sensors are the main interface between the robot class and the gym
environment. Sensors can return what the robot can measure (e.g.
joint angles, IMU readings), and can represent more general quantities, i.e.
the last action taken, that can be part of the observation space.
Sensor classes are used by the robot class to the specify its observation
space.
"""
raise NotImplementedError("sensors property not implemented")
@property
def base_orientation_quaternion(self):
"""Returns the base pose as a quaternion in format (x, y, z, w).
These properties differ from the sensor interfaces, as they represent
the built-in measurable quantities. We assume most robots have an IMU at
its base to measure the base pose. Actually, some sensor classes like the
base pose sensor and joint angle sensor will call these built-in methods. In
general, how these quantities can be extracted depends on the specific real
robots.
"""
raise NotImplementedError("base_orientation_quaternion is not implemented")
@property
def base_roll_pitch_yaw(self):
"""Returns the base roll, pitch, and yaw angles."""
raise NotImplementedError("base_roll_pitch_yaw is not implemented")
@property
def base_roll_pitch_yaw_rate(self):
raise NotImplementedError("base_roll_pitch_yaw_rate is not implemented")
@property
def base_position(self):
raise NotImplementedError("base_position is not implemented") | examples/pybullet/gym/pybullet_envs/minitaur/robots/robot_base.py | """The abstract robot class."""
import abc
from typing import Optional, Sequence
# Action names for robots operating kinematically.
LINEAR_VELOCITY = "linear_velocity"
ANGULAR_VELOCITY = "angular_velocity"
class RobotBase(metaclass=abc.ABCMeta):
"""The base class for all robots used in the mobility team."""
@abc.abstractmethod
def reset(
self,
base_position: Optional[Sequence[float]] = None,
base_orientation_quaternion: Optional[Sequence[float]] = None) -> None:
"""Resets the states (e.g. pose and sensor readings) of the robot.
This is called at the start of each episode by the environment.
Args:
base_position: Robot base position after reset. If None, robot stay where
it was after reset. For robot that does not support reset with position
change, a ValueError should be raised.
base_orientation_quaternion: Robot base orientation after reset. If None,
robot stays in pre-reset orientation. For robot that does not support
reset with orientation change, a ValueError should be raised.
"""
pass
@abc.abstractmethod
def terminate(self):
"""Shuts down the robot."""
pass
@abc.abstractmethod
def pre_control_step(self, action):
"""Processes the input action before the action repeat loop.
We assume that an action sent to the real robot is sticky, i.e. it will be
executed until a new action is received after some time. To simulate this,
we introduced the action_repeat parameter, to reflect how many time steps it
takes for the policy to generate a new action. That is, for each control
step, the simulation contains an inner loop:
robot.pre_control_step(action) # Smooth or interpolate the action
for i in range(action_repeat):
robot.apply_action(action)
bullet.stepSimulation(time_step) # Step the sim for one time step
robot.receive_observation() # Update the sensor observations
robot.post_control_step() # Update some internal variables.
Args:
action: Data type depends on the robot. Can be desired motor
position/torques for legged robots, or desired velocity/angular velocity
for wheeled robots.
"""
pass
@abc.abstractmethod
def apply_action(self, action):
"""Applies the action to the robot."""
pass
@abc.abstractmethod
def receive_observation(self):
"""Updates the robot sensor readings."""
pass
@abc.abstractmethod
def post_control_step(self):
"""Updates some internal variables such as step counters."""
pass
@property
def action_space(self):
"""The action spec of the robot."""
raise NotImplementedError("action_space is not implemented")
@property
@abc.abstractmethod
def action_names(self):
"""Name of each action in the action_space.
This is a structure of strings with the same shape as the action space,
where each string describes the corresponding element of the action space
(for example, a kinematic robot might return ("linear_velocity",
"angular_velocity")). Used for logging in the safety layer.
"""
@property
def sensors(self):
"""Returns the sensors on this robot.
Sensors are the main interface between the robot class and the gym
environment. Sensors can return what the robot can measure (e.g.
joint angles, IMU readings), and can represent more general quantities, i.e.
the last action taken, that can be part of the observation space.
Sensor classes are used by the robot class to the specify its observation
space.
"""
raise NotImplementedError("sensors property not implemented")
@property
def base_orientation_quaternion(self):
"""Returns the base pose as a quaternion in format (x, y, z, w).
These properties differ from the sensor interfaces, as they represent
the built-in measurable quantities. We assume most robots have an IMU at
its base to measure the base pose. Actually, some sensor classes like the
base pose sensor and joint angle sensor will call these built-in methods. In
general, how these quantities can be extracted depends on the specific real
robots.
"""
raise NotImplementedError("base_orientation_quaternion is not implemented")
@property
def base_roll_pitch_yaw(self):
"""Returns the base roll, pitch, and yaw angles."""
raise NotImplementedError("base_roll_pitch_yaw is not implemented")
@property
def base_roll_pitch_yaw_rate(self):
raise NotImplementedError("base_roll_pitch_yaw_rate is not implemented")
@property
def base_position(self):
raise NotImplementedError("base_position is not implemented") | 0.967302 | 0.691761 |
from flask import jsonify, request, current_app as app
from flask_api import status
from rest.decorators import handle_errors
from web_exceptions import BadRequest
from service.membership_service import MembershipService
from service.user_profile_service import UserProfileService
@app.route("/api/membership", methods=["GET"])
@handle_errors
def get_membership(membership_service: MembershipService):
try:
membership = membership_service.get()
if membership is None:
return {}, status.HTTP_404_NOT_FOUND
return jsonify(membership), status.HTTP_200_OK
except BadRequest as be:
return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership", methods=["POST"])
@handle_errors
def create_membership(membership_service: MembershipService):
try:
end_date = request.json["end_date"]
start_date = request.json["end_date"]
membership_type = request.json["type"] if "type" in request.json else None
membership = membership_service.new_membership(end_date, start_date, membership_type)
return jsonify(membership), status.HTTP_201_CREATED
except BadRequest as be:
return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership", methods=["PUT"])
@handle_errors
def update_membership(membership_service: MembershipService):
try:
start_date = request.json["start_date"]
end_date = request.json["end_date"]
membership_type = request.json["type"]
membership = membership_service.update_membership(start_date, end_date, membership_type)
return jsonify(membership), status.HTTP_202_ACCEPTED
except BadRequest as be:
return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership", methods=["DELETE"])
@handle_errors
def cancel_membership(membership_service: MembershipService):
try:
membership = membership_service.cancel_membership()
if membership:
return jsonify(membership), status.HTTP_202_ACCEPTED
else:
return jsonify(None), status.HTTP_204_NO_CONTENT
except BadRequest as be:
return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership/get_eligible_type", methods=["GET"])
@handle_errors
def get_eligible_type(profile_service: UserProfileService):
    """GET — return the membership tier the user's points qualify for."""
    points = profile_service.get()["points"]
    return get_membership_type(points=points), status.HTTP_200_OK
def get_membership_type(points: int) -> str:
if points >= 5:
return "Gold"
elif points >= 3:
return "Silver"
else:
return "Smart" | sis-web/rest/membership_controller.py | from flask import jsonify, request, current_app as app
from flask_api import status
from rest.decorators import handle_errors
from web_exceptions import BadRequest
from service.membership_service import MembershipService
from service.user_profile_service import UserProfileService
@app.route("/api/membership", methods=["GET"])
@handle_errors
def get_membership(membership_service: MembershipService):
    """GET /api/membership — return the current membership, 404 when absent."""
    try:
        membership = membership_service.get()
        if membership is None:
            # No membership on record: empty body with 404.
            return {}, status.HTTP_404_NOT_FOUND
        return jsonify(membership), status.HTTP_200_OK
    except BadRequest as be:
        return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership", methods=["POST"])
@handle_errors
def create_membership(membership_service: MembershipService):
    """POST /api/membership — create a new membership.

    Body: end_date (required), start_date (required), type (optional).
    Returns 201 with the created record, or 400 on a BadRequest.
    """
    try:
        end_date = request.json["end_date"]
        # Bug fix: this previously read request.json["end_date"] again, so
        # the client-supplied start_date was silently ignored.
        start_date = request.json["start_date"]
        membership_type = request.json.get("type")
        # NOTE(review): the service takes (end_date, start_date, ...) in that
        # order — kept as-is; confirm against MembershipService.new_membership.
        membership = membership_service.new_membership(
            end_date, start_date, membership_type)
        return jsonify(membership), status.HTTP_201_CREATED
    except BadRequest as be:
        return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership", methods=["PUT"])
@handle_errors
def update_membership(membership_service: MembershipService):
    """PUT /api/membership — update dates/type; 202 on success, 400 on BadRequest."""
    try:
        start_date = request.json["start_date"]
        end_date = request.json["end_date"]
        # Unlike POST, "type" is mandatory here (KeyError if missing).
        membership_type = request.json["type"]
        membership = membership_service.update_membership(start_date, end_date, membership_type)
        return jsonify(membership), status.HTTP_202_ACCEPTED
    except BadRequest as be:
        return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership", methods=["DELETE"])
@handle_errors
def cancel_membership(membership_service: MembershipService):
    """DELETE /api/membership — cancel; 202 with the record, 204 when nothing existed."""
    try:
        membership = membership_service.cancel_membership()
        if membership:
            return jsonify(membership), status.HTTP_202_ACCEPTED
        else:
            # Nothing to cancel: empty payload, 204.
            return jsonify(None), status.HTTP_204_NO_CONTENT
    except BadRequest as be:
        return str(be), status.HTTP_400_BAD_REQUEST
@app.route("/api/membership/get_eligible_type", methods=["GET"])
@handle_errors
def get_eligible_type(profile_service: UserProfileService):
    """GET — return the membership tier the user's points qualify for."""
    profile = profile_service.get()
    # assumes the profile mapping always carries a "points" key — TODO confirm
    return get_membership_type(points=profile["points"]), status.HTTP_200_OK
def get_membership_type(points: int) -> str:
    """Map a points total to a membership tier.

    >= 5 points -> "Gold", >= 3 -> "Silver", anything lower -> "Smart".
    """
    for minimum, tier in ((5, "Gold"), (3, "Silver")):
        if points >= minimum:
            return tier
    return "Smart"
from copy import deepcopy
import extras
class Piece:
    """Base chess piece: identity, board position, material value, and moves."""

    # Class-level default; subclasses assign their own instance-level offsets.
    moves = []

    def __init__(self, name, symbol, team, row, col, points):
        self.name = name        # e.g. 'queen'
        self.symbol = symbol    # printable board symbol
        self.team = team
        self.row = row
        self.col = col
        self.points = points    # material value
        self.has_moved = False  # e.g. gates the pawn's two-step move

    def update_indexes(self, r, c):
        """Record the piece's new board coordinates."""
        self.row = r
        self.col = c

    def get_allowed_moves(self, _board):
        """Return this piece's legal moves on _board as move strings.

        A candidate (row+dr, col+dc) is kept when it is on the board, the
        start square is occupied, the move is possible for this piece, and
        no team-mate sits at the destination or blocks the path.
        """
        allowed_moves = []
        for d_row, d_col in self.moves:
            row, col = self.row, self.col
            new_row, new_col = row + d_row, col + d_col
            # Guard clauses replace the original five-deep nesting;
            # the checks run in the same order with the same results.
            if not _board.in_range(new_row, new_col):
                continue
            if _board.is_cell_empty(row, col):
                continue
            if not _board.is_move_possible(self, new_row, new_col):
                continue
            if _board.team_at_destination(self, new_row, new_col):
                continue
            if _board.team_in_path(self, new_row, new_col):
                continue
            move = extras.un_parse_input(row, col, new_row, new_col)
            if move not in allowed_moves:
                allowed_moves.append(move)
        return allowed_moves

    def get_copy(self):
        """Return an independent deep copy of this piece.

        Cleanup: the old version deep-copied every attribute again after
        deepcopy(self) and then returned yet another deepcopy — three times
        the work for the same result.  One deepcopy suffices; `moves` is
        still copied explicitly so the copy always gets an instance-level
        list even when the source only had the class-level default.
        """
        copy_ = deepcopy(self)
        copy_.moves = deepcopy(self.moves)
        return copy_
class King(Piece):
    """King: moves one square in any of the eight directions."""

    def __init__(self, symbol, team, row, col):
        super().__init__('king', symbol, team, row, col, 99999)
        # Bug fix: the original list contained [-1, -1] twice and no
        # [1, -1], so the king could never move to the down-left diagonal.
        self.moves = [
            [0, 1], [1, 0], [0, -1], [-1, 0],    # right, down, left, up
            [-1, -1], [1, 1], [-1, 1], [1, -1],  # up-left, down-right, up-right, down-left
        ]
class Queen(Piece):
    """Queen: slides up to seven squares along any rank, file, or diagonal."""

    def __init__(self, symbol, team, row, col):
        super().__init__('queen', symbol, team, row, col, 9)
        # Same offsets as before, generated per direction in the same order:
        # right, down, left, up, down-right, down-left, up-right, up-left,
        # with step sizes 1..7 each.
        directions = ((0, 1), (1, 0), (0, -1), (-1, 0),
                      (1, 1), (1, -1), (-1, 1), (-1, -1))
        self.moves = [[d_row * step, d_col * step]
                      for d_row, d_col in directions
                      for step in range(1, 8)]
class Rook(Piece):
    """Rook: slides up to seven squares along a rank or file."""

    def __init__(self, symbol, team, row, col):
        super().__init__('rook', symbol, team, row, col, 5)
        # (x, y) where x = rows and y = columns; directions in the same
        # order as before: right, down, left, up, step sizes 1..7 each.
        directions = ((0, 1), (1, 0), (0, -1), (-1, 0))
        self.moves = [[d_row * step, d_col * step]
                      for d_row, d_col in directions
                      for step in range(1, 8)]
class Bishop(Piece):
    """Bishop: slides up to seven squares along a diagonal."""

    def __init__(self, symbol, team, row, col):
        super().__init__('bishop', symbol, team, row, col, 3)
        # Diagonals in the same order as before: down-right, down-left,
        # up-right, up-left, step sizes 1..7 each.
        directions = ((1, 1), (1, -1), (-1, 1), (-1, -1))
        self.moves = [[d_row * step, d_col * step]
                      for d_row, d_col in directions
                      for step in range(1, 8)]
class Knight(Piece):
    """Knight: eight L-shaped jumps; may leap over intervening pieces."""

    def __init__(self, symbol, team, row, col):
        super().__init__('knight', symbol, team, row, col, 3)
        # All eight (row, col) L-offsets, in the same order as before.
        self.moves = [[d_row, d_col]
                      for d_row, d_col in ((-2, -1), (-2, 1), (2, -1), (2, 1),
                                           (-1, -2), (-1, 2), (1, -2), (1, 2))]
class Pawn(Piece):
def __init__(self, symbol, team, row, col):
super().__init__('pawn', symbol, team, row, col, 1)
self.moves = [
[-1, 0], [-2, 0], [-1, 1], [-1, -1]] # Up one cell, Up two cell, Up-right diagonal, Up-left diagonal
def remove_two_step_move(self):
if [2, 0] in self.moves:
self.moves.remove([2, 0])
if [-2, 0] in self.moves:
self.moves.remove([-2, 0]) | piece.py | from copy import deepcopy
import extras
class Piece:
    """Base chess piece: identity, board position, material value, and moves."""

    # Class-level default; subclasses assign instance-level move offsets.
    moves = []

    def __init__(self, name, symbol, team, row, col, points):
        self.name = name        # e.g. 'queen'
        self.symbol = symbol    # printable board symbol
        self.team = team
        self.row = row
        self.col = col
        self.points = points    # material value
        self.has_moved = False  # e.g. gates the pawn's two-step move

    def update_indexes(self, r, c):
        """Record the piece's new board coordinates."""
        self.row = r
        self.col = c

    def get_allowed_moves(self, _board):
        """Return this piece's legal moves on _board as move strings."""
        allowed_moves = []
        # go through every move available for this piece
        for move in self.moves:
            row = self.row
            col = self.col
            # potential location to move to
            new_row = row + move[0]
            new_col = col + move[1]
            # check that destination is not out of range
            if _board.in_range(new_row, new_col):
                # check that initial location is not empty
                if not _board.is_cell_empty(row, col):
                    # check that the move is possible for that piece
                    if _board.is_move_possible(self, new_row, new_col):
                        # check that there are no team-mates at destination
                        if not _board.team_at_destination(self, new_row, new_col):
                            # check that there are no team-mates blocking the path
                            if not _board.team_in_path(self, new_row, new_col):
                                # form string representation of the move
                                move = extras.un_parse_input(row, col, new_row, new_col)
                                # add to our list
                                if move not in allowed_moves:
                                    allowed_moves.append(move)
        return allowed_moves

    def get_copy(self):
        """Return an independent deep copy of this piece.

        NOTE(review): deepcopy(self) already copies every instance
        attribute; the per-attribute deepcopies and the final deepcopy of
        the copy are redundant work with the same end result.
        """
        copy_ = deepcopy(self)
        copy_.name = deepcopy(self.name)
        copy_.symbol = deepcopy(self.symbol)
        copy_.team = deepcopy(self.team)
        copy_.row = deepcopy(self.row)
        copy_.col = deepcopy(self.col)
        copy_.points = deepcopy(self.points)
        copy_.has_moved = deepcopy(self.has_moved)
        copy_.moves = deepcopy(self.moves)
        return deepcopy(copy_)
class King(Piece):
    """King: moves one square in any of the eight directions."""

    def __init__(self, symbol, team, row, col):
        super().__init__('king', symbol, team, row, col, 99999)
        # Bug fix: the original list contained [-1, -1] twice and no
        # [1, -1], so the king could never move to the down-left diagonal.
        self.moves = [
            [0, 1], [1, 0], [0, -1], [-1, 0],    # right, down, left, up
            [-1, -1], [1, 1], [-1, 1], [1, -1],  # up-left, down-right, up-right, down-left
        ]
class Queen(Piece):
    """Queen: combines rook and bishop movement, up to seven squares."""

    def __init__(self, symbol, team, row, col):
        super().__init__('queen', symbol, team, row, col, 9)
        self.moves = [
            [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7],  # going right
            [1, 0], [2, 0], [3, 0], [4, 0], [5, 0], [6, 0], [7, 0],  # going down
            [0, -1], [0, -2], [0, -3], [0, -4], [0, -5], [0, -6], [0, -7],  # going left
            [-1, 0], [-2, 0], [-3, 0], [-4, 0], [-5, 0], [-6, 0], [-7, 0],  # going up
            [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7],  # down-right diagonal
            [1, -1], [2, -2], [3, -3], [4, -4], [5, -5], [6, -6], [7, -7],  # down-left diagonal
            [-1, 1], [-2, 2], [-3, 3], [-4, 4], [-5, 5], [-6, 6], [-7, 7],  # up-right diagonal
            [-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6], [-7, -7]]  # up-left diagonal
class Rook(Piece):
    """Rook: slides up to seven squares along a rank or file."""

    def __init__(self, symbol, team, row, col):
        super().__init__('rook', symbol, team, row, col, 5)
        # (x,y) where x = rows and y = columns
        # Order: right, down, left, up, step sizes 1..7 each.
        self.moves = [
            [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7],
            [1, 0], [2, 0], [3, 0], [4, 0], [5, 0], [6, 0], [7, 0],
            [0, -1], [0, -2], [0, -3], [0, -4], [0, -5], [0, -6], [0, -7],
            [-1, 0], [-2, 0], [-3, 0], [-4, 0], [-5, 0], [-6, 0], [-7, 0]]
class Bishop(Piece):
    """Bishop: slides up to seven squares along a diagonal."""

    def __init__(self, symbol, team, row, col):
        super().__init__('bishop', symbol, team, row, col, 3)
        # Order: down-right, down-left, up-right, up-left, steps 1..7 each.
        self.moves = [
            [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7],
            [1, -1], [2, -2], [3, -3], [4, -4], [5, -5], [6, -6], [7, -7],
            [-1, 1], [-2, 2], [-3, 3], [-4, 4], [-5, 5], [-6, 6], [-7, 7],
            [-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6], [-7, -7]]
class Knight(Piece):
    """Knight: eight L-shaped jumps; may leap over intervening pieces."""

    def __init__(self, symbol, team, row, col):
        super().__init__('knight', symbol, team, row, col, 3)
        self.moves = [
            [-2, -1], [-2, 1], [2, -1], [2, 1], [-1, -2], [-1, 2], [1, -2], [1, 2]]
class Pawn(Piece):
    """Pawn: forward steps plus diagonal captures (white's orientation)."""

    def __init__(self, symbol, team, row, col):
        super().__init__('pawn', symbol, team, row, col, 1)
        # one forward, two forward (initial move), capture right, capture left
        self.moves = [[-1, 0], [-2, 0], [-1, 1], [-1, -1]]

    def remove_two_step_move(self):
        """Drop the two-square advance once the pawn has moved."""
        # Checks both orientations defensively, exactly as before.
        for two_step in ([2, 0], [-2, 0]):
            if two_step in self.moves:
                self.moves.remove(two_step)
from random import choice, randint
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from faker import Faker
from backend.commands import models
fake = Faker()
def make_text(min_paragraphs, max_paragraphs):
    """Join a randomly-sized batch of fake paragraphs with newlines."""
    paragraph_count = randint(min_paragraphs, max_paragraphs)
    return '\n'.join(fake.paragraphs(nb=paragraph_count))
class Command(BaseCommand):
help = 'Fill fake data for dev server'
def handle(self, *args, **options):
self._create_admin_user()
groups = self._create_groups()
commands = self._create_commands(groups=groups)
self._create_calls(commands=commands)
self.stdout.write(self.style.SUCCESS('Fake data filled!'))
@staticmethod
def _create_admin_user():
return get_user_model().objects.create_user(
username='admin',
password='<PASSWORD>',
is_staff=True,
is_superuser=True,
)
def _create_groups(self):
groups = []
for _ in range(15):
group = models.Group(
title=fake.slug(),
description=make_text(1, 3),
)
group.save()
groups.append(group)
return groups
def _create_commands(self, groups):
commands = []
for _ in range(100):
command = models.Command(
group=choice(groups),
title=fake.slug(),
description=make_text(1, 3),
body=choice(['ls .', 'pwd', 'time', 'date']),
is_disabled=choice([True, False, False]),
)
command.save()
commands.append(command)
return commands
def _create_calls(self, commands):
for _ in range(1000):
models.Call(
command=choice(commands),
source=choice([
models.Call.API,
models.Call.ADMIN,
]),
result=choice([
models.Call.SUCCESS_RESULT,
models.Call.FAIL_RESULT,
]),
output=make_text(1, 3),
).save() | backend/commands/management/commands/fill_fake_data.py | from random import choice, randint
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from faker import Faker
from backend.commands import models
fake = Faker()
def make_text(min_paragraphs, max_paragraphs):
    """Return a random number of fake paragraphs joined with newlines."""
    return '\n'.join(
        fake.paragraphs(nb=randint(min_paragraphs, max_paragraphs))
    )
class Command(BaseCommand):
    # Django management command: `manage.py <name>` seeds the dev database.
    help = 'Fill fake data for dev server'

    def handle(self, *args, **options):
        """Entry point: create an admin user, then groups, commands, and calls."""
        self._create_admin_user()
        groups = self._create_groups()
        commands = self._create_commands(groups=groups)
        self._create_calls(commands=commands)
        self.stdout.write(self.style.SUCCESS('Fake data filled!'))

    @staticmethod
    def _create_admin_user():
        """Create the dev superuser account."""
        # NOTE(review): '<PASSWORD>' looks like a scrubbed placeholder —
        # confirm the real dev password is supplied before use.
        return get_user_model().objects.create_user(
            username='admin',
            password='<PASSWORD>',
            is_staff=True,
            is_superuser=True,
        )

    def _create_groups(self):
        """Create and return 15 groups with fake titles/descriptions."""
        groups = []
        for _ in range(15):
            group = models.Group(
                title=fake.slug(),
                description=make_text(1, 3),
            )
            group.save()
            groups.append(group)
        return groups

    def _create_commands(self, groups):
        """Create and return 100 commands spread over the groups."""
        commands = []
        for _ in range(100):
            command = models.Command(
                group=choice(groups),
                title=fake.slug(),
                description=make_text(1, 3),
                body=choice(['ls .', 'pwd', 'time', 'date']),
                # Roughly one in three commands starts disabled.
                is_disabled=choice([True, False, False]),
            )
            command.save()
            commands.append(command)
        return commands

    def _create_calls(self, commands):
        """Create 1000 call records with random source/result and fake output."""
        for _ in range(1000):
            models.Call(
                command=choice(commands),
                source=choice([
                    models.Call.API,
                    models.Call.ADMIN,
                ]),
                result=choice([
                    models.Call.SUCCESS_RESULT,
                    models.Call.FAIL_RESULT,
                ]),
                output=make_text(1, 3),
            ).save()
import errno
import os
import socket
import ssl as sys_ssl
from typing import Union
from thor.dns import lookup
from thor.loop import LoopBase
from thor.tcp import TcpClient, TcpConnection
# Teach TcpConnection about TLS error codes: want-read/want-write behave
# like EAGAIN (retry later); EOF and protocol errors close the connection.
TcpConnection.block_errs.add(sys_ssl.SSL_ERROR_WANT_READ)
TcpConnection.block_errs.add(sys_ssl.SSL_ERROR_WANT_WRITE)
TcpConnection.close_errs.add(sys_ssl.SSL_ERROR_EOF)
TcpConnection.close_errs.add(sys_ssl.SSL_ERROR_SSL)
# TODO: TlsServer
# TODO: expose cipher info, peer info
class TlsClient(TcpClient):
    """
    An asynchronous SSL/TLS client.

    Emits:
    - connect (tcp_conn): upon connection
    - connect_error (err_type, err): if there's a problem before getting
      a connection. err_type is socket.error or socket.gaierror; err
      is the specific error encountered.

    To connect to a server:
    > c = TlsClient()
    > c.on('connect', conn_handler)
    > c.on('connect_error', error_handler)
    > c.connect(host, port)

    conn_handler will be called with the tcp_conn as the argument
    when the connection is made.
    """

    def __init__(self, loop: LoopBase = None) -> None:
        TcpClient.__init__(self, loop)
        # TLS-wrapped socket; created once the TCP connection is up.
        self.tls_sock = None

    def handle_connect(self) -> None:
        # TCP connected: wrap the raw socket for TLS and start the handshake.
        tls_context = sys_ssl.create_default_context()
        # NOTE(review): certificate verification is disabled, so connections
        # are encrypted but NOT authenticated — confirm this is intended.
        tls_context.check_hostname = False
        tls_context.verify_mode = sys_ssl.CERT_NONE
        self.tls_sock = tls_context.wrap_socket(  # type: ignore
            self.sock,
            do_handshake_on_connect=False,
            # SNI hostname; self.host is bytes, hence the idna decode.
            server_hostname=self.host.decode("idna"),
        )
        self.once("fd_writable", self.handshake)

    def handshake(self) -> None:
        # Drive the non-blocking handshake, re-arming on want-read/want-write.
        try:
            self.tls_sock.do_handshake()  # type: ignore
            self.once("fd_writable", self.handle_tls_connect)
        except sys_ssl.SSLError as why:
            if isinstance(why, sys_ssl.SSLWantReadError):
                self.once("fd_writable", self.handshake)  # Oh, Linux...
            elif isinstance(why, sys_ssl.SSLWantWriteError):
                self.once("fd_writable", self.handshake)
            else:
                self.handle_socket_error(why, "ssl")
        except socket.error as why:
            self.handle_socket_error(why, "ssl")
        except AttributeError:
            # For some reason, wrap_context is returning None. Try again.
            self.once("fd_writable", self.handshake)

    def handle_tls_connect(self) -> None:
        # Handshake complete: stop watching the fd, cancel the connect
        # timeout, and hand listeners a connection over the TLS socket.
        self.unregister_fd()
        if self._timeout_ev:
            self._timeout_ev.delete()
        tls_conn = TcpConnection(self.tls_sock, self.host, self.port, self._loop)
        self.emit("connect", tls_conn)
if __name__ == "__main__":
import sys
from thor import run
test_host = sys.argv[1].encode("utf-8")
def out(outbytes: bytes) -> None:
sys.stdout.write(outbytes.decode("utf-8", "replace"))
def go(conn: TcpConnection) -> None:
conn.on("data", out)
conn.write(b"GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % test_host)
conn.pause(False)
c = TlsClient()
c.on("connect", go)
c.connect(test_host, 443)
run() | thor/tls.py | import errno
import os
import socket
import ssl as sys_ssl
from typing import Union
from thor.dns import lookup
from thor.loop import LoopBase
from thor.tcp import TcpClient, TcpConnection
# Teach TcpConnection about TLS error codes: want-read/want-write behave
# like EAGAIN (retry later); EOF and protocol errors close the connection.
TcpConnection.block_errs.add(sys_ssl.SSL_ERROR_WANT_READ)
TcpConnection.block_errs.add(sys_ssl.SSL_ERROR_WANT_WRITE)
TcpConnection.close_errs.add(sys_ssl.SSL_ERROR_EOF)
TcpConnection.close_errs.add(sys_ssl.SSL_ERROR_SSL)
# TODO: TlsServer
# TODO: expose cipher info, peer info
class TlsClient(TcpClient):
    """
    An asynchronous SSL/TLS client.

    Emits:
    - connect (tcp_conn): upon connection
    - connect_error (err_type, err): if there's a problem before getting
      a connection. err_type is socket.error or socket.gaierror; err
      is the specific error encountered.

    To connect to a server:
    > c = TlsClient()
    > c.on('connect', conn_handler)
    > c.on('connect_error', error_handler)
    > c.connect(host, port)

    conn_handler will be called with the tcp_conn as the argument
    when the connection is made.
    """

    def __init__(self, loop: LoopBase = None) -> None:
        TcpClient.__init__(self, loop)
        # TLS-wrapped socket; created once the TCP connection is up.
        self.tls_sock = None

    def handle_connect(self) -> None:
        # TCP connected: wrap the raw socket for TLS and start the handshake.
        tls_context = sys_ssl.create_default_context()
        # NOTE(review): certificate verification is disabled, so connections
        # are encrypted but NOT authenticated — confirm this is intended.
        tls_context.check_hostname = False
        tls_context.verify_mode = sys_ssl.CERT_NONE
        self.tls_sock = tls_context.wrap_socket(  # type: ignore
            self.sock,
            do_handshake_on_connect=False,
            # SNI hostname; self.host is bytes, hence the idna decode.
            server_hostname=self.host.decode("idna"),
        )
        self.once("fd_writable", self.handshake)

    def handshake(self) -> None:
        # Drive the non-blocking handshake, re-arming on want-read/want-write.
        try:
            self.tls_sock.do_handshake()  # type: ignore
            self.once("fd_writable", self.handle_tls_connect)
        except sys_ssl.SSLError as why:
            if isinstance(why, sys_ssl.SSLWantReadError):
                self.once("fd_writable", self.handshake)  # Oh, Linux...
            elif isinstance(why, sys_ssl.SSLWantWriteError):
                self.once("fd_writable", self.handshake)
            else:
                self.handle_socket_error(why, "ssl")
        except socket.error as why:
            self.handle_socket_error(why, "ssl")
        except AttributeError:
            # For some reason, wrap_context is returning None. Try again.
            self.once("fd_writable", self.handshake)

    def handle_tls_connect(self) -> None:
        # Handshake complete: stop watching the fd, cancel the connect
        # timeout, and hand listeners a connection over the TLS socket.
        self.unregister_fd()
        if self._timeout_ev:
            self._timeout_ev.delete()
        tls_conn = TcpConnection(self.tls_sock, self.host, self.port, self._loop)
        self.emit("connect", tls_conn)
if __name__ == "__main__":
    # Demo: fetch "/" over TLS from the host given on the command line.
    import sys
    from thor import run
    test_host = sys.argv[1].encode("utf-8")

    def out(outbytes: bytes) -> None:
        # Decode permissively so non-UTF-8 response bytes don't crash the demo.
        sys.stdout.write(outbytes.decode("utf-8", "replace"))

    def go(conn: TcpConnection) -> None:
        # On connect: print incoming data and send a minimal GET request.
        conn.on("data", out)
        conn.write(b"GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % test_host)
        conn.pause(False)

    c = TlsClient()
    c.on("connect", go)
    c.connect(test_host, 443)
    run()
import numpy as np
import logging
import sys
from enum import Enum
import matplotlib.pyplot as plt
from collections import namedtuple
from sklearn.datasets import load_boston
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split, KFold
# Log to stderr at INFO level so training progress is visible by default.
logging.basicConfig(stream=sys.stderr, level=logging.INFO)


class TrainType(Enum):
    """How LinearRegr.fit trains: closed-form solve or batch gradient descent."""
    NORMAL_EQUATION = 0
    GRADIENT_DESCENT = 1


# Hyper-parameter bundle consumed by LinearRegr.fit / KFoldCrossValidator.
Model_Parameter = namedtuple(
    'Model_Parameter', ('lambda_ridge, training_method, alpha, epochs')
)
class Preprocessing:
    """Z-score standardization helpers, optionally remembering fitted stats."""

    def __init__(self):
        # Column statistics saved by standardize_save_state; None until then.
        self.mean = None
        self.std = None

    @staticmethod
    def standardize(X, mean=None, std=None, inplace=False):
        """Return (X - mean) / std, computing column stats when not given.

        Zero-variance columns get std 1 so the division is harmless.
        With inplace=True, X is modified and returned; otherwise a new
        array is returned.
        """
        if mean is None:
            mean = np.mean(X, axis=0)
        if std is None:
            std = np.std(X, axis=0)
        std = np.where(std == 0, 1, std)
        if inplace:
            X -= mean
            X /= std
        else:
            X = (X - mean) / std
        return X

    @staticmethod
    def insert_bias_term(X):
        """Prepend a column of ones (the bias/intercept feature) to X."""
        bias_arr = np.ones(X.shape[0])
        return np.c_[bias_arr, X]

    def standardize_save_state(self, X, mean=None, std=None, inplace=False):
        """Standardize X and save mean/std for later use by fit().

        Cleanup: now delegates to standardize() instead of duplicating its
        body; the zero-variance substitution is idempotent so results are
        unchanged.
        """
        if mean is None:
            mean = np.mean(X, axis=0)
        if std is None:
            std = np.std(X, axis=0)
        std = np.where(std == 0, 1, std)
        self.mean = mean
        self.std = std
        return Preprocessing.standardize(X, self.mean, self.std, inplace)

    def fit(self, X, inplace=False):
        """Apply previously saved mean/std to X; raises if none saved."""
        if self.mean is None or self.std is None:
            # Message typo fixed ("is not set for").
            raise ValueError("Mean or std is not set for the preprocessing object")
        if inplace:
            X -= self.mean
            X /= self.std
        else:
            X = (X - self.mean) / self.std
        return X
class LinearRegr:
    """Linear regression with optional ridge (L2) regularization."""

    __slots__ = ['theta']

    def __init__(self):
        # Parameter vector (bias included); None until fit() is called.
        self.theta = None

    def __repr__(self):
        return ' '.join([str(parm) for parm in np.ndarray.flatten(self.theta)])

    def fit(self, X, y,
            model_params=Model_Parameter(lambda_ridge=0,
                                         training_method=TrainType.NORMAL_EQUATION,
                                         alpha=0.5,
                                         epochs=1000)):
        """
        Fit/train the linear model
        It has been assumed that the bias has been added to X
        """
        # The default model_params is an immutable namedtuple, so the
        # shared-mutable-default pitfall does not apply here.
        if X.shape[0] != y.shape[0]:
            raise ValueError(
                f"X shape {X.shape[0]} != y shape {y.shape[0]}. Dimensions not matching")
        if model_params.training_method == TrainType.NORMAL_EQUATION:
            self._normal_equation_method(X, y, model_params)
        elif model_params.training_method == TrainType.GRADIENT_DESCENT:
            self._gradient_descent_method(X, y, model_params)
        else:
            raise ValueError("Model type not supplied")

    def _normal_equation_method(self, X, y, model_params):
        # Feature Scaling is not required
        # theta = (XtX + LE*)-1 . Xt.y
        # Almost identity matrix E where the first row, first col elem is 0
        # since we do not regularize the bias input, x0 = 1
        lambda_ridge = model_params.lambda_ridge
        E_start = np.identity(X.shape[1])
        E_start[0][0] = 0
        E_start *= lambda_ridge
        X_t = np.matrix.transpose(X)
        dot_Xt_X = np.dot(X_t, X)  # XtX
        # pinv (pseudo-inverse) keeps the solve stable even if XtX is singular.
        self.theta = np.dot(
            np.dot(np.linalg.pinv(dot_Xt_X+E_start), X_t), y)

    def _gradient_descent_method(self, X, y, model_params):
        """
        WARNING Feature scaling should already be done for X
        Batch Gradient Descent
        """
        lambda_ridge, training_method, alpha, epochs = model_params
        self.theta = np.zeros(X.shape[1])
        loss_overtime = []
        m = y.shape[0]
        for epoch in range(epochs):
            gradient_wout_regu = (1/m)*np.dot(
                np.matrix.transpose(X), np.dot(X, self.theta)-y)
            # 0th parameter/bias is not regularized
            self.theta[0] = self.theta[0] - alpha*gradient_wout_regu[0]
            gradient_with_regu = gradient_wout_regu + \
                ((lambda_ridge/m)*self.theta)
            # All other parameters regularized
            self.theta[1:] = self.theta[1:] - alpha*gradient_with_regu[1:]
            # NOTE(review): `epoch % 1 == 0` is always true, so loss is
            # computed and logged every epoch — possibly meant e.g. % 100.
            if epoch % 1 == 0:
                current_loss = self.loss(X, y, lambda_ridge)
                logging.info(
                    f"Current loss at epoch {epoch} is {current_loss}")
                loss_overtime.append(current_loss)
        self.plot_loss_curve(loss_overtime, epochs)

    def plot_loss_curve(self, loss_arr, iterations, log_mode=False):
        """Plot per-epoch training loss; blocks on plt.show()."""
        if log_mode:
            plt.semilogx(range(iterations), loss_arr)
        else:
            plt.plot(loss_arr)
        plt.title("Loss function")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.grid(True)
        plt.show()

    @staticmethod
    def mse_loss(X, y, theta, lambda_ridge=0):
        """ Calculates the MSE loss for linear regression """
        if X.shape[0] != y.shape[0]:
            raise ValueError(
                f"X shape {X.shape[0]} != y shape {y.shape[0]}. Dimensions not matching")
        elif X.shape[1] != theta.shape[0]:
            raise ValueError(
                f"X shape {X.shape[1]} != theta shape {theta.shape[0]}. Dimensions not matching")
        X_theta_minus_y = np.dot(X, theta)-y
        X_theta_minus_y_t = np.matrix.transpose(X_theta_minus_y)
        # The ridge penalty skips theta[0] (the bias term).
        return (1/(2*X.shape[0])) * (
            (np.dot(X_theta_minus_y_t, X_theta_minus_y)) +
            (lambda_ridge*np.dot(np.matrix.transpose(theta[1:]), theta[1:])))

    @staticmethod
    def predict_X(X, theta):
        """
        Predict using the linear model
        """
        if theta is None:
            raise ValueError("Model has not been trained yet")
        # prediction = X*theta
        return np.dot(X, theta)

    @staticmethod
    def score_X(X, y, theta):
        """
        Returns the coefficient of determination
        """
        if theta is None:
            raise ValueError("Model has not been trained yet")
        y_mean = np.mean(y)
        y_pred = LinearRegr.predict_X(X, theta)
        ss_total = sum((y-y_mean)**2)  # total sum of squares
        ss_res = sum((y-y_pred)**2)  # sum of squared residuals
        return 1 - (ss_res / ss_total)

    def loss(self, X, y, lambda_ridge=0):
        """
        Calculates the current loss
        """
        if self.theta is None:
            raise ValueError("Model has not been trained yet")
        return LinearRegr.mse_loss(X, y, self.theta, lambda_ridge)

    def score(self, X, y):
        """
        Returns the coefficient of determination
        """
        return LinearRegr.score_X(X, y, self.theta)

    def predict(self, X):
        """
        Predict using the linear model
        """
        return LinearRegr.predict_X(X, self.theta)
class KFoldCrossValidator:
    """K-fold cross-validation; records per-fold losses and keeps the theta
    from the fold with the lowest test loss."""

    __slots__ = ['train_loss', 'test_loss', 'theta']

    def __init__(self):
        self.train_loss = []   # one training-loss entry per fold
        self.test_loss = []    # one test-loss entry per fold
        self.theta = None      # best-fold parameters after cross_validate

    def cross_validate(self,
                       model,
                       model_params,
                       X, y, k=10,
                       custom_kfold=False,
                       seed=None):
        """
        Cross validation function, the theta parameter chosen is from the split with the least test error

        Bug fix: `seed` used to default to `np.random.randint(10000)`, which
        Python evaluates once at class-definition time — every call without
        an explicit seed therefore reused the same "random" seed.  It now
        defaults to None and a fresh seed is drawn per call.  Callers that
        pass a seed are unaffected.
        """
        if seed is None:
            seed = np.random.randint(10000)
        m = X.shape[0]
        lambda_ridge, training_method, alpha, epochs = model_params
        min_test_error = float('inf')  # tracks the minimum error over the k folds
        best_fit_theta = None  # theta belonging to min_test_error
        if custom_kfold:
            logging.info(
                f"Running Custom KFoldCrossValidator with {k} folds and lambda={lambda_ridge}")
            np.random.seed(seed)  # seed random shuffler
            if m < k:
                raise ValueError(
                    f"No of k splits {k} cannot be greater than no. of samples {m}")
            # Fisher-Yates shuffle of X and y together so rows stay aligned.
            for i in range(m):
                swap_idx = np.random.randint(i, m)
                X[[i, swap_idx]] = X[[swap_idx, i]]
                y[[i, swap_idx]] = y[[swap_idx, i]]
            # Sliding test window [start, end) of size m // k.
            fold_step = m // k
            start = 0
            end = fold_step
            for _ in range(k):
                end = min(end, m)  # prevent array idx out of bounds
                X_train, X_test = np.concatenate(
                    [X[0:start], X[end:m]], axis=0), X[start:end]
                y_train, y_test = np.concatenate(
                    [y[0:start], y[end:m]], axis=0), y[start:end]
                start += fold_step
                end += fold_step
                model.fit(X_train, y_train, model_params)
                cur_train_loss = model.loss(X_train, y_train, lambda_ridge)
                cur_test_loss = model.loss(X_test, y_test, lambda_ridge)
                self.train_loss.append(cur_train_loss)
                self.test_loss.append(cur_test_loss)
                if cur_test_loss < min_test_error:
                    min_test_error = cur_test_loss
                    best_fit_theta = model.theta
        else:
            logging.info(
                f"Running Sklearn KFoldCrossValidator with {k} folds and lambda {lambda_ridge}")
            kf = KFold(n_splits=k, random_state=seed, shuffle=True)
            for train_index, test_index in kf.split(X):
                X_train, X_test = X[train_index], X[test_index]
                y_train, y_test = y[train_index], y[test_index]
                model.fit(X_train, y_train, model_params)
                cur_train_loss = model.loss(X_train, y_train, lambda_ridge)
                cur_test_loss = model.loss(X_test, y_test, lambda_ridge)
                self.train_loss.append(cur_train_loss)
                self.test_loss.append(cur_test_loss)
                if cur_test_loss < min_test_error:
                    min_test_error = cur_test_loss
                    best_fit_theta = model.theta
        self.theta = best_fit_theta
if __name__ == "__main__":
X,y = load_boston(return_X_y=True)
bh_model = LinearRegr()
kfold_linear_regr = KFoldCrossValidator()
X_feat = Preprocessing.insert_bias_term(X)
model_params = Model_Parameter(lambda_ridge=0, training_method=TrainType.NORMAL_EQUATION, alpha=0, epochs=0)
kfold_linear_regr.cross_validate(bh_model, model_params, X_feat, y, k=10, custom_kfold=False)
lregr_train_loss = kfold_linear_regr.train_loss
lregr_test_loss = kfold_linear_regr.test_loss
print(f'Average train loss is: {sum(lregr_train_loss)/len(lregr_train_loss)}')
print(f'Average test loss is: {sum(lregr_test_loss)/len(lregr_test_loss)}')
print(f"R squared for the entire dataset is {bh_model.score(X_feat, y)}") | src/linear_regression.py | import numpy as np
import logging
import sys
from enum import Enum
import matplotlib.pyplot as plt
from collections import namedtuple
from sklearn.datasets import load_boston
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split, KFold
# Log to stderr at INFO level so training progress is visible by default.
logging.basicConfig(stream=sys.stderr, level=logging.INFO)


class TrainType(Enum):
    """How LinearRegr.fit trains: closed-form solve or batch gradient descent."""
    NORMAL_EQUATION = 0
    GRADIENT_DESCENT = 1


# Hyper-parameter bundle consumed by LinearRegr.fit / KFoldCrossValidator.
Model_Parameter = namedtuple(
    'Model_Parameter', ('lambda_ridge, training_method, alpha, epochs')
)
class Preprocessing:
    """Z-score standardization helpers, optionally remembering fitted stats."""

    def __init__(self):
        # Column statistics saved by standardize_save_state; None until then.
        self.mean = None
        self.std = None

    @staticmethod
    def standardize(X, mean=None, std=None, inplace=False):
        """Return (X - mean) / std; stats computed per column when omitted."""
        if mean is None:
            mean = np.mean(X, axis=0)
        if std is None:
            std = np.std(X, axis=0)
        # Zero-variance columns would divide by zero; substitute 1.
        std = np.where(std == 0, 1, std)
        if inplace:
            X -= mean
            X /= std
        else:
            X = (X - mean) / std
        return X

    @staticmethod
    def insert_bias_term(X):
        """Prepend a column of ones (the bias/intercept feature) to X."""
        bias_arr = np.ones(X.shape[0])
        return np.c_[bias_arr, X]

    def standardize_save_state(self, X, mean=None, std=None, inplace=False):
        """Standardize X and save mean/std for later use by fit()."""
        if mean is None:
            mean = np.mean(X, axis=0)
        if std is None:
            std = np.std(X, axis=0)
        std = np.where(std == 0, 1, std)
        self.mean = mean
        self.std = std
        if inplace:
            X -= mean
            X /= std
        else:
            X = (X - mean) / std
        return X

    def fit(self, X, inplace=False):
        """Apply previously saved statistics to X; raises if none saved."""
        if self.mean is None or self.std is None:
            # NOTE(review): message wording — likely meant "not set for".
            raise ValueError("Mean or std is not for the preprocessing object")
        if inplace:
            X -= self.mean
            X /= self.std
        else:
            X = (X - self.mean) / self.std
        return X
class LinearRegr:
    """Linear regression with optional ridge (L2) regularization."""

    __slots__ = ['theta']

    def __init__(self):
        # Parameter vector (bias included); None until fit() is called.
        self.theta = None

    def __repr__(self):
        return ' '.join([str(parm) for parm in np.ndarray.flatten(self.theta)])

    def fit(self, X, y,
            model_params=Model_Parameter(lambda_ridge=0,
                                         training_method=TrainType.NORMAL_EQUATION,
                                         alpha=0.5,
                                         epochs=1000)):
        """
        Fit/train the linear model
        It has been assumed that the bias has been added to X
        """
        # The default model_params is an immutable namedtuple, so the
        # shared-mutable-default pitfall does not apply here.
        if X.shape[0] != y.shape[0]:
            raise ValueError(
                f"X shape {X.shape[0]} != y shape {y.shape[0]}. Dimensions not matching")
        if model_params.training_method == TrainType.NORMAL_EQUATION:
            self._normal_equation_method(X, y, model_params)
        elif model_params.training_method == TrainType.GRADIENT_DESCENT:
            self._gradient_descent_method(X, y, model_params)
        else:
            raise ValueError("Model type not supplied")

    def _normal_equation_method(self, X, y, model_params):
        # Feature Scaling is not required
        # theta = (XtX + LE*)-1 . Xt.y
        # Almost identity matrix E where the first row, first col elem is 0
        # since we do not regularize the bias input, x0 = 1
        lambda_ridge = model_params.lambda_ridge
        E_start = np.identity(X.shape[1])
        E_start[0][0] = 0
        E_start *= lambda_ridge
        X_t = np.matrix.transpose(X)
        dot_Xt_X = np.dot(X_t, X)  # XtX
        # pinv (pseudo-inverse) keeps the solve stable even if XtX is singular.
        self.theta = np.dot(
            np.dot(np.linalg.pinv(dot_Xt_X+E_start), X_t), y)

    def _gradient_descent_method(self, X, y, model_params):
        """
        WARNING Feature scaling should already be done for X
        Batch Gradient Descent
        """
        lambda_ridge, training_method, alpha, epochs = model_params
        self.theta = np.zeros(X.shape[1])
        loss_overtime = []
        m = y.shape[0]
        for epoch in range(epochs):
            gradient_wout_regu = (1/m)*np.dot(
                np.matrix.transpose(X), np.dot(X, self.theta)-y)
            # 0th parameter/bias is not regularized
            self.theta[0] = self.theta[0] - alpha*gradient_wout_regu[0]
            gradient_with_regu = gradient_wout_regu + \
                ((lambda_ridge/m)*self.theta)
            # All other parameters regularized
            self.theta[1:] = self.theta[1:] - alpha*gradient_with_regu[1:]
            # NOTE(review): `epoch % 1 == 0` is always true, so loss is
            # computed and logged every epoch — possibly meant e.g. % 100.
            if epoch % 1 == 0:
                current_loss = self.loss(X, y, lambda_ridge)
                logging.info(
                    f"Current loss at epoch {epoch} is {current_loss}")
                loss_overtime.append(current_loss)
        self.plot_loss_curve(loss_overtime, epochs)

    def plot_loss_curve(self, loss_arr, iterations, log_mode=False):
        """Plot per-epoch training loss; blocks on plt.show()."""
        if log_mode:
            plt.semilogx(range(iterations), loss_arr)
        else:
            plt.plot(loss_arr)
        plt.title("Loss function")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.grid(True)
        plt.show()

    @staticmethod
    def mse_loss(X, y, theta, lambda_ridge=0):
        """ Calculates the MSE loss for linear regression """
        if X.shape[0] != y.shape[0]:
            raise ValueError(
                f"X shape {X.shape[0]} != y shape {y.shape[0]}. Dimensions not matching")
        elif X.shape[1] != theta.shape[0]:
            raise ValueError(
                f"X shape {X.shape[1]} != theta shape {theta.shape[0]}. Dimensions not matching")
        X_theta_minus_y = np.dot(X, theta)-y
        X_theta_minus_y_t = np.matrix.transpose(X_theta_minus_y)
        # The ridge penalty skips theta[0] (the bias term).
        return (1/(2*X.shape[0])) * (
            (np.dot(X_theta_minus_y_t, X_theta_minus_y)) +
            (lambda_ridge*np.dot(np.matrix.transpose(theta[1:]), theta[1:])))

    @staticmethod
    def predict_X(X, theta):
        """
        Predict using the linear model
        """
        if theta is None:
            raise ValueError("Model has not been trained yet")
        # prediction = X*theta
        return np.dot(X, theta)

    @staticmethod
    def score_X(X, y, theta):
        """
        Returns the coefficient of determination
        """
        if theta is None:
            raise ValueError("Model has not been trained yet")
        y_mean = np.mean(y)
        y_pred = LinearRegr.predict_X(X, theta)
        ss_total = sum((y-y_mean)**2)  # total sum of squares
        ss_res = sum((y-y_pred)**2)  # sum of squared residuals
        return 1 - (ss_res / ss_total)

    def loss(self, X, y, lambda_ridge=0):
        """
        Calculates the current loss
        """
        if self.theta is None:
            raise ValueError("Model has not been trained yet")
        return LinearRegr.mse_loss(X, y, self.theta, lambda_ridge)

    def score(self, X, y):
        """
        Returns the coefficient of determination
        """
        return LinearRegr.score_X(X, y, self.theta)

    def predict(self, X):
        """
        Predict using the linear model
        """
        return LinearRegr.predict_X(X, self.theta)
class KFoldCrossValidator:
__slots__ = ['train_loss', 'test_loss', 'theta']
def __init__(self):
self.train_loss = []
self.test_loss = []
self.theta = None
def cross_validate(self,
model,
model_params,
X, y, k=10,
custom_kfold=False,
seed=np.random.randint(10000)):
"""
Cross validation function, the theta parameter chosen is from the split with the least test error
"""
m = X.shape[0]
lambda_ridge, training_method, alpha, epochs = model_params
min_test_error = float('inf') # tracks the minimum error with k-folds
best_fit_theta = None # saves the best theta value with the min_test_error
if custom_kfold:
logging.info(
f"Running Custom KFoldCrossValidator with {k} folds and lambda={lambda_ridge}")
np.random.seed(seed) # seed random shuffler
if m < k:
raise ValueError(
f"No of k splits {k} cannot be greater than no. of samples {m}")
# Randomly shuffle X and y inplace while matching corresponding feat and target
for i in range(m):
swap_idx = np.random.randint(i, m)
# ensures the corresponding feat-target values match
X[[i, swap_idx]] = X[[swap_idx, i]]
y[[i, swap_idx]] = y[[swap_idx, i]]
# test start and end idx
fold_step = m//k
start = 0
end = fold_step
for i in range(k):
end = min(end, m) # prevent array idx out of bounds
X_train, X_test = np.concatenate(
[X[0:start], X[end:m]], axis=0), X[start:end]
y_train, y_test = np.concatenate(
[y[0:start], y[end:m]], axis=0), y[start:end]
start += fold_step
end += fold_step
model.fit(X_train, y_train, model_params)
cur_train_loss = model.loss(X_train, y_train, lambda_ridge)
cur_test_loss = model.loss(X_test, y_test, lambda_ridge)
self.train_loss.append(cur_train_loss)
self.test_loss.append(cur_test_loss)
if cur_test_loss < min_test_error:
min_test_error = cur_test_loss
best_fit_theta = model.theta
else:
logging.info(
f"Running Sklearn KFoldCrossValidator with {k} folds and lambda {lambda_ridge}")
kf = KFold(n_splits=k, random_state=seed, shuffle=True)
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train, model_params)
cur_train_loss = model.loss(X_train, y_train, lambda_ridge)
cur_test_loss = model.loss(X_test, y_test, lambda_ridge)
self.train_loss.append(cur_train_loss)
self.test_loss.append(cur_test_loss)
if cur_test_loss < min_test_error:
min_test_error = cur_test_loss
best_fit_theta = model.theta
self.theta = best_fit_theta
if __name__ == "__main__":
X,y = load_boston(return_X_y=True)
bh_model = LinearRegr()
kfold_linear_regr = KFoldCrossValidator()
X_feat = Preprocessing.insert_bias_term(X)
model_params = Model_Parameter(lambda_ridge=0, training_method=TrainType.NORMAL_EQUATION, alpha=0, epochs=0)
kfold_linear_regr.cross_validate(bh_model, model_params, X_feat, y, k=10, custom_kfold=False)
lregr_train_loss = kfold_linear_regr.train_loss
lregr_test_loss = kfold_linear_regr.test_loss
print(f'Average train loss is: {sum(lregr_train_loss)/len(lregr_train_loss)}')
print(f'Average test loss is: {sum(lregr_test_loss)/len(lregr_test_loss)}')
print(f"R squared for the entire dataset is {bh_model.score(X_feat, y)}") | 0.714628 | 0.394872 |
import logging
import os
from datetime import timedelta
from pathlib import Path
import environ
from django.utils.log import DEFAULT_LOGGING
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env(
# set casting, default value
DEBUG=(bool, False),
SECRET_KEY=(str, ""),
ALLOWED_HOSTS=(list, []),
CORS_ALLOWED_ORIGINS=(list, []),
CORS_ALLOW_ALL_ORIGINS=(bool, False),
CORS_ALLOW_CREDENTIALS=(bool, False),
JWT_ACCESS_TOKEN_LIFETIME=(int, 999),
JWT_REFRESH_TOKEN_LIFETIME=(int, 1),
PRODUCT_DATA_API_CACHE_TIME=(int, 3600),
PAGE_SIZE=(int, 50),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env("DEBUG")
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"corsheaders",
"rest_framework",
"django_filters",
"drf_yasg",
"customers",
"wishlist",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "config.wsgi.application"
# rest_framework
# https://www.django-rest-framework.org/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework_simplejwt.authentication.JWTAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_RENDERER_CLASSES": (
"rest_framework.renderers.JSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": env("PAGE_SIZE"),
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": env.db(),
}
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(minutes=env("JWT_ACCESS_TOKEN_LIFETIME")),
"REFRESH_TOKEN_LIFETIME": timedelta(days=env("JWT_REFRESH_TOKEN_LIFETIME")),
"ROTATE_REFRESH_TOKENS": False,
"BLACKLIST_AFTER_ROTATION": True,
"UPDATE_LAST_LOGIN": False,
"ALGORITHM": "HS256",
"SIGNING_KEY": SECRET_KEY,
"VERIFYING_KEY": None,
"AUDIENCE": None,
"ISSUER": None,
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"USER_ID_FIELD": "id",
"USER_ID_CLAIM": "user_id",
"AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
"TOKEN_TYPE_CLAIM": "token_type",
"JTI_CLAIM": "jti",
"SLIDING_TOKEN_REFRESH_EXP_CLAIM": "refresh_exp",
"SLIDING_TOKEN_LIFETIME": timedelta(minutes=5),
"SLIDING_TOKEN_REFRESH_LIFETIME": timedelta(days=1),
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "pt-br"
TIME_ZONE = "America/Sao_Paulo"
USE_I18N = False
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = "static/"
# LOGGING definition
# Disable Django's logging setup
LOGGING_CONFIG = None
LOGFILE = "logs/api.log"
os.makedirs(os.path.dirname(LOGFILE), exist_ok=True)
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s - %(name)-12s:%(lineno)d - %(levelname)s - %(message)s -",
},
"django.server": DEFAULT_LOGGING["formatters"]["django.server"],
},
"handlers": {
"file": {
"level": "INFO",
"class": "logging.FileHandler",
"filename": LOGFILE,
"formatter": "default",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "default",
},
"null": {
"class": "logging.NullHandler",
},
"django.server": DEFAULT_LOGGING["handlers"]["django.server"],
},
"loggers": {
"": {
"handlers": ["file", "console"],
"propagate": True,
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
"django.server": DEFAULT_LOGGING["loggers"]["django.server"],
}
)
PRODUCT_DATA_API = env("PRODUCT_DATA_API")
PRODUCT_DATA_API_CACHE_TIME = env("PRODUCT_DATA_API_CACHE_TIME") | backend/config/settings.py | import logging
import os
from datetime import timedelta
from pathlib import Path
import environ
from django.utils.log import DEFAULT_LOGGING
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env(
# set casting, default value
DEBUG=(bool, False),
SECRET_KEY=(str, ""),
ALLOWED_HOSTS=(list, []),
CORS_ALLOWED_ORIGINS=(list, []),
CORS_ALLOW_ALL_ORIGINS=(bool, False),
CORS_ALLOW_CREDENTIALS=(bool, False),
JWT_ACCESS_TOKEN_LIFETIME=(int, 999),
JWT_REFRESH_TOKEN_LIFETIME=(int, 1),
PRODUCT_DATA_API_CACHE_TIME=(int, 3600),
PAGE_SIZE=(int, 50),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env("DEBUG")
ALLOWED_HOSTS = env("ALLOWED_HOSTS")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"corsheaders",
"rest_framework",
"django_filters",
"drf_yasg",
"customers",
"wishlist",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "config.wsgi.application"
# rest_framework
# https://www.django-rest-framework.org/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework_simplejwt.authentication.JWTAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_RENDERER_CLASSES": (
"rest_framework.renderers.JSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": env("PAGE_SIZE"),
"DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": env.db(),
}
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(minutes=env("JWT_ACCESS_TOKEN_LIFETIME")),
"REFRESH_TOKEN_LIFETIME": timedelta(days=env("JWT_REFRESH_TOKEN_LIFETIME")),
"ROTATE_REFRESH_TOKENS": False,
"BLACKLIST_AFTER_ROTATION": True,
"UPDATE_LAST_LOGIN": False,
"ALGORITHM": "HS256",
"SIGNING_KEY": SECRET_KEY,
"VERIFYING_KEY": None,
"AUDIENCE": None,
"ISSUER": None,
"AUTH_HEADER_TYPES": ("Bearer",),
"AUTH_HEADER_NAME": "HTTP_AUTHORIZATION",
"USER_ID_FIELD": "id",
"USER_ID_CLAIM": "user_id",
"AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
"TOKEN_TYPE_CLAIM": "token_type",
"JTI_CLAIM": "jti",
"SLIDING_TOKEN_REFRESH_EXP_CLAIM": "refresh_exp",
"SLIDING_TOKEN_LIFETIME": timedelta(minutes=5),
"SLIDING_TOKEN_REFRESH_LIFETIME": timedelta(days=1),
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "pt-br"
TIME_ZONE = "America/Sao_Paulo"
USE_I18N = False
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = "static/"
# LOGGING definition
# Disable Django's logging setup
LOGGING_CONFIG = None
LOGFILE = "logs/api.log"
os.makedirs(os.path.dirname(LOGFILE), exist_ok=True)
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s - %(name)-12s:%(lineno)d - %(levelname)s - %(message)s -",
},
"django.server": DEFAULT_LOGGING["formatters"]["django.server"],
},
"handlers": {
"file": {
"level": "INFO",
"class": "logging.FileHandler",
"filename": LOGFILE,
"formatter": "default",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "default",
},
"null": {
"class": "logging.NullHandler",
},
"django.server": DEFAULT_LOGGING["handlers"]["django.server"],
},
"loggers": {
"": {
"handlers": ["file", "console"],
"propagate": True,
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
"django.server": DEFAULT_LOGGING["loggers"]["django.server"],
}
)
PRODUCT_DATA_API = env("PRODUCT_DATA_API")
PRODUCT_DATA_API_CACHE_TIME = env("PRODUCT_DATA_API_CACHE_TIME") | 0.47098 | 0.153327 |
import asyncio
from typing import List, Union
from bscscan import BscScan
from web3 import Web3
from senkalib.chain.bsc.bsc_transaction import BscTransaction
from senkalib.chain.transaction import Transaction
from senkalib.chain.transaction_generator import TransactionGenerator
class BscTransactionGenerator(TransactionGenerator):
chain = "bsc"
@classmethod
def get_transactions(cls, transaction_params: dict) -> List[Transaction]:
settings_bsc = transaction_params["settings"].get_settings()
startblock: int = transaction_params.get("startblock", 0)
endblock: Union[int, None] = transaction_params.get("endblock", None)
w3 = Web3(Web3.HTTPProvider("https://bsc-dataseed.binance.org/"))
transactions = []
page = 1
while True:
txs = asyncio.run(
BscTransactionGenerator.get_txs(
settings_bsc,
transaction_params["data"],
startblock,
endblock,
page,
)
)
for tx in txs:
if "1" in tx["isError"]:
continue
else:
receipt = w3.eth.get_transaction_receipt(tx["hash"])
transactions.append(
BscTransaction(
tx["hash"],
receipt,
tx["timeStamp"],
tx["gasUsed"],
tx["gasPrice"],
)
)
page += 1
if len(txs) < 10000: # bscscan api return 10000 results for each page
break
return transactions
@classmethod
async def get_txs(
cls,
settings: dict,
address: str,
arg_startblock: Union[int, None] = None,
arg_endblock: Union[int, None] = None,
arg_page: Union[int, None] = None,
):
async with BscScan(settings["bscscan_key"]) as bscscan:
txs = await bscscan.get_normal_txs_by_address_paginated(
address=address,
startblock=arg_startblock,
endblock=arg_endblock,
page=arg_page,
offset=0,
sort="asc",
)
return txs | src/senkalib/chain/bsc/bsc_transaction_generator.py | import asyncio
from typing import List, Union
from bscscan import BscScan
from web3 import Web3
from senkalib.chain.bsc.bsc_transaction import BscTransaction
from senkalib.chain.transaction import Transaction
from senkalib.chain.transaction_generator import TransactionGenerator
class BscTransactionGenerator(TransactionGenerator):
chain = "bsc"
@classmethod
def get_transactions(cls, transaction_params: dict) -> List[Transaction]:
settings_bsc = transaction_params["settings"].get_settings()
startblock: int = transaction_params.get("startblock", 0)
endblock: Union[int, None] = transaction_params.get("endblock", None)
w3 = Web3(Web3.HTTPProvider("https://bsc-dataseed.binance.org/"))
transactions = []
page = 1
while True:
txs = asyncio.run(
BscTransactionGenerator.get_txs(
settings_bsc,
transaction_params["data"],
startblock,
endblock,
page,
)
)
for tx in txs:
if "1" in tx["isError"]:
continue
else:
receipt = w3.eth.get_transaction_receipt(tx["hash"])
transactions.append(
BscTransaction(
tx["hash"],
receipt,
tx["timeStamp"],
tx["gasUsed"],
tx["gasPrice"],
)
)
page += 1
if len(txs) < 10000: # bscscan api return 10000 results for each page
break
return transactions
@classmethod
async def get_txs(
cls,
settings: dict,
address: str,
arg_startblock: Union[int, None] = None,
arg_endblock: Union[int, None] = None,
arg_page: Union[int, None] = None,
):
async with BscScan(settings["bscscan_key"]) as bscscan:
txs = await bscscan.get_normal_txs_by_address_paginated(
address=address,
startblock=arg_startblock,
endblock=arg_endblock,
page=arg_page,
offset=0,
sort="asc",
)
return txs | 0.705886 | 0.222964 |
import mock
import unittest
import os
from dcipipeline.main import (
process_args,
overload_dicts,
get_prev_stages,
pre_process_stage,
post_process_stage,
upload_junit_files_from_dir,
)
class TestMain(unittest.TestCase):
def test_process_args_empty(self):
args = ["dci-pipeline"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [])
def test_process_args_single(self):
args = ["dci-pipeline", "stage:key=value"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "value"}}])
def test_process_args_list(self):
args = ["dci-pipeline", "stage:key=value=toto,value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": ["value=toto", "value2"]}}])
def test_process_args_dict(self):
args = ["dci-pipeline", "stage:key=subkey:value", "stage:key=subkey2:value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(
result,
[
{"stage": {"key": {"subkey": "value"}}},
{"stage": {"key": {"subkey2": "value2"}}},
],
)
def test_process_args_dict_list(self):
args = ["dci-pipeline", "stage:key=subkey:value,value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": {"subkey": ["value", "value2"]}}}])
def test_process_args_list1(self):
args = ["dci-pipeline", "stage:key=value=toto,"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": ["value=toto"]}}])
def test_process_args_only_files(self):
args = ["dci-pipeline", "file1", "file2"]
result, args = process_args(args)
self.assertEqual(args, ["file1", "file2"])
self.assertEqual(result, [])
def test_process_args_http(self):
args = ["dci-pipeline", "stage:key=http://lwn.net/"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "http://lwn.net/"}}])
def test_process_args_https(self):
args = ["dci-pipeline", "stage:key=https://lwn.net/"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "https://lwn.net/"}}])
def test_overload_dicts_add(self):
stage = {"first": "value"}
overload = {"key": ["value=toto", "value2"]}
self.assertEqual(
overload_dicts(overload, stage),
{"first": "value", "key": ["value=toto", "value2"]},
)
def test_overload_dicts_replace_list(self):
overload = {"components": ["ocp=12", "ose-tests"]}
stage = {"components": ["ocp", "cnf-tests"], "topic": "OCP-4.4"}
self.assertEqual(
overload_dicts(overload, stage),
{"components": ["ocp=12", "cnf-tests", "ose-tests"], "topic": "OCP-4.4"},
)
def test_overload_dicts_add_dict(self):
overload = {"ansible_extravars": {"dci_comment": "universal answer"}}
stage = {"ansible_extravars": {"answer": 42}}
self.assertEqual(
overload_dicts(overload, stage),
{"ansible_extravars": {"answer": 42, "dci_comment": "universal answer"}},
)
def test_overload_dicts_add_list_in_dict(self):
overload = {"ansible_extravars": {"dci_comment": "universal answer"}}
stage = {"ansible_extravars": {"answer": 42}}
self.assertEqual(
overload_dicts(overload, stage),
{"ansible_extravars": {"answer": 42, "dci_comment": "universal answer"}},
)
def test_prev_stages(self):
stage1 = {"name": "1", "type": "ocp"}
stage2 = {
"name": "2",
"type": "ocp-upgrade",
"prev_stages": ["ocp-upgrade", "ocp"],
}
stage3 = {
"name": "3",
"type": "ocp-upgrade2",
"prev_stages": ["ocp-upgrade", "ocp"],
}
stage4 = {"name": "4", "type": "cnf2"}
pipeline = [stage1, stage2, stage3, stage4]
prev_stages = get_prev_stages(stage3, pipeline)
self.assertEqual(prev_stages, [stage2, stage1])
@mock.patch("dcipipeline.main.tempfile.mkdtemp")
def test_pre_process_stage(self, m):
stage = {"ansible_envvars": {"envvar": "/@tmpdir"}}
m.return_value = "/tmp/tmppath"
stage_metas, stage = pre_process_stage(stage)
self.assertEqual(stage_metas["tmpdirs"][0]["path"], "/tmp/tmppath")
@mock.patch("dcipipeline.main.shutil.rmtree")
@mock.patch("dcipipeline.main.upload_junit_files_from_dir")
def test_post_process_stage(self, m_upload_junit, m_rmtree):
metas = {
"tmpdirs": [{"name": "JUNIT_OUTPUT_DIR", "path": "/tmp/junit_tmppath"}]
}
post_process_stage("context", "stage", metas)
m_upload_junit.assert_called_with("context", "stage", "/tmp/junit_tmppath")
m_rmtree.assert_called_with("/tmp/junit_tmppath")
m_upload_junit.reset_mock()
m_rmtree.reset_mock()
metas = {"tmpdirs": [{"name": "envvar1", "path": "/tmp/tmppath"}]}
post_process_stage("context", "stage", metas)
self.assertTrue(not m_upload_junit.called)
m_rmtree.assert_called_with("/tmp/tmppath")
@mock.patch("dcipipeline.main.dci_file.create")
def test_upload_junit_files_from_dir(self, m):
try:
os.makedirs("/tmp/junit-tmppath")
except Exception:
pass
open("/tmp/junit-tmppath/junit-tests.xml", "a+").close()
stage = {"job_info": {"job": {"id": "1"}}}
upload_junit_files_from_dir("context", stage, "/tmp/junit-tmppath")
m.assert_called_with(
"context",
"junit-tests",
file_path="/tmp/junit-tmppath/junit-tests.xml",
mime="application/junit",
job_id="1",
)
if __name__ == "__main__":
unittest.main()
# test_main.py ends here | dcipipeline/test_main.py |
import mock
import unittest
import os
from dcipipeline.main import (
process_args,
overload_dicts,
get_prev_stages,
pre_process_stage,
post_process_stage,
upload_junit_files_from_dir,
)
class TestMain(unittest.TestCase):
def test_process_args_empty(self):
args = ["dci-pipeline"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [])
def test_process_args_single(self):
args = ["dci-pipeline", "stage:key=value"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "value"}}])
def test_process_args_list(self):
args = ["dci-pipeline", "stage:key=value=toto,value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": ["value=toto", "value2"]}}])
def test_process_args_dict(self):
args = ["dci-pipeline", "stage:key=subkey:value", "stage:key=subkey2:value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(
result,
[
{"stage": {"key": {"subkey": "value"}}},
{"stage": {"key": {"subkey2": "value2"}}},
],
)
def test_process_args_dict_list(self):
args = ["dci-pipeline", "stage:key=subkey:value,value2"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": {"subkey": ["value", "value2"]}}}])
def test_process_args_list1(self):
args = ["dci-pipeline", "stage:key=value=toto,"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": ["value=toto"]}}])
def test_process_args_only_files(self):
args = ["dci-pipeline", "file1", "file2"]
result, args = process_args(args)
self.assertEqual(args, ["file1", "file2"])
self.assertEqual(result, [])
def test_process_args_http(self):
args = ["dci-pipeline", "stage:key=http://lwn.net/"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "http://lwn.net/"}}])
def test_process_args_https(self):
args = ["dci-pipeline", "stage:key=https://lwn.net/"]
result, args = process_args(args)
self.assertEqual(args, [])
self.assertEqual(result, [{"stage": {"key": "https://lwn.net/"}}])
def test_overload_dicts_add(self):
stage = {"first": "value"}
overload = {"key": ["value=toto", "value2"]}
self.assertEqual(
overload_dicts(overload, stage),
{"first": "value", "key": ["value=toto", "value2"]},
)
def test_overload_dicts_replace_list(self):
overload = {"components": ["ocp=12", "ose-tests"]}
stage = {"components": ["ocp", "cnf-tests"], "topic": "OCP-4.4"}
self.assertEqual(
overload_dicts(overload, stage),
{"components": ["ocp=12", "cnf-tests", "ose-tests"], "topic": "OCP-4.4"},
)
def test_overload_dicts_add_dict(self):
overload = {"ansible_extravars": {"dci_comment": "universal answer"}}
stage = {"ansible_extravars": {"answer": 42}}
self.assertEqual(
overload_dicts(overload, stage),
{"ansible_extravars": {"answer": 42, "dci_comment": "universal answer"}},
)
def test_overload_dicts_add_list_in_dict(self):
overload = {"ansible_extravars": {"dci_comment": "universal answer"}}
stage = {"ansible_extravars": {"answer": 42}}
self.assertEqual(
overload_dicts(overload, stage),
{"ansible_extravars": {"answer": 42, "dci_comment": "universal answer"}},
)
def test_prev_stages(self):
stage1 = {"name": "1", "type": "ocp"}
stage2 = {
"name": "2",
"type": "ocp-upgrade",
"prev_stages": ["ocp-upgrade", "ocp"],
}
stage3 = {
"name": "3",
"type": "ocp-upgrade2",
"prev_stages": ["ocp-upgrade", "ocp"],
}
stage4 = {"name": "4", "type": "cnf2"}
pipeline = [stage1, stage2, stage3, stage4]
prev_stages = get_prev_stages(stage3, pipeline)
self.assertEqual(prev_stages, [stage2, stage1])
@mock.patch("dcipipeline.main.tempfile.mkdtemp")
def test_pre_process_stage(self, m):
stage = {"ansible_envvars": {"envvar": "/@tmpdir"}}
m.return_value = "/tmp/tmppath"
stage_metas, stage = pre_process_stage(stage)
self.assertEqual(stage_metas["tmpdirs"][0]["path"], "/tmp/tmppath")
@mock.patch("dcipipeline.main.shutil.rmtree")
@mock.patch("dcipipeline.main.upload_junit_files_from_dir")
def test_post_process_stage(self, m_upload_junit, m_rmtree):
metas = {
"tmpdirs": [{"name": "JUNIT_OUTPUT_DIR", "path": "/tmp/junit_tmppath"}]
}
post_process_stage("context", "stage", metas)
m_upload_junit.assert_called_with("context", "stage", "/tmp/junit_tmppath")
m_rmtree.assert_called_with("/tmp/junit_tmppath")
m_upload_junit.reset_mock()
m_rmtree.reset_mock()
metas = {"tmpdirs": [{"name": "envvar1", "path": "/tmp/tmppath"}]}
post_process_stage("context", "stage", metas)
self.assertTrue(not m_upload_junit.called)
m_rmtree.assert_called_with("/tmp/tmppath")
@mock.patch("dcipipeline.main.dci_file.create")
def test_upload_junit_files_from_dir(self, m):
try:
os.makedirs("/tmp/junit-tmppath")
except Exception:
pass
open("/tmp/junit-tmppath/junit-tests.xml", "a+").close()
stage = {"job_info": {"job": {"id": "1"}}}
upload_junit_files_from_dir("context", stage, "/tmp/junit-tmppath")
m.assert_called_with(
"context",
"junit-tests",
file_path="/tmp/junit-tmppath/junit-tests.xml",
mime="application/junit",
job_id="1",
)
if __name__ == "__main__":
unittest.main()
# test_main.py ends here | 0.409339 | 0.327295 |
import math
import time
import numpy as np
import taichi as ti
ti.init(arch=ti.gpu)
res = 1280, 720
color_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res)
max_ray_depth = 15
eps = 1e-4
inf = 8.2e0 # Scatter Radius
fov = 0.7 # field of view
dist_limit = 100
camera_pos = ti.Vector([0.00, 0.00, 5.0]) # [x, y, zoom]
light_pos = [0.0, 0.0, 0.0] # [x, y, zoom]
light_normal = [0.00, 0.00, -0.3]
light_radius = 0.4
@ti.func
def intersect_light(pos, d):
light_loc = ti.Vector(light_pos)
dot = -d.dot(ti.Vector(light_normal))
dist = d.dot(light_loc - pos)
dist_to_light = inf
if dot > 0 and dist > 0:
D = dist / dot
dist_to_center = (light_loc - (pos + D * d)).norm_sqr()
if dist_to_center < light_radius ** 2:
dist_to_light = D ** 2
return dist_to_light
@ti.func
def out_dir(n):
u = ti.Vector([0.5, 1.0, 0.5])
if abs(n[1]) < 1 - eps:
u = n.cross(ti.Vector([0.5, 1.0, 0.5])).normalized()
v = n.cross(u)
phi = 2 * math.pi * ti.random()
ay = ti.sqrt(ti.random())
ax = ti.sqrt(1 - ay ** 2) # distribution
return (
ax * (ti.cos(phi) * u + ti.sin(phi) * v) + ay * n
) # Electron Cloud Orientation and Positioning
# https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm
@ti.func
def sdf(o):
wall = min(o[1] + 10 * fov, o[2] + 3 * fov) ## Cloud
# Let's draw a Boron
# Neutrons and protons:
# Protons
proton = (o - ti.Vector([0.09, 0.05, 0.03])).norm() - 0.05
proton_2 = (o - ti.Vector([0.01, 0.02, 0.05])).norm() - 0.05
proton_3 = (o - ti.Vector([0.05, -0.07, 0.02])).norm() - 0.05
# Neutrons
neutron = (o - ti.Vector([0.15, 0.05, 0.03])).norm() - 0.065
neutron_2 = (o - ti.Vector([0.01, 0.09, 0.05])).norm() - 0.065
neutron_3 = (o - ti.Vector([0.09, -0.04, 0.09])).norm() - 0.065
# || x - bhat || - c :
# c changes the size of the sphere
# bhat is the center
proton_geometry = max(proton, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
proton_2_geometry = max(proton_2, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
proton_3_geometry = max(proton_3, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
neutron_geometry = max(neutron, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
neutron_2_geometry = max(neutron_2, -(0.52 - (o[1] * 0.2 + o[2] * 0.8)))
neutron_3_geometry = max(neutron_3, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
return min(
wall,
proton_geometry,
proton_2_geometry,
proton_3_geometry,
neutron_geometry,
neutron_2_geometry,
neutron_3_geometry,
)
@ti.func
def ray_march(p, d):
j = 0 # steps, should march 20 times or more
dist = 0.0
while j < 25 and sdf(p + dist * d) > 1e-6 and dist < inf:
dist += sdf(p + dist * d)
j += 1
return min(inf, dist)
@ti.func
def sdf_normal(p):
d = 1e-4
n = ti.Vector([0.0, 1.0, 0.0])
sdf_center = sdf(p)
for i in ti.static(range(3)):
inc = p
inc[i] += d
n[i] = (1 / d) * (sdf(inc))
return n.normalized()
@ti.func
def next_hit(pos, d):
closest, normal, c = inf, ti.Vector.zero(ti.f32, 3), ti.Vector.zero(ti.f32, 3)
ray_march_dist = ray_march(pos, d)
if ray_march_dist < dist_limit and ray_march_dist < closest:
closest = ray_march_dist
normal = sdf_normal(pos + d * closest)
hit_pos = pos + d * closest
t = hit_pos.norm()
c = ti.Vector([0.2 * t, 0.2 * t, 0.2 * t]) # color
return closest, normal, c
@ti.kernel
def render(time: float, total: int):
    """Accumulate one path-traced sample per pixel into `color_buffer`.

    NOTE(review): the `time` and `total` parameters are currently unused —
    confirm before removing them.
    """
    for u, v in color_buffer:
        aspect_ratio = res[0] / res[1]
        pos = camera_pos
        # Jittered primary ray through pixel (u, v) for anti-aliasing.
        d = ti.Vector(
            [
                (2 * fov * (u + ti.random()) / res[1] - fov * aspect_ratio - 1e-5),
                2 * fov * (v + ti.random()) / res[1] - fov - 1e-5,
                -1.0,
            ]
        )
        d = d.normalized()
        throughput = ti.Vector([1.0, 1.0, 1.0])  # color tone changes
        depth = 0
        hit_light = 0.0
        while depth < max_ray_depth:
            closest, normal, c = next_hit(pos, d)
            depth += 1
            dist_to_light = intersect_light(pos, d)
            if dist_to_light < closest:
                # Light reached before any geometry: terminate the path.
                hit_light = 1
                depth = max_ray_depth
            else:
                hit_pos = pos + closest * d
                if normal.norm_sqr() != 0:
                    # Diffuse bounce; nudge along the new ray to avoid
                    # immediate self-intersection with the surface.
                    d = out_dir(normal)
                    pos = hit_pos + 1e-4 * d
                    throughput *= c
                else:
                    depth = max_ray_depth  # ray escaped the scene
        color_buffer[u, v] += throughput * hit_light
gui = ti.GUI("A Tiny World: Atom", res)
video_manager = ti.VideoManager(output_dir="./", framerate=24, automatic_build=False)
# Start the rate timer now: initialising `last_t = 0` made the first
# samples/s print measure the interval since the Unix epoch (i.e. ~0/s).
last_t = time.time()
for i in range(2000):
    render(float(i), 2000)
    interval = 10
    if i % interval == 0 and i > 0:
        print("{:.2f} samples/s".format(interval / (time.time() - last_t)))
        last_t = time.time()
    # Average the accumulated samples, then normalize mean brightness.
    img = color_buffer.to_numpy() * (1 / (i + 1))
    img = img / img.mean() * 0.24  # Normalize
    frame = np.sqrt(img)  # color smoothing; computed once instead of twice
    gui.set_image(frame)
    video_manager.write_frame(frame)
    gui.show()
video_manager.make_video(gif=True, mp4=True) | src/prototype/atom.py | import math
import time
import numpy as np
import taichi as ti
ti.init(arch=ti.gpu)
res = 1280, 720  # render resolution (width, height)
color_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res)  # accumulated RGB samples
max_ray_depth = 15  # maximum bounces per path
eps = 1e-4
inf = 8.2e0  # Scatter Radius; also serves as the ray "miss" distance
fov = 0.7  # field of view
dist_limit = 100
camera_pos = ti.Vector([0.00, 0.00, 5.0])  # [x, y, zoom]
light_pos = [0.0, 0.0, 0.0]  # [x, y, zoom]
light_normal = [0.00, 0.00, -0.3]
light_radius = 0.4
@ti.func
def intersect_light(pos, d):
    """Distance along ray (pos, d) to the disc light, or `inf` on a miss."""
    light_loc = ti.Vector(light_pos)
    dot = -d.dot(ti.Vector(light_normal))
    dist = d.dot(light_loc - pos)
    dist_to_light = inf
    if dot > 0 and dist > 0:
        D = dist / dot  # ray parameter of the plane intersection
        dist_to_center = (light_loc - (pos + D * d)).norm_sqr()
        if dist_to_center < light_radius ** 2:
            # NOTE(review): this returns D**2 (squared) while callers compare
            # it against plain distances — confirm the square is intended.
            dist_to_light = D ** 2
    return dist_to_light
@ti.func
def out_dir(n):
    """Sample a cosine-weighted direction in the hemisphere around normal `n`."""
    u = ti.Vector([0.5, 1.0, 0.5])
    if abs(n[1]) < 1 - eps:
        # Build a tangent; the constant above is the fallback when n is
        # (nearly) parallel to the reference vector.
        u = n.cross(ti.Vector([0.5, 1.0, 0.5])).normalized()
    v = n.cross(u)
    phi = 2 * math.pi * ti.random()
    ay = ti.sqrt(ti.random())
    ax = ti.sqrt(1 - ay ** 2)  # distribution
    return (
        ax * (ti.cos(phi) * u + ti.sin(phi) * v) + ay * n
    )  # Electron Cloud Orientation and Positioning
# https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm
@ti.func
def sdf(o):
    """Scene signed-distance function at point `o`: walls plus a Boron nucleus."""
    wall = min(o[1] + 10 * fov, o[2] + 3 * fov)  ## Cloud
    # Let's draw a Boron
    # Neutrons and protons:
    # Protons: spheres of radius 0.05 (`||o - center|| - radius`).
    proton = (o - ti.Vector([0.09, 0.05, 0.03])).norm() - 0.05
    proton_2 = (o - ti.Vector([0.01, 0.02, 0.05])).norm() - 0.05
    proton_3 = (o - ti.Vector([0.05, -0.07, 0.02])).norm() - 0.05
    # Neutrons: slightly larger spheres (radius 0.065).
    neutron = (o - ti.Vector([0.15, 0.05, 0.03])).norm() - 0.065
    neutron_2 = (o - ti.Vector([0.01, 0.09, 0.05])).norm() - 0.065
    neutron_3 = (o - ti.Vector([0.09, -0.04, 0.09])).norm() - 0.065
    # || x - bhat || - c :
    # c changes the size of the sphere
    # bhat is the center
    # CSG intersection (max) with the half-space -(0.52 - (0.6*y + 0.8*z)).
    proton_geometry = max(proton, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
    proton_2_geometry = max(proton_2, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
    proton_3_geometry = max(proton_3, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
    neutron_geometry = max(neutron, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
    neutron_2_geometry = max(neutron_2, -(0.52 - (o[1] * 0.2 + o[2] * 0.8)))
    neutron_3_geometry = max(neutron_3, -(0.52 - (o[1] * 0.6 + o[2] * 0.8)))
    # Union of all parts = pointwise minimum of the distances.
    return min(
        wall,
        proton_geometry,
        proton_2_geometry,
        proton_3_geometry,
        neutron_geometry,
        neutron_2_geometry,
        neutron_3_geometry,
    )
@ti.func
def ray_march(p, d):
    """Sphere-trace the ray (p, d); return hit distance, capped at `inf`."""
    j = 0  # steps, should march 20 times or more
    dist = 0.0
    # Evaluate the SDF once per step; the original evaluated it twice
    # (once in the loop condition and again in the body) with the same
    # arguments, doubling the cost of every march step.
    step = sdf(p + dist * d)
    while j < 25 and step > 1e-6 and dist < inf:
        dist += step
        step = sdf(p + dist * d)
        j += 1
    return min(inf, dist)
@ti.func
def sdf_normal(p):
    """Estimate the surface normal at `p` via forward finite differences."""
    d = 1e-4  # finite-difference step size
    n = ti.Vector([0.0, 1.0, 0.0])
    sdf_center = sdf(p)
    for i in ti.static(range(3)):
        inc = p
        inc[i] += d
        # Forward difference: (sdf(p + d*e_i) - sdf(p)) / d.
        # The original omitted `- sdf_center`, leaving the computed center
        # value unused and biasing the normal by sdf(p)/d per axis.
        n[i] = (1 / d) * (sdf(inc) - sdf_center)
    return n.normalized()
@ti.func
def next_hit(pos, d):
    """Return (distance, surface normal, albedo) of the nearest SDF hit.

    A miss leaves the defaults: distance `inf`, zero normal, zero color.
    """
    closest, normal, c = inf, ti.Vector.zero(ti.f32, 3), ti.Vector.zero(ti.f32, 3)
    ray_march_dist = ray_march(pos, d)
    if ray_march_dist < dist_limit and ray_march_dist < closest:
        closest = ray_march_dist
        normal = sdf_normal(pos + d * closest)
        hit_pos = pos + d * closest
        t = hit_pos.norm()
        # Grey albedo that brightens with distance from the origin.
        c = ti.Vector([0.2 * t, 0.2 * t, 0.2 * t])  # color
    return closest, normal, c
@ti.kernel
def render(time: float, total: int):
    """Accumulate one path-traced sample per pixel into `color_buffer`.

    NOTE(review): the `time` and `total` parameters are currently unused —
    confirm before removing them.
    """
    for u, v in color_buffer:
        aspect_ratio = res[0] / res[1]
        pos = camera_pos
        # Jittered primary ray through pixel (u, v) for anti-aliasing.
        d = ti.Vector(
            [
                (2 * fov * (u + ti.random()) / res[1] - fov * aspect_ratio - 1e-5),
                2 * fov * (v + ti.random()) / res[1] - fov - 1e-5,
                -1.0,
            ]
        )
        d = d.normalized()
        throughput = ti.Vector([1.0, 1.0, 1.0])  # color tone changes
        depth = 0
        hit_light = 0.0
        while depth < max_ray_depth:
            closest, normal, c = next_hit(pos, d)
            depth += 1
            dist_to_light = intersect_light(pos, d)
            if dist_to_light < closest:
                # Light reached before any geometry: terminate the path.
                hit_light = 1
                depth = max_ray_depth
            else:
                hit_pos = pos + closest * d
                if normal.norm_sqr() != 0:
                    # Diffuse bounce; nudge along the new ray to avoid
                    # immediate self-intersection with the surface.
                    d = out_dir(normal)
                    pos = hit_pos + 1e-4 * d
                    throughput *= c
                else:
                    depth = max_ray_depth  # ray escaped the scene
        color_buffer[u, v] += throughput * hit_light
gui = ti.GUI("A Tiny World: Atom", res)
video_manager = ti.VideoManager(output_dir="./", framerate=24, automatic_build=False)
# Start the rate timer now: initialising `last_t = 0` made the first
# samples/s print measure the interval since the Unix epoch (i.e. ~0/s).
last_t = time.time()
for i in range(2000):
    render(float(i), 2000)
    interval = 10
    if i % interval == 0 and i > 0:
        print("{:.2f} samples/s".format(interval / (time.time() - last_t)))
        last_t = time.time()
    # Average the accumulated samples, then normalize mean brightness.
    img = color_buffer.to_numpy() * (1 / (i + 1))
    img = img / img.mean() * 0.24  # Normalize
    frame = np.sqrt(img)  # color smoothing; computed once instead of twice
    gui.set_image(frame)
    video_manager.write_frame(frame)
    gui.show()
video_manager.make_video(gif=True, mp4=True) | 0.579162 | 0.51818 |
import collections
import gym
import numpy as np
from gym import spaces
__all__ = [
'FlatBoxView',
'FlattenObservations',
'BufferObservations',
'SemiSupervisedFiniteReward',
]
def _flatten_space(space):
    """Flatten a gym space into 1D Box bounds plus a sample converter.

    Args:
        space: A `gym.Space` instance.

    Returns:
        low: 1D `numpy.ndarray` of lower bounds.
        high: 1D `numpy.ndarray` of upper bounds.
        transformer: Maps a sample of `space` to the flattened form.
    """
    if isinstance(space, spaces.Box):
        # A Box flattens directly to one dimension.
        def transformer(sample):
            return np.asarray(sample).flatten()
        return space.low.flatten(), space.high.flatten(), transformer
    if isinstance(space, spaces.Discrete):
        # Discrete values become one-hot vectors of length n.
        one_hot = np.eye(space.n)
        def transformer(sample):
            return one_hot[sample]
        return np.zeros((space.n, )), np.ones((space.n, )), transformer
    if isinstance(space, spaces.Tuple):
        if not space.spaces:
            # Empty tuple space: zero-length box, sample is ignored.
            def transformer(sample):
                del sample
                return np.empty((0, ))
            return np.empty((0, )), np.empty((0, )), transformer
        # Recursively flatten each subspace, then concatenate the pieces.
        lows, highs, transformers = zip(*map(_flatten_space, space.spaces))
        def transformer(sample):
            return np.concatenate(
                [sub_t(part) for sub_t, part in zip(transformers, sample)])
        return np.concatenate(lows), np.concatenate(highs), transformer
    raise ValueError('Flattening not supported for space {}'.format(space))
class FlatBoxView(spaces.Box):
    """A 1D Box presentation of an arbitrary wrapped space."""
    def __init__(self, space):
        self.original_space = space
        bounds_low, bounds_high, self._transformer = _flatten_space(space)
        super().__init__(low=bounds_low, high=bounds_high)
    def sample(self):
        # Draw from the wrapped space, then project into the flat view.
        return self.convert(self.original_space.sample())
    def convert(self, x):
        """Map a sample of `self.original_space` into this flat space.

        Args:
            x: A sample from `self.original_space`.

        Returns:
            The associated flattened sample from this space.
        """
        return self._transformer(x)
class FlattenObservations(gym.ObservationWrapper):
    """Observation wrapper presenting the observation space as a 1D Box."""
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = FlatBoxView(env.observation_space)
    def _observation(self, observation):
        # Delegate the flattening to the FlatBoxView converter.
        return self.observation_space.convert(observation)
    def _ensure_no_double_wrap(self):
        # Deliberately a no-op: stacking several of these wrappers is allowed.
        pass
class BufferObservations(gym.ObservationWrapper):
    """Wrapper whose observations are a rolling buffer of recent frames.

    The wrapped environment must expose a `Box` observation space; a new
    leading 'history' dimension of length `buffer_size` is prepended.
    """
    def __init__(self, env, buffer_size):
        """Wrap `env` so observations stack the last `buffer_size` frames.

        Args:
            env: Environment to wrap.
            buffer_size: Number of `env` observations kept in the buffer.
        """
        super().__init__(env)
        inner_space = env.observation_space
        try:
            low, high = inner_space.low, inner_space.high
        except AttributeError as e:
            raise ValueError('Environment observation space must be '
                             'an instance of gym.spaces.Box') from e
        # Replicate the bounds along a new leading history axis.
        tile_shape = (buffer_size, ) + (1, ) * low.ndim
        self.observation_space = spaces.Box(
            low=np.tile(low, tile_shape), high=np.tile(high, tile_shape))
        self.buffer_size = buffer_size
        self._observation_buffer = collections.deque([], maxlen=buffer_size)
    def _reset(self):
        first = self.env.reset()
        self._observation_buffer.clear()
        # Pre-fill so the very first observation already has full depth.
        self._observation_buffer.extend([first] * self.buffer_size)
        return self._get_observation()
    def _step(self, action):
        observation, reward, done, info = self.env.step(action)
        self._observation_buffer.append(observation)
        return self._get_observation(), reward, done, info
    def _get_observation(self):
        try:
            return np.stack(self._observation_buffer)
        except ValueError:
            # np.stack rejects an empty sequence; for a zero-size buffer
            # fall back to an arbitrary sample of the declared space.
            if self.buffer_size == 0:
                return self.observation_space.sample()
            raise
class SemiSupervisedFiniteReward(gym.Wrapper):
    """Convert an environment into a finite-reward semi-supervised RL env.
    At most `max_rewards` nonzero reward observations are given. These may
    either be all reward observations until the limit, or rewards may be
    specifically requested. All other reward observations are zero.
    The reward limit persists through resets.
    """
    def __init__(self,
                 env,
                 max_rewards,
                 reward_indicator_observation=False,
                 reward_on_request=False):
        """Initialize SemiSupervisedFiniteReward environment.
        Args:
            env: Environment to wrap.
            max_rewards: Maximum allowed reward observations.
            reward_indicator_observation: Augment observation space with a bit
                that indicates whether the reward is the true reward (1) or
                constant zero (0).
            reward_on_request: If `True`, augment the action space with a bit
                that requests the true reward for the current step.
        """
        super().__init__(env)
        # Counter deliberately survives reset(); see class docstring.
        self.rewards_given = 0
        self.max_rewards = max_rewards
        self.reward_indicator_observation = reward_indicator_observation
        self.reward_on_request = reward_on_request
        if reward_indicator_observation:
            self.observation_space = spaces.Tuple((self.observation_space,
                                                   spaces.Discrete(2)))
        if reward_on_request:
            self.action_space = spaces.Tuple((self.action_space,
                                              spaces.Discrete(2)))
        # Ensure 0 is contained in the reward range.
        self.reward_range = (min(self.reward_range[0], 0), max(
            self.reward_range[1], 0))
    def _step(self, action):
        can_give_reward = self.rewards_given < self.max_rewards
        if self.reward_on_request:
            # Action arrives as (env_action, request_bit) in this mode.
            action, requesting_reward = action
            give_reward = can_give_reward and requesting_reward
        else:
            give_reward = can_give_reward
        observation, reward, done, info = self.env.step(action)
        # Always expose the uncensored reward for diagnostics.
        info['true_reward'] = reward
        info['is_true_reward'] = give_reward
        if give_reward:
            self.rewards_given += 1
        else:
            reward = 0  # outside the budget: reward is censored to zero
        if self.reward_indicator_observation:
            observation = (observation, give_reward)
return observation, reward, done, info | src/gym_utils/env_wrappers.py | import collections
import gym
import numpy as np
from gym import spaces
__all__ = [
'FlatBoxView',
'FlattenObservations',
'BufferObservations',
'SemiSupervisedFiniteReward',
]
def _flatten_space(space):
    """Flatten a gym space into 1D Box bounds plus a sample converter.

    Args:
        space: A `gym.Space` instance.

    Returns:
        low: 1D `numpy.ndarray` of lower bounds.
        high: 1D `numpy.ndarray` of upper bounds.
        transformer: Maps a sample of `space` to the flattened form.
    """
    if isinstance(space, spaces.Box):
        # A Box flattens directly to one dimension.
        def transformer(sample):
            return np.asarray(sample).flatten()
        return space.low.flatten(), space.high.flatten(), transformer
    if isinstance(space, spaces.Discrete):
        # Discrete values become one-hot vectors of length n.
        one_hot = np.eye(space.n)
        def transformer(sample):
            return one_hot[sample]
        return np.zeros((space.n, )), np.ones((space.n, )), transformer
    if isinstance(space, spaces.Tuple):
        if not space.spaces:
            # Empty tuple space: zero-length box, sample is ignored.
            def transformer(sample):
                del sample
                return np.empty((0, ))
            return np.empty((0, )), np.empty((0, )), transformer
        # Recursively flatten each subspace, then concatenate the pieces.
        lows, highs, transformers = zip(*map(_flatten_space, space.spaces))
        def transformer(sample):
            return np.concatenate(
                [sub_t(part) for sub_t, part in zip(transformers, sample)])
        return np.concatenate(lows), np.concatenate(highs), transformer
    raise ValueError('Flattening not supported for space {}'.format(space))
class FlatBoxView(spaces.Box):
    """A 1D Box presentation of an arbitrary wrapped space."""
    def __init__(self, space):
        self.original_space = space
        bounds_low, bounds_high, self._transformer = _flatten_space(space)
        super().__init__(low=bounds_low, high=bounds_high)
    def sample(self):
        # Draw from the wrapped space, then project into the flat view.
        return self.convert(self.original_space.sample())
    def convert(self, x):
        """Map a sample of `self.original_space` into this flat space.

        Args:
            x: A sample from `self.original_space`.

        Returns:
            The associated flattened sample from this space.
        """
        return self._transformer(x)
class FlattenObservations(gym.ObservationWrapper):
    """Observation wrapper presenting the observation space as a 1D Box."""
    def __init__(self, env):
        super().__init__(env)
        self.observation_space = FlatBoxView(env.observation_space)
    def _observation(self, observation):
        # Delegate the flattening to the FlatBoxView converter.
        return self.observation_space.convert(observation)
    def _ensure_no_double_wrap(self):
        # Deliberately a no-op: stacking several of these wrappers is allowed.
        pass
class BufferObservations(gym.ObservationWrapper):
    """Wrapper whose observations are a rolling buffer of recent frames.

    The wrapped environment must expose a `Box` observation space; a new
    leading 'history' dimension of length `buffer_size` is prepended.
    """
    def __init__(self, env, buffer_size):
        """Wrap `env` so observations stack the last `buffer_size` frames.

        Args:
            env: Environment to wrap.
            buffer_size: Number of `env` observations kept in the buffer.
        """
        super().__init__(env)
        inner_space = env.observation_space
        try:
            low, high = inner_space.low, inner_space.high
        except AttributeError as e:
            raise ValueError('Environment observation space must be '
                             'an instance of gym.spaces.Box') from e
        # Replicate the bounds along a new leading history axis.
        tile_shape = (buffer_size, ) + (1, ) * low.ndim
        self.observation_space = spaces.Box(
            low=np.tile(low, tile_shape), high=np.tile(high, tile_shape))
        self.buffer_size = buffer_size
        self._observation_buffer = collections.deque([], maxlen=buffer_size)
    def _reset(self):
        first = self.env.reset()
        self._observation_buffer.clear()
        # Pre-fill so the very first observation already has full depth.
        self._observation_buffer.extend([first] * self.buffer_size)
        return self._get_observation()
    def _step(self, action):
        observation, reward, done, info = self.env.step(action)
        self._observation_buffer.append(observation)
        return self._get_observation(), reward, done, info
    def _get_observation(self):
        try:
            return np.stack(self._observation_buffer)
        except ValueError:
            # np.stack rejects an empty sequence; for a zero-size buffer
            # fall back to an arbitrary sample of the declared space.
            if self.buffer_size == 0:
                return self.observation_space.sample()
            raise
class SemiSupervisedFiniteReward(gym.Wrapper):
    """Convert an environment into a finite-reward semi-supervised RL env.
    At most `max_rewards` nonzero reward observations are given. These may
    either be all reward observations until the limit, or rewards may be
    specifically requested. All other reward observations are zero.
    The reward limit persists through resets.
    """
    def __init__(self,
                 env,
                 max_rewards,
                 reward_indicator_observation=False,
                 reward_on_request=False):
        """Initialize SemiSupervisedFiniteReward environment.
        Args:
            env: Environment to wrap.
            max_rewards: Maximum allowed reward observations.
            reward_indicator_observation: Augment observation space with a bit
                that indicates whether the reward is the true reward (1) or
                constant zero (0).
            reward_on_request: If `True`, augment the action space with a bit
                that requests the true reward for the current step.
        """
        super().__init__(env)
        # Counter deliberately survives reset(); see class docstring.
        self.rewards_given = 0
        self.max_rewards = max_rewards
        self.reward_indicator_observation = reward_indicator_observation
        self.reward_on_request = reward_on_request
        if reward_indicator_observation:
            self.observation_space = spaces.Tuple((self.observation_space,
                                                   spaces.Discrete(2)))
        if reward_on_request:
            self.action_space = spaces.Tuple((self.action_space,
                                              spaces.Discrete(2)))
        # Ensure 0 is contained in the reward range.
        self.reward_range = (min(self.reward_range[0], 0), max(
            self.reward_range[1], 0))
    def _step(self, action):
        can_give_reward = self.rewards_given < self.max_rewards
        if self.reward_on_request:
            # Action arrives as (env_action, request_bit) in this mode.
            action, requesting_reward = action
            give_reward = can_give_reward and requesting_reward
        else:
            give_reward = can_give_reward
        observation, reward, done, info = self.env.step(action)
        # Always expose the uncensored reward for diagnostics.
        info['true_reward'] = reward
        info['is_true_reward'] = give_reward
        if give_reward:
            self.rewards_given += 1
        else:
            reward = 0  # outside the budget: reward is censored to zero
        if self.reward_indicator_observation:
            observation = (observation, give_reward)
return observation, reward, done, info | 0.912859 | 0.603581 |
import traceback
import uuid
import humanfriendly
from flask import Flask, request, render_template, abort, send_from_directory
from functools import wraps, update_wrapper
from datetime import datetime
from flask import make_response
from panoptes.database import init_db, db_session
from panoptes.models import Workflows
from panoptes.routes import *
from panoptes.schema_forms import SnakemakeUpdateForm
from panoptes.server_utilities.db_queries import maintain_jobs
# Flask app serving templates directly out of static/src (dev-friendly).
app = Flask(__name__, template_folder="static/src/")
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.register_blueprint(routes)
# Expose the job lookup helpers directly to Jinja templates.
app.jinja_env.globals.update(get_jobs=get_jobs)
app.jinja_env.globals.update(get_job=get_job)
init_db()  # create tables at import time
# you can add @nocache in any endpoint to disable caching
def nocache(view):
    """Decorator stamping responses with aggressive no-cache headers."""
    @wraps(view)
    def no_cache(*args, **kwargs):
        resp = make_response(view(*args, **kwargs))
        resp.headers['Last-Modified'] = datetime.now()
        resp.headers['Cache-Control'] = ('no-store, no-cache, must-revalidate, '
                                         'post-check=0, pre-check=0, max-age=0')
        resp.headers['Pragma'] = 'no-cache'
        resp.headers['Expires'] = '-1'
        return resp
    return update_wrapper(no_cache, view)
@app.route('/')
def index():
    """Dashboard: aggregate counts across all stored workflows."""
    wf = [w.get_workflow() for w in get_db_workflows()]
    info = {
        'workflows': len(wf),
        'completed': sum(1 for w in wf if w['status'] == 'Done'),
        'jobs_done': sum(w['jobs_done'] or 0 for w in wf),
        'jobs_total': sum(w['jobs_total'] or 0 for w in wf),
    }
    return render_template("index.html", info=info)
@app.route('/workflows/')
@nocache
def workflows_page():
    """Render the list of all workflows (uncached so statuses stay fresh)."""
    workflows = [w.get_workflow() for w in get_db_workflows()]
    return render_template('workflows.html', workflows=workflows)
@app.route('/about')
def about():
    """Static 'about' page."""
    return render_template('about.html')
@app.route('/contribute')
def contribute():
    """Static contribution-guide page."""
    return render_template('contribute.html')
@app.route('/searchResults')
@nocache
def search_results():
    """Substring search over workflow names and job names (?q=...)."""
    query = request.args.get('q')
    workflows = [w.get_workflow() for w in get_db_workflows()]
    matching_workflows = [w for w in workflows if query in w['name']]
    matching_jobs = []
    for wf in workflows:
        for job in get_db_jobs(wf['id']):
            job_json = job.get_job_json()
            if query in job_json['name']:
                matching_jobs.append(job_json)
    return render_template('searchResults.html',
                           workflows=matching_workflows,
                           alljobs=matching_jobs)
@app.route('/workflow/<id>', methods=['GET'])
@nocache
def get_status(id):
    """Render a single workflow page; best-effort 404 page on any failure."""
    try:
        workflow = get_db_workflows_by_id(id).get_workflow()
        if workflow:
            return render_template('workflow.html', workflow=workflow)
        else:
            return render_template('404.html')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; behavior is otherwise unchanged.
        traceback.print_exc()
        return render_template('404.html')
@app.route('/workflow/<wf_id>/job/<job_id>', methods=['GET'])
def get_job_status(wf_id, job_id):
    """Render the detail page for one job of one workflow."""
    return render_template('job.html', job=get_job(wf_id, job_id))
@app.route('/create_workflow', methods=['GET'])
def create_workflow():
    """Create a new 'Running' workflow row and return it as JSON.

    NOTE(review): creating a resource on GET is unconventional — confirm
    clients before switching this route to POST.
    """
    try:
        w = Workflows(str(uuid.uuid4()), "Running")
        db_session.add(w)
        db_session.commit()
        return w.get_workflow()
    except Exception:
        # Narrowed from a bare `except:`; still best-effort 404 rendering.
        traceback.print_exc()
        return render_template('404.html')
@app.route('/update_workflow_status', methods=['POST'])
def update_status():
    """Snakemake log-handler endpoint: validate the POSTed form and apply it."""
    update_form = SnakemakeUpdateForm()
    errors = update_form.validate(request.form)
    if errors:
        # NOTE(review): aborts with 404 on validation failure; 400 may be
        # more appropriate — confirm against the Snakemake client first.
        abort(404, str(errors))
    else:
        r = update_form.load(request.form)
        # now all required fields exist and are the right type
        maintain_jobs(msg=r["msg"], wf_id=r["id"])
    return "ok"
@app.route('/vendor/<path:path>')
def send_vendor(path):
    """Serve vendored front-end assets."""
    return send_from_directory('static/vendor', path)
@app.route('/node_modules/<path:path>')
def send_node_modules_charts(path):
    """Serve chart dependencies installed under node_modules."""
    return send_from_directory('static/node_modules', path)
@app.route('/<path:path>')
def send_js(path):
    """Catch-all: serve remaining static sources."""
    return send_from_directory('static/src', path)
@app.template_filter('formatdatetime')
def format_datetime(value, format="%d %b %Y %I:%M %p"):
    """Format a date time to (Default): d Mon YYYY HH:MM P; '' for None."""
    if value is None:
        return ""
    # The original return line ended with a stray '\' continuation that
    # glued this statement onto the following decorator line; removed.
    return value.strftime(format)
@app.template_filter('formatdelta')
def format_delta(value):
    """Format a timespan as human-friendly text (e.g. '2 minutes'); '' for None."""
    if value is None:
        return ""
    return humanfriendly.format_timespan(value)
@app.errorhandler(Exception)
def handle_bad_request(e):
    # Blanket handler: every unhandled exception renders the 404 page.
    # NOTE(review): this also masks genuine 500s as 404s — confirm intended.
    return render_template('404.html')
if __name__ == '__main__':
app.run() | panoptes/app.py | import traceback
import uuid
import humanfriendly
from flask import Flask, request, render_template, abort, send_from_directory
from functools import wraps, update_wrapper
from datetime import datetime
from flask import make_response
from panoptes.database import init_db, db_session
from panoptes.models import Workflows
from panoptes.routes import *
from panoptes.schema_forms import SnakemakeUpdateForm
from panoptes.server_utilities.db_queries import maintain_jobs
# Flask app serving templates directly out of static/src (dev-friendly).
app = Flask(__name__, template_folder="static/src/")
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.register_blueprint(routes)
# Expose the job lookup helpers directly to Jinja templates.
app.jinja_env.globals.update(get_jobs=get_jobs)
app.jinja_env.globals.update(get_job=get_job)
init_db()  # create tables at import time
# you can add @nocache in any endpoint to disable caching
def nocache(view):
    """Decorator stamping responses with aggressive no-cache headers."""
    @wraps(view)
    def no_cache(*args, **kwargs):
        resp = make_response(view(*args, **kwargs))
        resp.headers['Last-Modified'] = datetime.now()
        resp.headers['Cache-Control'] = ('no-store, no-cache, must-revalidate, '
                                         'post-check=0, pre-check=0, max-age=0')
        resp.headers['Pragma'] = 'no-cache'
        resp.headers['Expires'] = '-1'
        return resp
    return update_wrapper(no_cache, view)
@app.route('/')
def index():
    """Dashboard: aggregate counts across all stored workflows."""
    wf = [w.get_workflow() for w in get_db_workflows()]
    info = {
        'workflows': len(wf),
        'completed': sum(1 for w in wf if w['status'] == 'Done'),
        'jobs_done': sum(w['jobs_done'] or 0 for w in wf),
        'jobs_total': sum(w['jobs_total'] or 0 for w in wf),
    }
    return render_template("index.html", info=info)
@app.route('/workflows/')
@nocache
def workflows_page():
    """Render the list of all workflows (uncached so statuses stay fresh)."""
    workflows = [w.get_workflow() for w in get_db_workflows()]
    return render_template('workflows.html', workflows=workflows)
@app.route('/about')
def about():
    """Static 'about' page."""
    return render_template('about.html')
@app.route('/contribute')
def contribute():
    """Static contribution-guide page."""
    return render_template('contribute.html')
@app.route('/searchResults')
@nocache
def search_results():
    """Substring search over workflow names and job names (?q=...)."""
    query = request.args.get('q')
    workflows = [w.get_workflow() for w in get_db_workflows()]
    matching_workflows = [w for w in workflows if query in w['name']]
    matching_jobs = []
    for wf in workflows:
        for job in get_db_jobs(wf['id']):
            job_json = job.get_job_json()
            if query in job_json['name']:
                matching_jobs.append(job_json)
    return render_template('searchResults.html',
                           workflows=matching_workflows,
                           alljobs=matching_jobs)
@app.route('/workflow/<id>', methods=['GET'])
@nocache
def get_status(id):
    """Render a single workflow page; best-effort 404 page on any failure."""
    try:
        workflow = get_db_workflows_by_id(id).get_workflow()
        if workflow:
            return render_template('workflow.html', workflow=workflow)
        else:
            return render_template('404.html')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; behavior is otherwise unchanged.
        traceback.print_exc()
        return render_template('404.html')
@app.route('/workflow/<wf_id>/job/<job_id>', methods=['GET'])
def get_job_status(wf_id, job_id):
    """Render the detail page for one job of one workflow."""
    return render_template('job.html', job=get_job(wf_id, job_id))
@app.route('/create_workflow', methods=['GET'])
def create_workflow():
    """Create a new 'Running' workflow row and return it as JSON.

    NOTE(review): creating a resource on GET is unconventional — confirm
    clients before switching this route to POST.
    """
    try:
        w = Workflows(str(uuid.uuid4()), "Running")
        db_session.add(w)
        db_session.commit()
        return w.get_workflow()
    except Exception:
        # Narrowed from a bare `except:`; still best-effort 404 rendering.
        traceback.print_exc()
        return render_template('404.html')
@app.route('/update_workflow_status', methods=['POST'])
def update_status():
    """Snakemake log-handler endpoint: validate the POSTed form and apply it."""
    update_form = SnakemakeUpdateForm()
    errors = update_form.validate(request.form)
    if errors:
        # NOTE(review): aborts with 404 on validation failure; 400 may be
        # more appropriate — confirm against the Snakemake client first.
        abort(404, str(errors))
    else:
        r = update_form.load(request.form)
        # now all required fields exist and are the right type
        maintain_jobs(msg=r["msg"], wf_id=r["id"])
    return "ok"
@app.route('/vendor/<path:path>')
def send_vendor(path):
    """Serve vendored front-end assets."""
    return send_from_directory('static/vendor', path)
@app.route('/node_modules/<path:path>')
def send_node_modules_charts(path):
    """Serve chart dependencies installed under node_modules."""
    return send_from_directory('static/node_modules', path)
@app.route('/<path:path>')
def send_js(path):
    """Catch-all: serve remaining static sources."""
    return send_from_directory('static/src', path)
@app.template_filter('formatdatetime')
def format_datetime(value, format="%d %b %Y %I:%M %p"):
    """Format a date time to (Default): d Mon YYYY HH:MM P; '' for None."""
    if value is None:
        return ""
    # The original return line ended with a stray '\' continuation that
    # glued this statement onto the following decorator line; removed.
    return value.strftime(format)
@app.template_filter('formatdelta')
def format_delta(value):
    """Format a timespan as human-friendly text (e.g. '2 minutes'); '' for None."""
    if value is None:
        return ""
    return humanfriendly.format_timespan(value)
@app.errorhandler(Exception)
def handle_bad_request(e):
    # Blanket handler: every unhandled exception renders the 404 page.
    # NOTE(review): this also masks genuine 500s as 404s — confirm intended.
    return render_template('404.html')
if __name__ == '__main__':
app.run() | 0.393735 | 0.050988 |
import itertools
import numpy as np
import pytest
import qutip
from qutip.core import data as _data
def expected(qobj, sel):
    """Reference partial trace: keep subsystems in `sel`, trace out the rest.

    Implemented with einsum over the reshaped dense array; used as ground
    truth for `Qobj.ptrace` in the tests below.
    """
    if qobj.isbra or qobj.isket:
        qobj = qobj.proj()  # ptrace of a pure state acts on its projector
    sel = sorted(sel)
    dims = [[x for i, x in enumerate(qobj.dims[0]) if i in sel]]*2
    new_shape = (np.prod(dims[0]),) * 2
    out = qobj.full()
    before, after = 1, qobj.shape[0]
    for i, dim in enumerate(qobj.dims[0]):
        after //= dim
        if i in sel:
            before = before * dim
            continue
        # Trace out this subsystem: contract its matching row/column index.
        tmp_dims = (before, dim, after) * 2
        out = np.einsum('aibcid->abcd', out.reshape(tmp_dims))
    return qutip.Qobj(out.reshape(new_shape), dims=dims)
@pytest.fixture(params=[_data.CSR, _data.Dense], ids=['CSR', 'Dense'])
def dtype(request):
    # Parametrize over both data-layer backends.
    return request.param
@pytest.fixture(params=[True, False], ids=['dm', 'ket'])
def dm(request):
    # Whether the state under test is a density matrix (True) or a ket.
    return request.param
@pytest.fixture
def state(dtype, dm):
    # Random tripartite state with unequal subsystem dimensions (2, 3, 4).
    dims = [2, 3, 4]
    state = qutip.rand_ket(np.prod(dims), dims=[dims, [1]*len(dims)])
    if dm:
        state = state.proj()
    return state.to(dtype)
def test_ptrace_noncompound_rand(dtype, dm):
    """Test `A.ptrace(0) == A` when `A` is in a non-tensored Hilbert space."""
    for _ in range(5):
        state = qutip.rand_ket(5)
        if dm:
            state = state.proj()
        state = state.to(dtype)
        # ptrace always returns a density matrix, hence proj() for kets.
        assert state.ptrace(0) == (state if dm else state.proj())
@pytest.mark.parametrize('pair', list(itertools.combinations(range(3), 2)))
def test_ptrace_unsorted_selection_subset(state, pair):
    """
    Regression test for gh-1325. ptrace should work the same independently of
    the order of the input; no transposition is done in the trace operation.
    """
    # pair is always sorted.
    state_ordered = state.ptrace(pair)
    state_reversed = state.ptrace(pair[::-1])
    assert state_ordered.dims == state_reversed.dims
    assert state_ordered == state_reversed
@pytest.mark.parametrize('permutation', list(itertools.permutations(range(3))))
def test_ptrace_unsorted_selection_all(state, permutation):
    """Keeping every subsystem is the identity, whatever the selection order."""
    state_ptraced = state.ptrace(permutation)
    if state.isket:
        state = state.proj()
    assert state.dims == state_ptraced.dims
    assert state == state_ptraced
@pytest.mark.parametrize(['selection', 'exception'], [
    pytest.param(4, IndexError, id='too big'),
    pytest.param(-1, IndexError, id='too small'),
    pytest.param([0, 0], ValueError, id='duplicate'),
    # 'too many' may throw either from duplication or invalid index.
    pytest.param([0, 1, 2, 3], Exception, id='too many'),
])
def test_ptrace_fails_on_invalid_input(state, selection, exception):
    """Invalid subsystem selections must raise rather than silently truncate."""
    with pytest.raises(exception):
        state.ptrace(selection)
def test_ptrace_rand(dtype):
    'ptrace : randomized tests'
    for _ in range(5):
        # Tripartite kets with mixed subsystem sizes.
        A = qutip.tensor(
            qutip.rand_ket(5), qutip.rand_ket(2), qutip.rand_ket(3),
        ).to(dtype)
        for sel in ([2, 1], [0, 2], [0, 1]):
            assert A.ptrace(sel) == expected(A, sel)
        # Mixed operator factors: random dm, thermal dm, random unitary.
        A = qutip.tensor(
            qutip.rand_dm(2), qutip.thermal_dm(10, 1), qutip.rand_unitary(3),
        ).to(dtype)
        for sel in ([1, 2], [0, 2], [0, 1]):
            assert A.ptrace(sel) == expected(A, sel)
        # Six random qubits.
        A = qutip.tensor(
            qutip.rand_ket(2), qutip.rand_ket(2), qutip.rand_ket(2),
            qutip.rand_ket(2), qutip.rand_ket(2), qutip.rand_ket(2),
        ).to(dtype)
        for sel in ([3, 2], [0, 2], [0, 1]):
            assert A.ptrace(sel) == expected(A, sel)
A = qutip.rand_dm(64, 0.5, dims=[[4, 4, 4], [4, 4, 4]]).to(dtype)
for sel in ([0], [1], [0, 2]):
assert A.ptrace(sel) == expected(A, sel) | qutip/tests/core/test_ptrace.py |
import itertools
import numpy as np
import pytest
import qutip
from qutip.core import data as _data
def expected(qobj, sel):
    """Reference partial trace: keep subsystems in `sel`, trace out the rest.

    Implemented with einsum over the reshaped dense array; used as ground
    truth for `Qobj.ptrace` in the tests below.
    """
    if qobj.isbra or qobj.isket:
        qobj = qobj.proj()  # ptrace of a pure state acts on its projector
    sel = sorted(sel)
    dims = [[x for i, x in enumerate(qobj.dims[0]) if i in sel]]*2
    new_shape = (np.prod(dims[0]),) * 2
    out = qobj.full()
    before, after = 1, qobj.shape[0]
    for i, dim in enumerate(qobj.dims[0]):
        after //= dim
        if i in sel:
            before = before * dim
            continue
        # Trace out this subsystem: contract its matching row/column index.
        tmp_dims = (before, dim, after) * 2
        out = np.einsum('aibcid->abcd', out.reshape(tmp_dims))
    return qutip.Qobj(out.reshape(new_shape), dims=dims)
@pytest.fixture(params=[_data.CSR, _data.Dense], ids=['CSR', 'Dense'])
def dtype(request):
    # Parametrize over both data-layer backends.
    return request.param
@pytest.fixture(params=[True, False], ids=['dm', 'ket'])
def dm(request):
    # Whether the state under test is a density matrix (True) or a ket.
    return request.param
@pytest.fixture
def state(dtype, dm):
    # Random tripartite state with unequal subsystem dimensions (2, 3, 4).
    dims = [2, 3, 4]
    state = qutip.rand_ket(np.prod(dims), dims=[dims, [1]*len(dims)])
    if dm:
        state = state.proj()
    return state.to(dtype)
def test_ptrace_noncompound_rand(dtype, dm):
    """Test `A.ptrace(0) == A` when `A` is in a non-tensored Hilbert space."""
    for _ in range(5):
        state = qutip.rand_ket(5)
        if dm:
            state = state.proj()
        state = state.to(dtype)
        # ptrace always returns a density matrix, hence proj() for kets.
        assert state.ptrace(0) == (state if dm else state.proj())
@pytest.mark.parametrize('pair', list(itertools.combinations(range(3), 2)))
def test_ptrace_unsorted_selection_subset(state, pair):
    """
    Regression test for gh-1325. ptrace should work the same independently of
    the order of the input; no transposition is done in the trace operation.
    """
    # pair is always sorted.
    state_ordered = state.ptrace(pair)
    state_reversed = state.ptrace(pair[::-1])
    assert state_ordered.dims == state_reversed.dims
    assert state_ordered == state_reversed
@pytest.mark.parametrize('permutation', list(itertools.permutations(range(3))))
def test_ptrace_unsorted_selection_all(state, permutation):
    """Keeping every subsystem is the identity, whatever the selection order."""
    state_ptraced = state.ptrace(permutation)
    if state.isket:
        state = state.proj()
    assert state.dims == state_ptraced.dims
    assert state == state_ptraced
@pytest.mark.parametrize(['selection', 'exception'], [
    pytest.param(4, IndexError, id='too big'),
    pytest.param(-1, IndexError, id='too small'),
    pytest.param([0, 0], ValueError, id='duplicate'),
    # 'too many' may throw either from duplication or invalid index.
    pytest.param([0, 1, 2, 3], Exception, id='too many'),
])
def test_ptrace_fails_on_invalid_input(state, selection, exception):
    """Invalid subsystem selections must raise rather than silently truncate."""
    with pytest.raises(exception):
        state.ptrace(selection)
def test_ptrace_rand(dtype):
    'ptrace : randomized tests'
    # Compare Qobj.ptrace against the einsum-based reference implementation
    # `expected` (defined above) for assorted random states and selections.
    for _ in range(5):
        A = qutip.tensor(
            qutip.rand_ket(5), qutip.rand_ket(2), qutip.rand_ket(3),
        ).to(dtype)
        for sel in ([2, 1], [0, 2], [0, 1]):
            assert A.ptrace(sel) == expected(A, sel)

        A = qutip.tensor(
            qutip.rand_dm(2), qutip.thermal_dm(10, 1), qutip.rand_unitary(3),
        ).to(dtype)
        for sel in ([1, 2], [0, 2], [0, 1]):
            assert A.ptrace(sel) == expected(A, sel)

        # Many small subsystems.
        A = qutip.tensor(
            qutip.rand_ket(2), qutip.rand_ket(2), qutip.rand_ket(2),
            qutip.rand_ket(2), qutip.rand_ket(2), qutip.rand_ket(2),
        ).to(dtype)
        for sel in ([3, 2], [0, 2], [0, 1]):
            assert A.ptrace(sel) == expected(A, sel)

        # Dense random density matrix with explicit compound dims.
        A = qutip.rand_dm(64, 0.5, dims=[[4, 4, 4], [4, 4, 4]]).to(dtype)
        for sel in ([0], [1], [0, 2]):
            assert A.ptrace(sel) == expected(A, sel)
from django import forms
from django.forms import widgets
from .models import Product, Supplier, ReceiveGood, DeliveryGood
class DeliveryGoodCreateForm(forms.ModelForm):
    """Form for creating DeliveryGood records."""

    class Meta:
        model = DeliveryGood
        # NOTE: with `exclude` set, "__all__" is redundant -- exclude wins.
        # created_by/updated_by are audit columns, presumably filled in by
        # the view -- verify against the view code.
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            # HTML5 native date picker.
            "delivery_date": forms.DateInput(attrs={"type": "date"}),
            "delivery_comment": forms.Textarea(attrs={"rows": 3}),
        }
class DeliveryGoodUpdateForm(forms.ModelForm):
    """Form for updating DeliveryGood records (no date-picker widget here)."""

    class Meta:
        model = DeliveryGood
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            "delivery_comment": forms.Textarea(attrs={"rows": 3}),
        }
class ReceiveGoodCreateForm(forms.ModelForm):
    """Form for creating ReceiveGood records."""

    class Meta:
        model = ReceiveGood
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            # HTML5 native date picker.
            "received_date": forms.DateInput(attrs={"type": "date"}),
            "received_comment": forms.Textarea(attrs={"rows": 3}),
        }
class ReceiveGoodUpdateForm(forms.ModelForm):
    """Form for updating ReceiveGood records."""

    class Meta:
        model = ReceiveGood
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            "received_comment": forms.Textarea(attrs={"rows": 3}),
        }
class SupplierForm(forms.ModelForm):
    """Form for creating Supplier records."""

    class Meta:
        model = Supplier
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
class SupplierUpdateForm(forms.ModelForm):
    """Form for updating Supplier records (identical config to SupplierForm)."""

    class Meta:
        model = Supplier
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
class ProductForm(forms.ModelForm):
    """Form for creating Product records."""

    class Meta:
        model = Product
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            # HTML5 native date picker.
            "date_received": forms.DateInput(attrs={"type": "date"}),
            "description": forms.Textarea(attrs={"rows": 3}),
        }
class ProductUpdateForm(forms.ModelForm):
    """Form for updating Product records (identical config to ProductForm)."""

    class Meta:
        model = Product
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            "date_received": forms.DateInput(attrs={"type": "date"}),
            "description": forms.Textarea(attrs={"rows": 3}),
        }
from django.forms import widgets
from .models import Product, Supplier, ReceiveGood, DeliveryGood
class DeliveryGoodCreateForm(forms.ModelForm):
    """Form for creating DeliveryGood records."""

    class Meta:
        model = DeliveryGood
        # NOTE: with `exclude` set, "__all__" is redundant -- exclude wins.
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            # HTML5 native date picker.
            "delivery_date": forms.DateInput(attrs={"type": "date"}),
            "delivery_comment": forms.Textarea(attrs={"rows": 3}),
        }
class DeliveryGoodUpdateForm(forms.ModelForm):
    """Form for updating DeliveryGood records."""

    class Meta:
        model = DeliveryGood
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            "delivery_comment": forms.Textarea(attrs={"rows": 3}),
        }
class ReceiveGoodCreateForm(forms.ModelForm):
    """Form for creating ReceiveGood records."""

    class Meta:
        model = ReceiveGood
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            # HTML5 native date picker.
            "received_date": forms.DateInput(attrs={"type": "date"}),
            "received_comment": forms.Textarea(attrs={"rows": 3}),
        }
class ReceiveGoodUpdateForm(forms.ModelForm):
    """Form for updating ReceiveGood records."""

    class Meta:
        model = ReceiveGood
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            "received_comment": forms.Textarea(attrs={"rows": 3}),
        }
class SupplierForm(forms.ModelForm):
    """Form for creating Supplier records."""

    class Meta:
        model = Supplier
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
class SupplierUpdateForm(forms.ModelForm):
    """Form for updating Supplier records (identical config to SupplierForm)."""

    class Meta:
        model = Supplier
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
class ProductForm(forms.ModelForm):
    """Form for creating Product records."""

    class Meta:
        model = Product
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            # HTML5 native date picker.
            "date_received": forms.DateInput(attrs={"type": "date"}),
            "description": forms.Textarea(attrs={"rows": 3}),
        }
class ProductUpdateForm(forms.ModelForm):
    """Form for updating Product records (identical config to ProductForm)."""

    class Meta:
        model = Product
        fields = "__all__"
        exclude = ["created_by", "updated_by"]
        widgets = {
            "date_received": forms.DateInput(attrs={"type": "date"}),
            "description": forms.Textarea(attrs={"rows": 3}),
        }
import os
import argparse
from translations import Translations
from framed_image import FramedImage
# Maps the fastlane screenshot-folder device suffix (e.g. "phone" taken from
# a "phoneScreenshots" directory name) to the device frame to composite into.
device_to_frame = {
    # All available device_frames can be seen in the device_frames folder
    "phone": "pixel2xl",
    "sevenInch": "tablet1200x2048",
    "tenInch": "tablet1600x2560",
}
def frame_fastlane_screenshots(folder, background, translations):
    """
    Frame all png images inside the given folder, including subfolders.

    Each screenshot is framed in place (output overwrites the input) and
    captioned with the title/message looked up from `translations`.

    :param folder: base folder
    :param background: background image to use
    :param translations: translations to use
    """
    for root, dirs, files in os.walk(folder):
        # The device type is encoded in the folder name, e.g.
        # "phoneScreenshots" -> "phone"; unknown devices fall back to the
        # default phone frame. Hoisted out of the file loop: it only
        # depends on the directory.
        device = os.path.basename(root).replace("Screenshots", "")
        frame = device_to_frame.get(device, "pixel2xl")
        for file in files:
            # endswith() instead of the original substring test: ".png" in
            # "shot.png.bak" previously framed non-PNG files.
            if file.endswith(".png"):
                path = os.path.join(root, file)
                FramedImage(background_name=background, frame=frame,
                            screen_shot=path, output_name=path) \
                    .add_text(translations.get_title(root, file),
                              translations.get_message(root, file),
                              title_font='fonts/MYRIADPRO-BOLDCOND.otf',
                              text_font='fonts/OpenSansCondensed-Light.ttf') \
                    .save()
def main():
    """Parse CLI arguments and frame every screenshot under the given folder."""
    parser = argparse.ArgumentParser(description='Frame screenshots with a device frame, background, etc...')
    parser.add_argument('folder', nargs="?",
                        help='specify the base folder where all the screenshots can be found')
    parser.add_argument('--background', dest="background", default="default.jpg",
                        help='background image to use')
    parser.add_argument('--translations', dest="translations", default="default.json",
                        help='translations file to use')
    args = parser.parse_args()
    translations = Translations(args.translations)
    if args.folder:
        frame_fastlane_screenshots(args.folder, background=args.background, translations=translations)
    else:
        # No folder supplied: re-run the parser with -h to print usage and exit.
        parser.parse_args(["-h"])


if __name__ == '__main__':
    main()
import argparse
from translations import Translations
from framed_image import FramedImage
# Maps the fastlane screenshot-folder device suffix (e.g. "phone" taken from
# a "phoneScreenshots" directory name) to the device frame to composite into.
device_to_frame = {
    # All available device_frames can be seen in the device_frames folder
    "phone": "pixel2xl",
    "sevenInch": "tablet1200x2048",
    "tenInch": "tablet1600x2560",
}
def frame_fastlane_screenshots(folder, background, translations):
    """
    Frame all png images inside the given folder, including subfolders.

    Each screenshot is framed in place (output overwrites the input) and
    captioned with the title/message looked up from `translations`.

    :param folder: base folder
    :param background: background image to use
    :param translations: translations to use
    """
    for root, dirs, files in os.walk(folder):
        # The device type is encoded in the folder name, e.g.
        # "phoneScreenshots" -> "phone"; unknown devices fall back to the
        # default phone frame. Hoisted out of the file loop: it only
        # depends on the directory.
        device = os.path.basename(root).replace("Screenshots", "")
        frame = device_to_frame.get(device, "pixel2xl")
        for file in files:
            # endswith() instead of the original substring test: ".png" in
            # "shot.png.bak" previously framed non-PNG files.
            if file.endswith(".png"):
                path = os.path.join(root, file)
                FramedImage(background_name=background, frame=frame,
                            screen_shot=path, output_name=path) \
                    .add_text(translations.get_title(root, file),
                              translations.get_message(root, file),
                              title_font='fonts/MYRIADPRO-BOLDCOND.otf',
                              text_font='fonts/OpenSansCondensed-Light.ttf') \
                    .save()
def main():
    """Parse CLI arguments and frame every screenshot under the given folder."""
    parser = argparse.ArgumentParser(description='Frame screenshots with a device frame, background, etc...')
    parser.add_argument('folder', nargs="?",
                        help='specify the base folder where all the screenshots can be found')
    parser.add_argument('--background', dest="background", default="default.jpg",
                        help='background image to use')
    parser.add_argument('--translations', dest="translations", default="default.json",
                        help='translations file to use')
    args = parser.parse_args()
    translations = Translations(args.translations)
    if args.folder:
        frame_fastlane_screenshots(args.folder, background=args.background, translations=translations)
    else:
        # No folder supplied: re-run the parser with -h to print usage and exit.
        parser.parse_args(["-h"])


if __name__ == '__main__':
    main()
def add_filter(bot, update):
    """Handle /filter: register a `name - response` pair for this chat.

    Usage: ``/filter <name> - <response>``. Only chat administrators or the
    creator may add filters, and only while the 'filters' plugin is enabled.
    The filter table is persisted to 'filter.db' with pickle.
    """
    from chats_data import chats_data
    from filter_dict import filter_dict
    from pickle import dump
    chat_id = update.message.chat_id
    msg = update.message.text

    def reply(text):
        # Every outcome is reported as a Markdown reply to the command message.
        bot.send_message(chat_id=chat_id, text=text, parse_mode='Markdown',
                         reply_to_message_id=update.message.message_id)

    # Plugin gate: .get() mirrors the sibling handlers (check_filters etc.)
    # and avoids the KeyError the original risked by indexing
    # chats_data[chat_id]['filters'] directly; the original also stayed
    # silent for chats missing from chats_data entirely.
    if not (chats_data.get(chat_id) and chats_data[chat_id].get('filters')):
        reply('The /filter plugin is disabled. You can enable it using `/enable filters` or by /plugins.')
        return
    user = bot.get_chat_member(chat_id, update.message.from_user.id)['status']
    if user not in ("administrator", "creator"):
        reply('Fuck off.')
        return

    usage = '*Format:*\n/filter _filter_\__name_ - _response_'
    parts = msg.split(' ', 1)
    # Guard against "/filter" with no arguments or a missing '-' separator
    # (the original raised IndexError on input like "/filter-x").
    if len(parts) < 2 or '-' not in parts[1]:
        reply(usage)
        return
    name_part, _, response = parts[1].partition('-')
    filter_name = name_part.strip().lower()
    response = response.strip()
    if not response:
        reply(usage)
        return
    chat_filters = filter_dict.setdefault(chat_id, {})
    if filter_name in chat_filters:
        reply(f'A Filter already exists by the name `{filter_name}`')
        return
    chat_filters[filter_name] = response
    # Persist the whole table; the context manager closes the file even when
    # pickling fails (the original duplicated this save logic in two
    # branches and never closed on error).
    with open('filter.db', 'wb') as filter_db:
        dump(filter_dict, filter_db)
    reply(f'Filter `{filter_name}` succesfully added')
def check_filters(bot, update):
    """Reply with the stored response when a message contains a filter trigger.

    Runs on incoming text messages; does nothing unless the 'filters'
    plugin is enabled for the chat and the chat has filters registered.
    """
    from chats_data import chats_data
    from filter_dict import filter_dict
    from re import escape, search
    msg = update.message
    chat_id = msg.chat_id
    if chats_data.get(chat_id, None) and chats_data[chat_id].get('filters', None):
        txt = msg.text.lower()
        for trigger, response in filter_dict.get(chat_id, {}).items():
            # escape(): triggers are literal strings, not regex patterns --
            # the original interpolated them raw, so a trigger such as "("
            # crashed re.search with a pattern error.
            if search(rf"\b{escape(trigger)}\b", txt):
                bot.send_message(chat_id=chat_id, text=response,
                                 reply_to_message_id=msg.message_id)
def filter_list(bot, update):
    """Handle /filters: list the names of all filters stored for this chat."""
    from filter_dict import filter_dict
    from chats_data import chats_data
    chat_id = update.message.chat_id
    chat_title = update.message.chat.title
    # Only respond when the 'filters' plugin is enabled for this chat.
    if chats_data.get(chat_id, None) and chats_data[chat_id].get('filters', None):
        if chat_id in filter_dict.keys():
            if filter_dict[chat_id]:
                # Build one Markdown message listing every trigger name.
                msg = f'Filters for {chat_title}:\n'
                filter_list = filter_dict[chat_id].keys()
                for filter_name in filter_list:
                    msg += f'`{filter_name}`\n'
                bot.send_message(chat_id = update.message.chat_id, text = msg, parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
            else:
                bot.send_message(chat_id = update.message.chat_id, text = 'Filters have not been added yet. Use /filter to add.', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
        else:
            bot.send_message(chat_id = update.message.chat_id, text = 'Filters have not been added yet. Use /filter to add', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
    else:
        bot.send_message(chat_id = update.message.chat_id, text = 'The /filters plugin is disabled. You can enable it using `/enable filters` or by /plugins.', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
def remove_filter(bot, update):
    """Handle /stop: delete a named filter from this chat's filter table."""
    from chats_data import chats_data
    from filter_dict import filter_dict
    chat_id = update.message.chat_id
    msg = update.message.text
    # Only respond when the 'filters' plugin is enabled for this chat.
    if chats_data.get(chat_id, None) and chats_data[chat_id].get('filters', None):
        user = bot.get_chat_member(chat_id, update.message.from_user.id)['status']
        if user in ["administrator", "creator"]:
            msg_list = msg.strip().split(' ', 1)
            if len(msg_list) == 2:
                trigger = msg_list[1].lower().strip()
                if chat_id in filter_dict.keys():
                    if trigger in filter_dict[chat_id].keys():
                        del filter_dict[chat_id][trigger]
                        # Persist the updated table to disk.
                        from pickle import dump
                        filter_db = open('filter.db', 'wb')
                        dump(filter_dict, filter_db)
                        filter_db.close()
                        bot.send_message(chat_id = update.message.chat_id, text = f'Successfully deleted `{trigger}`', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                    else:
                        bot.send_message(chat_id = update.message.chat_id, text = "Such a filter doesn't exist for your group.", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                else:
                    bot.send_message(chat_id = update.message.chat_id, text = "Filters have not been added yet", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
            else:
                bot.send_message(chat_id = update.message.chat_id, text = "*Format:*\n/stop _filter_\__name_", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
        else:
            bot.send_message(chat_id = update.message.chat_id, text = "Fuck off.", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
    else:
        bot.send_message(chat_id = update.message.chat_id, text = "The /stop command is disabled. You can enable it using `/enable filters` or by /plugins.", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
    # /filter handler body: registers a `name - response` pair for this chat.
    from chats_data import chats_data
    from filter_dict import filter_dict
    chat_id = update.message.chat_id
    msg = update.message.text
    user = bot.get_chat_member(chat_id, update.message.from_user.id)['status']
    # NOTE(review): unlike the sibling handlers, this indexes
    # chats_data[chat_id]['filters'] directly (KeyError if the key is
    # missing) and stays silent when the chat is absent from chats_data --
    # confirm whether that is intended.
    if update.message.chat_id in chats_data.keys():
        if chats_data[chat_id]['filters'] == True:
            if user in ["administrator", "creator"]:
                if '-' in msg:
                    # "/filter <name> - <response>": drop the command token,
                    # then split on the first '-'.
                    msg_list = msg.split(' ', 1)[1].split('-', 1)
                    filter_name = msg_list[0].strip().lower()
                    if msg_list[1]:
                        response = msg_list[1].strip()
                        if chat_id in filter_dict.keys():
                            if filter_name not in filter_dict[chat_id].keys():
                                filter_dict[chat_id][filter_name] = response
                                # Persist the whole filter table with pickle.
                                from pickle import dump
                                filter_db = open('filter.db', 'wb')
                                dump(filter_dict, filter_db)
                                filter_db.close()
                                text = f'Filter `{filter_name}` succesfully added'
                                bot.send_message(chat_id = update.message.chat_id, text = text, parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                            else:
                                text = f'A Filter already exists by the name `{filter_name}`'
                                bot.send_message(chat_id = update.message.chat_id, text = text, parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                        else:
                            # First filter for this chat: create its table.
                            filter_dict[chat_id] = {}
                            filter_dict[chat_id][filter_name] = response
                            from pickle import dump
                            filter_db = open('filter.db', 'wb')
                            dump(filter_dict, filter_db)
                            filter_db.close()
                            text = f'Filter `{filter_name}` succesfully added'
                            bot.send_message(chat_id = update.message.chat_id, text = text, parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                    else:
                        bot.send_message(chat_id = update.message.chat_id, text = '*Format:*\n/filter _filter_\__name_ - _response_', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                else:
                    bot.send_message(chat_id = update.message.chat_id, text = '*Format:*\n/filter _filter_\__name_ - _response_', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
            else:
                bot.send_message(chat_id = update.message.chat_id, text = 'Fuck off.', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
        else:
            bot.send_message(chat_id = update.message.chat_id, text = 'The /filter plugin is disabled. You can enable it using `/enable filters` or by /plugins.', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
def check_filters(bot, update):
    """Reply with the stored response when a message contains a filter trigger.

    Runs on incoming text messages; does nothing unless the 'filters'
    plugin is enabled for the chat and the chat has filters registered.
    """
    from chats_data import chats_data
    from filter_dict import filter_dict
    from re import escape, search
    msg = update.message
    chat_id = msg.chat_id
    if chats_data.get(chat_id, None) and chats_data[chat_id].get('filters', None):
        txt = msg.text.lower()
        for trigger, response in filter_dict.get(chat_id, {}).items():
            # escape(): triggers are literal strings, not regex patterns --
            # the original interpolated them raw, so a trigger such as "("
            # crashed re.search with a pattern error.
            if search(rf"\b{escape(trigger)}\b", txt):
                bot.send_message(chat_id=chat_id, text=response,
                                 reply_to_message_id=msg.message_id)
def filter_list(bot, update):
    """Handle /filters: list the names of all filters stored for this chat."""
    from filter_dict import filter_dict
    from chats_data import chats_data
    chat_id = update.message.chat_id
    chat_title = update.message.chat.title
    # Only respond when the 'filters' plugin is enabled for this chat.
    if chats_data.get(chat_id, None) and chats_data[chat_id].get('filters', None):
        if chat_id in filter_dict.keys():
            if filter_dict[chat_id]:
                # Build one Markdown message listing every trigger name.
                msg = f'Filters for {chat_title}:\n'
                filter_list = filter_dict[chat_id].keys()
                for filter_name in filter_list:
                    msg += f'`{filter_name}`\n'
                bot.send_message(chat_id = update.message.chat_id, text = msg, parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
            else:
                bot.send_message(chat_id = update.message.chat_id, text = 'Filters have not been added yet. Use /filter to add.', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
        else:
            bot.send_message(chat_id = update.message.chat_id, text = 'Filters have not been added yet. Use /filter to add', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
    else:
        bot.send_message(chat_id = update.message.chat_id, text = 'The /filters plugin is disabled. You can enable it using `/enable filters` or by /plugins.', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
def remove_filter(bot, update):
    """Handle /stop: delete a named filter from this chat's filter table."""
    from chats_data import chats_data
    from filter_dict import filter_dict
    chat_id = update.message.chat_id
    msg = update.message.text
    # Only respond when the 'filters' plugin is enabled for this chat.
    if chats_data.get(chat_id, None) and chats_data[chat_id].get('filters', None):
        user = bot.get_chat_member(chat_id, update.message.from_user.id)['status']
        if user in ["administrator", "creator"]:
            msg_list = msg.strip().split(' ', 1)
            if len(msg_list) == 2:
                trigger = msg_list[1].lower().strip()
                if chat_id in filter_dict.keys():
                    if trigger in filter_dict[chat_id].keys():
                        del filter_dict[chat_id][trigger]
                        # Persist the updated table to disk.
                        from pickle import dump
                        filter_db = open('filter.db', 'wb')
                        dump(filter_dict, filter_db)
                        filter_db.close()
                        bot.send_message(chat_id = update.message.chat_id, text = f'Successfully deleted `{trigger}`', parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                    else:
                        bot.send_message(chat_id = update.message.chat_id, text = "Such a filter doesn't exist for your group.", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
                else:
                    bot.send_message(chat_id = update.message.chat_id, text = "Filters have not been added yet", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
            else:
                bot.send_message(chat_id = update.message.chat_id, text = "*Format:*\n/stop _filter_\__name_", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
        else:
            bot.send_message(chat_id = update.message.chat_id, text = "Fuck off.", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
    else:
        bot.send_message(chat_id = update.message.chat_id, text = "The /stop command is disabled. You can enable it using `/enable filters` or by /plugins.", parse_mode = 'Markdown', reply_to_message_id = update.message.message_id)
from types import SimpleNamespace
from models import SimpleDQN, DuelingDQN
import torch
import warnings
import ptan
import ptan.ignite as ptan_ignite
from ignite.engine import Engine
from ignite.metrics import RunningAverage
from ignite.contrib.handlers import tensorboard_logger as tb_logger
from datetime import timedelta, datetime
# Global RNG seed shared by the training scripts.
SEED = 0
# NOTE(review): hard-coded CUDA device -- assumes a GPU is available.
DEVICE = torch.device('cuda')

# Named hyperparameter bundles; SimpleNamespace gives attribute access,
# e.g. HYPERPARAMETERS["Pong"].batch_size.
HYPERPARAMETERS = {
    "Pong": SimpleNamespace(**{
        "env_name": "PongNoFrameskip-v4",
        "model_fn": SimpleDQN,          # network class to instantiate
        "stop_reward": 18.0,            # mean reward that counts as solved
        "run_name": "pong",
        "replay_size": 100000,          # replay buffer capacity
        "replay_initial": 10000,        # transitions collected before training
        "target_net_sync": 1000,
        "epsilon_frames": 100000,       # span of the epsilon annealing schedule
        "epsilon_start": 1.0,
        "epsilon_end": 0.02,
        "learning_rate": 0.0001,
        "gamma": 0.99,                  # discount factor
        "batch_size": 32
    }),
    # Identical schedule to "Pong" but using the dueling-head network.
    "PongDueling": SimpleNamespace(**{
        "env_name": "PongNoFrameskip-v4",
        "model_fn": DuelingDQN,
        "stop_reward": 18.0,
        "run_name": "pong",
        "replay_size": 100000,
        "replay_initial": 10000,
        "target_net_sync": 1000,
        "epsilon_frames": 100000,
        "epsilon_start": 1.0,
        "epsilon_end": 0.02,
        "learning_rate": 0.0001,
        "gamma": 0.99,
        "batch_size": 32
    })
}
def setup_ignite(engine, params, exp_source, run_name, extra_metrics=None):
    """Wire standard ptan/ignite handlers and TensorBoard logging onto `engine`.

    Attaches end-of-episode bookkeeping, console progress printing, early
    termination once `params.stop_reward` is reached, and TensorBoard output
    for episode metrics (every episode) and training metrics (every 100
    iterations).

    :param engine: ignite Engine driving the training loop
    :param params: hyperparameter namespace (uses stop_reward, run_name)
    :param exp_source: ptan experience source feeding the engine
    :param run_name: suffix for the TensorBoard run directory
    :param extra_metrics: optional extra metric names logged every 100 iters
    """
    # Fix the mutable-default-argument pitfall: default to None and build a
    # fresh list per call instead of sharing one list across calls.
    if extra_metrics is None:
        extra_metrics = []
    # get rid of missing metrics warning
    warnings.simplefilter("ignore", category=UserWarning)
    handler = ptan_ignite.EndOfEpisodeHandler(
        exp_source, bound_avg_reward=params.stop_reward)
    handler.attach(engine)
    ptan_ignite.EpisodeFPSHandler().attach(engine)

    @engine.on(ptan_ignite.EpisodeEvents.EPISODE_COMPLETED)
    def episode_completed(trainer: Engine):
        # Console progress line after every finished episode.
        passed = trainer.state.metrics.get('time_passed', 0)
        print("Episode %d: reward=%.0f, steps=%s, "
              "speed=%.1f f/s, elapsed=%s" % (
                  trainer.state.episode, trainer.state.episode_reward,
                  trainer.state.episode_steps,
                  trainer.state.metrics.get('avg_fps', 0),
                  timedelta(seconds=int(passed))))

    @engine.on(ptan_ignite.EpisodeEvents.BOUND_REWARD_REACHED)
    def game_solved(trainer: Engine):
        # Stop training once the bounded average reward is reached.
        passed = trainer.state.metrics['time_passed']
        print("Game solved in %s, after %d episodes "
              "and %d iterations!" % (
                  timedelta(seconds=int(passed)),
                  trainer.state.episode, trainer.state.iteration))
        trainer.should_terminate = True

    now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    logdir = f"runs/{now}-{params.run_name}-{run_name}"
    tb = tb_logger.TensorboardLogger(log_dir=logdir)
    run_avg = RunningAverage(output_transform=lambda v: v['loss'])
    run_avg.attach(engine, "avg_loss")

    # Per-episode metrics.
    metrics = ['reward', 'steps', 'avg_reward']
    handler = tb_logger.OutputHandler(
        tag="episodes", metric_names=metrics)
    event = ptan_ignite.EpisodeEvents.EPISODE_COMPLETED
    tb.attach(engine, log_handler=handler, event_name=event)

    # write to tensorboard every 100 iterations
    ptan_ignite.PeriodicEvents().attach(engine)
    metrics = ['avg_loss', 'avg_fps']
    metrics.extend(extra_metrics)
    handler = tb_logger.OutputHandler(
        tag="train", metric_names=metrics,
        output_transform=lambda a: a)
    event = ptan_ignite.PeriodEvents.ITERS_100_COMPLETED
    tb.attach(engine, log_handler=handler, event_name=event)
from models import SimpleDQN, DuelingDQN
import torch
import warnings
import ptan
import ptan.ignite as ptan_ignite
from ignite.engine import Engine
from ignite.metrics import RunningAverage
from ignite.contrib.handlers import tensorboard_logger as tb_logger
from datetime import timedelta, datetime
# Global RNG seed shared by the training scripts.
SEED = 0
# NOTE(review): hard-coded CUDA device -- assumes a GPU is available.
DEVICE = torch.device('cuda')

# Named hyperparameter bundles; SimpleNamespace gives attribute access,
# e.g. HYPERPARAMETERS["Pong"].batch_size.
HYPERPARAMETERS = {
    "Pong": SimpleNamespace(**{
        "env_name": "PongNoFrameskip-v4",
        "model_fn": SimpleDQN,          # network class to instantiate
        "stop_reward": 18.0,            # mean reward that counts as solved
        "run_name": "pong",
        "replay_size": 100000,          # replay buffer capacity
        "replay_initial": 10000,        # transitions collected before training
        "target_net_sync": 1000,
        "epsilon_frames": 100000,       # span of the epsilon annealing schedule
        "epsilon_start": 1.0,
        "epsilon_end": 0.02,
        "learning_rate": 0.0001,
        "gamma": 0.99,                  # discount factor
        "batch_size": 32
    }),
    # Identical schedule to "Pong" but using the dueling-head network.
    "PongDueling": SimpleNamespace(**{
        "env_name": "PongNoFrameskip-v4",
        "model_fn": DuelingDQN,
        "stop_reward": 18.0,
        "run_name": "pong",
        "replay_size": 100000,
        "replay_initial": 10000,
        "target_net_sync": 1000,
        "epsilon_frames": 100000,
        "epsilon_start": 1.0,
        "epsilon_end": 0.02,
        "learning_rate": 0.0001,
        "gamma": 0.99,
        "batch_size": 32
    })
}
def setup_ignite(engine, params, exp_source, run_name, extra_metrics=None):
    """Wire standard ptan/ignite handlers and TensorBoard logging onto `engine`.

    Attaches end-of-episode bookkeeping, console progress printing, early
    termination once `params.stop_reward` is reached, and TensorBoard output
    for episode metrics (every episode) and training metrics (every 100
    iterations).

    :param engine: ignite Engine driving the training loop
    :param params: hyperparameter namespace (uses stop_reward, run_name)
    :param exp_source: ptan experience source feeding the engine
    :param run_name: suffix for the TensorBoard run directory
    :param extra_metrics: optional extra metric names logged every 100 iters
    """
    # Fix the mutable-default-argument pitfall: default to None and build a
    # fresh list per call instead of sharing one list across calls.
    if extra_metrics is None:
        extra_metrics = []
    # get rid of missing metrics warning
    warnings.simplefilter("ignore", category=UserWarning)
    handler = ptan_ignite.EndOfEpisodeHandler(
        exp_source, bound_avg_reward=params.stop_reward)
    handler.attach(engine)
    ptan_ignite.EpisodeFPSHandler().attach(engine)

    @engine.on(ptan_ignite.EpisodeEvents.EPISODE_COMPLETED)
    def episode_completed(trainer: Engine):
        # Console progress line after every finished episode.
        passed = trainer.state.metrics.get('time_passed', 0)
        print("Episode %d: reward=%.0f, steps=%s, "
              "speed=%.1f f/s, elapsed=%s" % (
                  trainer.state.episode, trainer.state.episode_reward,
                  trainer.state.episode_steps,
                  trainer.state.metrics.get('avg_fps', 0),
                  timedelta(seconds=int(passed))))

    @engine.on(ptan_ignite.EpisodeEvents.BOUND_REWARD_REACHED)
    def game_solved(trainer: Engine):
        # Stop training once the bounded average reward is reached.
        passed = trainer.state.metrics['time_passed']
        print("Game solved in %s, after %d episodes "
              "and %d iterations!" % (
                  timedelta(seconds=int(passed)),
                  trainer.state.episode, trainer.state.iteration))
        trainer.should_terminate = True

    now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    logdir = f"runs/{now}-{params.run_name}-{run_name}"
    tb = tb_logger.TensorboardLogger(log_dir=logdir)
    run_avg = RunningAverage(output_transform=lambda v: v['loss'])
    run_avg.attach(engine, "avg_loss")

    # Per-episode metrics.
    metrics = ['reward', 'steps', 'avg_reward']
    handler = tb_logger.OutputHandler(
        tag="episodes", metric_names=metrics)
    event = ptan_ignite.EpisodeEvents.EPISODE_COMPLETED
    tb.attach(engine, log_handler=handler, event_name=event)

    # write to tensorboard every 100 iterations
    ptan_ignite.PeriodicEvents().attach(engine)
    metrics = ['avg_loss', 'avg_fps']
    metrics.extend(extra_metrics)
    handler = tb_logger.OutputHandler(
        tag="train", metric_names=metrics,
        output_transform=lambda a: a)
    event = ptan_ignite.PeriodEvents.ITERS_100_COMPLETED
    tb.attach(engine, log_handler=handler, event_name=event)
import os
import subprocess
from .interface import AbstractQueue
from ..utils import config
class Queue(AbstractQueue):
    """Job queue that runs experiments natively on an Intel MIC coprocessor
    over ssh, exchanging data through NFS-shared directories (nfsmaps)."""

    # Shell script template written to {datadir}/job.sh and executed on the
    # MIC; it records Running/Finished status in {datadir}/.job.stat.
    mic_script = """#!/bin/sh
ulimit -s unlimited
export PATH={tau_root}/bin:$PATH
export LD_LIBRARY_PATH={ldlibpath}
cd {datadir}/../..
# mark the job as running
echo -n "{exp_name} {insname} mic Running" >{datadir}/.job.stat
# setup the environment for the experiment
{exp_setup}
# run the experiment
{exp_run} 2>&1 | tee {datadir}/job.log
# mark the job as finished
echo -n "{exp_name} {insname} mic Finished" >{datadir}/.job.stat
"""

    def __init__(self, experiment):
        """Read queue configuration and precompute MIC-side paths.

        :param experiment: the experiment this queue will run
        """
        self.name = "mic"
        self.longname = "Queue.%s.%s.%s" % (self.name, experiment.platform_name, experiment.name)
        self.experiment = experiment
        self.platform = self.experiment.platform
        self.logger = experiment.logger
        # ssh target of the MIC card, taken from the config file.
        self.target = config.get("%s.target" % self.longname)
        # nfsmaps config: whitespace-separated "hostpath:micpath" pairs
        # describing how host paths appear on the MIC's NFS mounts.
        self.nfsmaps = dict()
        for m in config.get("%s.nfsmaps" % self.longname).split():
            a, b = m.split(":")
            self.nfsmaps[os.path.realpath(a)] = b;
        # rootdir and tauroot must be nfs mounted
        self.rootdir = self.host2mic(experiment.rootdir)
        self.tauroot = self.host2mic(experiment.tauroot)
        self.sinkpath = config.get("%s.sinkpath" % self.longname, "/opt/intel/lib/mic")
        # Shared-library dependencies of the executable; their directories
        # form the MIC-side LD_LIBRARY_PATH.
        self.depends = self.get_dependencies(experiment.execmd)
        self.ldlibpath = set()
        for d in self.depends:
            self.ldlibpath.add(os.path.dirname(d))
        self.ldlibpath = ":".join(map(self.host2mic, self.ldlibpath))

    def setup(self):
        # Refresh the platform reference in case the experiment re-bound it.
        self.platform = self.experiment.platform

    def host2mic(self, path):
        """
        Map a host path to mic path based on nfsmaps

        Args:
          path: a host path

        Return:
          string: mapped mic path

        Exception:
          cannot map the host path to mic path
        """
        path = os.path.expanduser(path)
        path = os.path.realpath(path)
        # Pick the longest matching host-side prefix so nested mounts map
        # to the most specific MIC path.
        base = ""
        for m in self.nfsmaps:
            if path.startswith(m) and len(m) > len(base):
                base = m
        if len(base) == 0:
            raise Exception("`%s' is not maped to MIC, please check nfsmaps" % path)
        else:
            return path.replace(base, self.nfsmaps[base], 1)

    def get_dependencies(self, app):
        """
        Get the library dependencies of `app' with 'micnativeloadex -l'

        Args:
          app: the path of the application executable

        Return:
          list: a list of dependent library path
        """
        env = {'SINK_LD_LIBRARY_PATH': self.sinkpath}
        output = subprocess.check_output(["micnativeloadex", app, "-l"],
                                         env=dict(list(os.environ.items()) + list(env.items())))
        # Parse the section between "Dependencies Found:" and
        # "Dependencies Not Found Locally" in the tool's output.
        depends = []
        found = False;
        for line in map(str.strip, output.splitlines()):
            if line.startswith("Dependencies Found:"):
                found = True;
            elif line.startswith("Dependencies Not Found Locally"):
                break;
            else:
                if found and line != "" and line != "(none found)":
                    depends.append(line)
        return depends

    def get_status(self, idstr):
        """Return the status of the job identified by a "queue:pid" string."""
        queue, colon, pid = idstr.partition(":")
        if queue != "mic":
            print("queue: %s" % queue)
            raise Exception("Fatal error: job queue mismatch!")
        # FIXME: need a way to know whether the job is killed or not
        return "Alive"

    def submit(self, cmd, block=False):
        """Materialise the job script for `cmd` and run it on the MIC via ssh.

        :param cmd: the (already wrapped) command line to execute
        :param block: unused here; the ssh call is always synchronous
        """
        datadir = self.experiment.datadirs[self.experiment.iteration]
        datadir = self.host2mic(datadir)
        # NOTE(review): datadir is the MIC-side path but is opened from the
        # host; this presumably relies on identical NFS mount points on
        # both sides -- verify.
        jobstat = "%s/.job.stat" % datadir
        with open(jobstat, "w") as f:
            f.write("%s %s mic Queueing" % (self.experiment.name,
                                            self.experiment.insname))
        content = self.mic_script.format(
            tau_root=self.tauroot,
            ldlibpath=self.ldlibpath,
            insname=self.experiment.insname,
            exp_name=self.experiment.name,
            exp_setup=self.platform.setup_str(),
            datadir=datadir,
            exp_run=cmd,
        )
        script_name = "%s/job.sh" % datadir
        self.logger.info("Populating the MIC native job script")
        with open(script_name, "w") as script:
            script.write(content)
        os.chmod(script_name, 0o755)
        print("*** Submitting MIC native job ...")
        self.logger.info("Running the MIC native job script")
        self.logger.cmd("ssh %s %s\n", self.target, script_name)
        subprocess.call(["ssh", self.target, script_name])

    def wrap_command(self, execmd, exeopt) -> (str, str):
        # Only the executable path needs translating to its MIC-side path.
        return (self.host2mic(execmd), exeopt)
import subprocess
from .interface import AbstractQueue
from ..utils import config
class Queue(AbstractQueue):
mic_script = """#!/bin/sh
ulimit -s unlimited
export PATH={tau_root}/bin:$PATH
export LD_LIBRARY_PATH={ldlibpath}
cd {datadir}/../..
# mark the job as running
echo -n "{exp_name} {insname} mic Running" >{datadir}/.job.stat
# setup the environment for the experiment
{exp_setup}
# run the experiment
{exp_run} 2>&1 | tee {datadir}/job.log
# mark the job as finished
echo -n "{exp_name} {insname} mic Finished" >{datadir}/.job.stat
"""
def __init__(self, experiment):
self.name = "mic"
self.longname = "Queue.%s.%s.%s" % (self.name, experiment.platform_name, experiment.name)
self.experiment = experiment
self.platform = self.experiment.platform
self.logger = experiment.logger
self.target = config.get("%s.target" % self.longname)
self.nfsmaps = dict()
for m in config.get("%s.nfsmaps" % self.longname).split():
a, b = m.split(":")
self.nfsmaps[os.path.realpath(a)] = b;
# rootdir and tauroot must be nfs mounted
self.rootdir = self.host2mic(experiment.rootdir)
self.tauroot = self.host2mic(experiment.tauroot)
self.sinkpath = config.get("%s.sinkpath" % self.longname, "/opt/intel/lib/mic")
self.depends = self.get_dependencies(experiment.execmd)
self.ldlibpath = set()
for d in self.depends:
self.ldlibpath.add(os.path.dirname(d))
self.ldlibpath = ":".join(map(self.host2mic, self.ldlibpath))
def setup(self):
self.platform = self.experiment.platform
def host2mic(self, path):
"""
Map a host path to mic path based on nfsmaps
Args:
path: a host path
Return:
string: mapped mic path
Exception:
cannot map the host path to mic path
"""
path = os.path.expanduser(path)
path = os.path.realpath(path)
base = ""
for m in self.nfsmaps:
if path.startswith(m) and len(m) > len(base):
base = m
if len(base) == 0:
raise Exception("`%s' is not maped to MIC, please check nfsmaps" % path)
else:
return path.replace(base, self.nfsmaps[base], 1)
def get_dependencies(self, app):
"""
Get the library dependencies of `app' with 'micnativeloadex -l'
Args:
app: the path of the application executable
Return:
list: a list of dependent library path
"""
env = {'SINK_LD_LIBRARY_PATH': self.sinkpath}
output = subprocess.check_output(["micnativeloadex", app, "-l"],
env=dict(list(os.environ.items()) + list(env.items())))
depends = []
found = False;
for line in map(str.strip, output.splitlines()):
if line.startswith("Dependencies Found:"):
found = True;
elif line.startswith("Dependencies Not Found Locally"):
break;
else:
if found and line != "" and line != "(none found)":
depends.append(line)
return depends
def get_status(self, idstr):
    """Return the status of a job identified by a 'queue:pid' string."""
    queue = idstr.partition(":")[0]
    if queue != "mic":
        print("queue: %s" % queue)
        raise Exception("Fatal error: job queue mismatch!")
    # FIXME: need a way to know whether the job is killed or not
    return "Alive"
def submit(self, cmd, block=False):
    """
    Generate the MIC-native job script for the current iteration and run it
    on the target host over ssh.

    Args:
        cmd:   the (already wrapped) command line to execute on the MIC
        block: accepted for interface compatibility; currently unused --
               the ssh call below always waits for the script to finish
    """
    datadir = self.experiment.datadirs[self.experiment.iteration]
    datadir = self.host2mic(datadir)
    # NOTE(review): the status file and job script are written at the
    # MIC-mapped path from the host side; this assumes the mapped location
    # is also reachable on the host filesystem -- TODO confirm.
    jobstat = "%s/.job.stat" % datadir
    with open(jobstat, "w") as f:
        f.write("%s %s mic Queueing" % (self.experiment.name,
                                        self.experiment.insname))
    # Fill the job-script template (self.mic_script, defined elsewhere in
    # this class) with the experiment/platform context.
    content = self.mic_script.format(
        tau_root=self.tauroot,
        ldlibpath=self.ldlibpath,
        insname=self.experiment.insname,
        exp_name=self.experiment.name,
        exp_setup=self.platform.setup_str(),
        datadir=datadir,
        exp_run=cmd,
    )
    script_name = "%s/job.sh" % datadir
    self.logger.info("Populating the MIC native job script")
    with open(script_name, "w") as script:
        script.write(content)
    # Make the generated script executable before invoking it remotely.
    os.chmod(script_name, 0o755)
    print("*** Submitting MIC native job ...")
    self.logger.info("Running the MIC native job script")
    self.logger.cmd("ssh %s %s\n", self.target, script_name)
    subprocess.call(["ssh", self.target, script_name])
def wrap_command(self, execmd, exeopt) -> (str, str):
    """Rewrite the executable path for the MIC; options pass through unchanged."""
    mic_exe = self.host2mic(execmd)
    return mic_exe, exeopt
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import bad_lsp_log
class counts_state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-isis-operational - based on the path /counts-state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IS-IS Counters
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__areamis','__maxareamis','__badidlen','__seqskip','__seqerr','__l1dbol','__l2dbol','__ownpurge','__csnp_l1authfail','__csnp_l2authfail','__psnp_l1authfail','__psnp_l2authfail','__circ_l1authfail','__circ_l2authfail','__bad_lsp_log',)
_yang_name = 'counts-state'
_rest_name = 'counts-state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """
    Build the counts-state container: resolve the XPath helper and
    extension-method table, then create one YANGDynClass-wrapped member per
    leaf/list element of the container.
    """
    # Resolve the XPath helper: explicit kwarg wins, then the parent's
    # helper, otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
        self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
        self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
        path_helper_ = getattr(self._parent, "_path_helper", False)
        self._path_helper = path_helper_
    else:
        self._path_helper = False
    # Same resolution order for the extmethods dictionary.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
        self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
        self._extmethods = extmethods
    elif hasattr(self, "_parent"):
        extmethods = getattr(self._parent, "_extmethods", None)
        self._extmethods = extmethods
    else:
        self._extmethods = False
    # Per-leaf uint32 counters (plus the bad-lsp-log list), each wrapped in
    # YANGDynClass so assignments are range-checked and paths registered.
    self.__psnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__circ_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__circ_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__maxareamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__bad_lsp_log = YANGDynClass(base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
    self.__seqskip = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__csnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__seqerr = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__l1dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__csnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__psnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__l2dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__areamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__ownpurge = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    self.__badidlen = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    load = kwargs.pop("load", None)
    # Copy-constructor path: one positional argument must expose every
    # pyangbind element; changed values are copied in via _set_* methods.
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            # Only copy members that differ from their defaults.
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return the YANG path of this container as a list of node names."""
    if not hasattr(self, "_parent"):
        return [u'counts-state']
    return self._parent._path() + [self._yang_name]
def _rest_path(self):
    """Return the REST path of this container, skipping unnamed levels."""
    if not hasattr(self, "_parent"):
        return [u'counts-state']
    parent_path = self._parent._rest_path()
    if self._rest_name:
        return parent_path + [self._rest_name]
    return parent_path
def _get_areamis(self):
    """
    Getter method for areamis, mapped from YANG variable /counts_state/areamis (uint32)
    YANG Description: Area Mismatch
    """
    return self.__areamis
def _set_areamis(self, v, load=False):
    """
    Setter method for areamis, mapped from YANG variable /counts_state/areamis (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_areamis is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_areamis() directly.
    YANG Description: Area Mismatch
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """areamis must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__areamis = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_areamis(self):
    """Reset areamis to a fresh default-valued uint32 wrapper."""
    self.__areamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_maxareamis(self):
    """
    Getter method for maxareamis, mapped from YANG variable /counts_state/maxareamis (uint32)
    YANG Description: Max Area Mismatch
    """
    return self.__maxareamis
def _set_maxareamis(self, v, load=False):
    """
    Setter method for maxareamis, mapped from YANG variable /counts_state/maxareamis (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_maxareamis is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_maxareamis() directly.
    YANG Description: Max Area Mismatch
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """maxareamis must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__maxareamis = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_maxareamis(self):
    """Reset maxareamis to a fresh default-valued uint32 wrapper."""
    self.__maxareamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_badidlen(self):
    """
    Getter method for badidlen, mapped from YANG variable /counts_state/badidlen (uint32)
    YANG Description: System ID Length Mismatch
    """
    return self.__badidlen
def _set_badidlen(self, v, load=False):
    """
    Setter method for badidlen, mapped from YANG variable /counts_state/badidlen (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_badidlen is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_badidlen() directly.
    YANG Description: System ID Length Mismatch
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """badidlen must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__badidlen = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_badidlen(self):
    """Reset badidlen to a fresh default-valued uint32 wrapper."""
    self.__badidlen = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_seqskip(self):
    """
    Getter method for seqskip, mapped from YANG variable /counts_state/seqskip (uint32)
    YANG Description: LSP Sequence Number Skipped
    """
    return self.__seqskip
def _set_seqskip(self, v, load=False):
    """
    Setter method for seqskip, mapped from YANG variable /counts_state/seqskip (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_seqskip is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_seqskip() directly.
    YANG Description: LSP Sequence Number Skipped
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """seqskip must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__seqskip = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_seqskip(self):
    """Reset seqskip to a fresh default-valued uint32 wrapper."""
    self.__seqskip = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_seqerr(self):
    """
    Getter method for seqerr, mapped from YANG variable /counts_state/seqerr (uint32)
    YANG Description: LSP Max Sequence Number Exceeded
    """
    return self.__seqerr
def _set_seqerr(self, v, load=False):
    """
    Setter method for seqerr, mapped from YANG variable /counts_state/seqerr (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_seqerr is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_seqerr() directly.
    YANG Description: LSP Max Sequence Number Exceeded
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """seqerr must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__seqerr = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_seqerr(self):
    """Reset seqerr to a fresh default-valued uint32 wrapper."""
    self.__seqerr = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_l1dbol(self):
    """
    Getter method for l1dbol, mapped from YANG variable /counts_state/l1dbol (uint32)
    YANG Description: Level-1 Database Overload
    """
    return self.__l1dbol
def _set_l1dbol(self, v, load=False):
    """
    Setter method for l1dbol, mapped from YANG variable /counts_state/l1dbol (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_l1dbol is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_l1dbol() directly.
    YANG Description: Level-1 Database Overload
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """l1dbol must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__l1dbol = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_l1dbol(self):
    """Reset l1dbol to a fresh default-valued uint32 wrapper."""
    self.__l1dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_l2dbol(self):
    """
    Getter method for l2dbol, mapped from YANG variable /counts_state/l2dbol (uint32)
    YANG Description: Level-2 Database Overload
    """
    return self.__l2dbol
def _set_l2dbol(self, v, load=False):
    """
    Setter method for l2dbol, mapped from YANG variable /counts_state/l2dbol (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_l2dbol is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_l2dbol() directly.
    YANG Description: Level-2 Database Overload
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """l2dbol must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__l2dbol = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_l2dbol(self):
    """Reset l2dbol to a fresh default-valued uint32 wrapper."""
    self.__l2dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_ownpurge(self):
    """
    Getter method for ownpurge, mapped from YANG variable /counts_state/ownpurge (uint32)
    YANG Description: Our LSP Purged
    """
    return self.__ownpurge
def _set_ownpurge(self, v, load=False):
    """
    Setter method for ownpurge, mapped from YANG variable /counts_state/ownpurge (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ownpurge is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ownpurge() directly.
    YANG Description: Our LSP Purged
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ownpurge must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__ownpurge = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_ownpurge(self):
    """Reset ownpurge to a fresh default-valued uint32 wrapper."""
    self.__ownpurge = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_csnp_l1authfail(self):
    """
    Getter method for csnp_l1authfail, mapped from YANG variable /counts_state/csnp_l1authfail (uint32)
    YANG Description: L1 CSNP PDU auth failures
    """
    return self.__csnp_l1authfail
def _set_csnp_l1authfail(self, v, load=False):
    """
    Setter method for csnp_l1authfail, mapped from YANG variable /counts_state/csnp_l1authfail (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_csnp_l1authfail is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_csnp_l1authfail() directly.
    YANG Description: L1 CSNP PDU auth failures
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """csnp_l1authfail must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__csnp_l1authfail = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_csnp_l1authfail(self):
    """Reset csnp_l1authfail to a fresh default-valued uint32 wrapper."""
    self.__csnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_csnp_l2authfail(self):
    """
    Getter method for csnp_l2authfail, mapped from YANG variable /counts_state/csnp_l2authfail (uint32)
    YANG Description: L2 CSNP PDU auth failures
    """
    return self.__csnp_l2authfail
def _set_csnp_l2authfail(self, v, load=False):
    """
    Setter method for csnp_l2authfail, mapped from YANG variable /counts_state/csnp_l2authfail (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_csnp_l2authfail is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_csnp_l2authfail() directly.
    YANG Description: L2 CSNP PDU auth failures
    """
    # Unwrap a previously-wrapped value back to its base type before re-wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """csnp_l2authfail must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__csnp_l2authfail = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
        self._set()
def _unset_csnp_l2authfail(self):
    """Reset csnp_l2authfail to a fresh default-valued uint32 wrapper."""
    self.__csnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_psnp_l1authfail(self):
  """
  Return the psnp_l1authfail leaf (uint32) of /counts_state.

  YANG description: L1 PSNP PDU auth failures.
  """
  return self.__psnp_l1authfail
def _set_psnp_l1authfail(self, v, load=False):
  """
  Setter method for psnp_l1authfail, mapped from YANG variable /counts_state/psnp_l1authfail (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_psnp_l1authfail is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_psnp_l1authfail() directly.
  YANG Description: L1 PSNP PDU auth failures
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """psnp_l1authfail must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__psnp_l1authfail = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_psnp_l1authfail(self):
  # Restore psnp_l1authfail to a fresh, unset uint32 leaf instance.
  self.__psnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_psnp_l2authfail(self):
  """
  Return the psnp_l2authfail leaf (uint32) of /counts_state.

  YANG description: L2 PSNP PDU auth failures.
  """
  return self.__psnp_l2authfail
def _set_psnp_l2authfail(self, v, load=False):
  """
  Setter method for psnp_l2authfail, mapped from YANG variable /counts_state/psnp_l2authfail (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_psnp_l2authfail is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_psnp_l2authfail() directly.
  YANG Description: L2 PSNP PDU auth failures
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """psnp_l2authfail must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__psnp_l2authfail = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_psnp_l2authfail(self):
  # Restore psnp_l2authfail to a fresh, unset uint32 leaf instance.
  self.__psnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_circ_l1authfail(self):
  """
  Return the circ_l1authfail leaf (uint32) of /counts_state.

  YANG description: Total L1 Hello PDU auth failures.
  """
  return self.__circ_l1authfail
def _set_circ_l1authfail(self, v, load=False):
  """
  Setter method for circ_l1authfail, mapped from YANG variable /counts_state/circ_l1authfail (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_circ_l1authfail is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_circ_l1authfail() directly.
  YANG Description: Total L1 Hello PDU auth failures
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """circ_l1authfail must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__circ_l1authfail = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_circ_l1authfail(self):
  # Restore circ_l1authfail to a fresh, unset uint32 leaf instance.
  self.__circ_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_circ_l2authfail(self):
  """
  Return the circ_l2authfail leaf (uint32) of /counts_state.

  YANG description: Total L2 Hello PDU auth failures.
  """
  return self.__circ_l2authfail
def _set_circ_l2authfail(self, v, load=False):
  """
  Setter method for circ_l2authfail, mapped from YANG variable /counts_state/circ_l2authfail (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_circ_l2authfail is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_circ_l2authfail() directly.
  YANG Description: Total L2 Hello PDU auth failures
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """circ_l2authfail must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__circ_l2authfail = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_circ_l2authfail(self):
  # Restore circ_l2authfail to a fresh, unset uint32 leaf instance.
  self.__circ_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_bad_lsp_log(self):
  """
  Return the bad_lsp_log list of /counts_state.

  YANG description: LSP Name.
  """
  return self.__bad_lsp_log
def _set_bad_lsp_log(self, v, load=False):
  """
  Setter method for bad_lsp_log, mapped from YANG variable /counts_state/bad_lsp_log (list)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_bad_lsp_log is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_bad_lsp_log() directly.
  YANG Description: LSP Name
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a keyed YANG list (key: type-index); raises on incompatible input.
    t = YANGDynClass(v,base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """bad_lsp_log must be of a type compatible with list""",
      'defined-type': "list",
      'generated-type': """YANGDynClass(base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)""",
    })
  self.__bad_lsp_log = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_bad_lsp_log(self):
  # Restore bad_lsp_log to a fresh, empty keyed YANG list instance.
  self.__bad_lsp_log = YANGDynClass(base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
# Expose each leaf/list as a read-only property (operational data: no setter).
areamis = __builtin__.property(_get_areamis)
maxareamis = __builtin__.property(_get_maxareamis)
badidlen = __builtin__.property(_get_badidlen)
seqskip = __builtin__.property(_get_seqskip)
seqerr = __builtin__.property(_get_seqerr)
l1dbol = __builtin__.property(_get_l1dbol)
l2dbol = __builtin__.property(_get_l2dbol)
ownpurge = __builtin__.property(_get_ownpurge)
csnp_l1authfail = __builtin__.property(_get_csnp_l1authfail)
csnp_l2authfail = __builtin__.property(_get_csnp_l2authfail)
psnp_l1authfail = __builtin__.property(_get_psnp_l1authfail)
psnp_l2authfail = __builtin__.property(_get_psnp_l2authfail)
circ_l1authfail = __builtin__.property(_get_circ_l1authfail)
circ_l2authfail = __builtin__.property(_get_circ_l2authfail)
bad_lsp_log = __builtin__.property(_get_bad_lsp_log)
# Registry of all pyangbind-managed elements of this container, used by the
# copy-constructor and serialisation machinery.
# NOTE(review): the original line was corrupted by file-concatenation residue
# ("| pybind/... | from operator import attrgetter") fused after the dict;
# the dict is restored clean and the displaced import is re-homed below, at
# the head of the following module's import block.
_pyangbind_elements = {'areamis': areamis, 'maxareamis': maxareamis, 'badidlen': badidlen, 'seqskip': seqskip, 'seqerr': seqerr, 'l1dbol': l1dbol, 'l2dbol': l2dbol, 'ownpurge': ownpurge, 'csnp_l1authfail': csnp_l1authfail, 'csnp_l2authfail': csnp_l2authfail, 'psnp_l1authfail': psnp_l1authfail, 'psnp_l2authfail': psnp_l2authfail, 'circ_l1authfail': circ_l1authfail, 'circ_l2authfail': circ_l2authfail, 'bad_lsp_log': bad_lsp_log, }
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import bad_lsp_log
class counts_state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-isis-operational - based on the path /counts-state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: IS-IS Counters
  """
  # Fixed attribute set: pyangbind bookkeeping plus one mangled slot per YANG element.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__areamis','__maxareamis','__badidlen','__seqskip','__seqerr','__l1dbol','__l2dbol','__ownpurge','__csnp_l1authfail','__csnp_l2authfail','__psnp_l1authfail','__psnp_l2authfail','__circ_l1authfail','__circ_l2authfail','__bad_lsp_log',)
  # Node names used when building YANG and REST paths for this container.
  _yang_name = 'counts-state'
  _rest_name = 'counts-state'
  _pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
  """Initialise every YANG element to its unset default; optionally copy from one source object."""
  # Resolve the XPath helper: explicit kwarg wins, then the parent's, else disabled.
  path_helper_ = kwargs.pop("path_helper", None)
  if path_helper_ is False:
    self._path_helper = False
  elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
    self._path_helper = path_helper_
  elif hasattr(self, "_parent"):
    path_helper_ = getattr(self._parent, "_path_helper", False)
    self._path_helper = path_helper_
  else:
    self._path_helper = False
  # Resolve extension methods the same way: explicit kwarg, then parent, else disabled.
  extmethods = kwargs.pop("extmethods", None)
  if extmethods is False:
    self._extmethods = False
  elif extmethods is not None and isinstance(extmethods, dict):
    self._extmethods = extmethods
  elif hasattr(self, "_parent"):
    extmethods = getattr(self._parent, "_extmethods", None)
    self._extmethods = extmethods
  else:
    self._extmethods = False
  # Default-initialise every leaf/list member as an unset YANGDynClass value.
  self.__psnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__circ_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__circ_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__maxareamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__bad_lsp_log = YANGDynClass(base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
  self.__seqskip = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__csnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__seqerr = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__l1dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__csnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__psnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__l2dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__areamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__ownpurge = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  self.__badidlen = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  load = kwargs.pop("load", None)
  # Copy-constructor path: a single positional source object must expose every
  # element in _pyangbind_elements; only changed elements are copied via setters.
  if args:
    if len(args) > 1:
      raise TypeError("cannot create a YANG container with >1 argument")
    all_attr = True
    for e in self._pyangbind_elements:
      if not hasattr(args[0], e):
        all_attr = False
        break
    if not all_attr:
      raise ValueError("Supplied object did not have the correct attributes")
    for e in self._pyangbind_elements:
      nobj = getattr(args[0], e)
      if nobj._changed() is False:
        continue
      setmethod = getattr(self, "_set_%s" % e)
      if load is None:
        setmethod(getattr(args[0], e))
      else:
        setmethod(getattr(args[0], e), load=load)
def _path(self):
  """Return the absolute YANG path of this container as a list of components."""
  if not hasattr(self, "_parent"):
    return [u'counts-state']
  return self._parent._path() + [self._yang_name]
def _rest_path(self):
  """Return the REST path of this container as a list of components."""
  if not hasattr(self, "_parent"):
    return [u'counts-state']
  base = self._parent._rest_path()
  if self._rest_name:
    return base + [self._rest_name]
  return base
def _get_areamis(self):
  """
  Return the areamis leaf (uint32) of /counts_state.

  YANG description: Area Mismatch.
  """
  return self.__areamis
def _set_areamis(self, v, load=False):
  """
  Setter method for areamis, mapped from YANG variable /counts_state/areamis (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_areamis is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_areamis() directly.
  YANG Description: Area Mismatch
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """areamis must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__areamis = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_areamis(self):
  # Restore areamis to a fresh, unset uint32 leaf instance.
  self.__areamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="areamis", rest_name="areamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_maxareamis(self):
  """
  Return the maxareamis leaf (uint32) of /counts_state.

  YANG description: Max Area Mismatch.
  """
  return self.__maxareamis
def _set_maxareamis(self, v, load=False):
  """
  Setter method for maxareamis, mapped from YANG variable /counts_state/maxareamis (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_maxareamis is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_maxareamis() directly.
  YANG Description: Max Area Mismatch
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """maxareamis must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__maxareamis = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_maxareamis(self):
  # Restore maxareamis to a fresh, unset uint32 leaf instance.
  self.__maxareamis = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maxareamis", rest_name="maxareamis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_badidlen(self):
  """
  Return the badidlen leaf (uint32) of /counts_state.

  YANG description: System ID Length Mismatch.
  """
  return self.__badidlen
def _set_badidlen(self, v, load=False):
  """
  Setter method for badidlen, mapped from YANG variable /counts_state/badidlen (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_badidlen is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_badidlen() directly.
  YANG Description: System ID Length Mismatch
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """badidlen must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__badidlen = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_badidlen(self):
  # Restore badidlen to a fresh, unset uint32 leaf instance.
  self.__badidlen = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="badidlen", rest_name="badidlen", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_seqskip(self):
  """
  Return the seqskip leaf (uint32) of /counts_state.

  YANG description: LSP Sequence Number Skipped.
  """
  return self.__seqskip
def _set_seqskip(self, v, load=False):
  """
  Setter method for seqskip, mapped from YANG variable /counts_state/seqskip (uint32)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_seqskip is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_seqskip() directly.
  YANG Description: LSP Sequence Number Skipped
  """
  # If v is already a pyangbind-typed value, unwrap it to its native form first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Re-wrap as a restricted uint32 leaf; raises if the value is out of range or mistyped.
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
      'error-string': """seqskip must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
    })
  self.__seqskip = t
  # Invoke the parent-notification hook when one is registered.
  if hasattr(self, '_set'):
    self._set()
def _unset_seqskip(self):
  # Restore seqskip to a fresh, unset uint32 leaf instance.
  self.__seqskip = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqskip", rest_name="seqskip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_seqerr(self):
  """
  Return the seqerr leaf (uint32) of /counts_state.

  YANG description: LSP Max Sequence Number Exceeded.
  """
  return self.__seqerr
def _set_seqerr(self, v, load=False):
"""
Setter method for seqerr, mapped from YANG variable /counts_state/seqerr (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_seqerr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_seqerr() directly.
YANG Description: LSP Max Sequence Number Exceeded
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """seqerr must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__seqerr = t
if hasattr(self, '_set'):
self._set()
def _unset_seqerr(self):
self.__seqerr = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seqerr", rest_name="seqerr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_l1dbol(self):
"""
Getter method for l1dbol, mapped from YANG variable /counts_state/l1dbol (uint32)
YANG Description: Level-1 Database Overload
"""
return self.__l1dbol
def _set_l1dbol(self, v, load=False):
"""
Setter method for l1dbol, mapped from YANG variable /counts_state/l1dbol (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_l1dbol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_l1dbol() directly.
YANG Description: Level-1 Database Overload
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """l1dbol must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__l1dbol = t
if hasattr(self, '_set'):
self._set()
def _unset_l1dbol(self):
self.__l1dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l1dbol", rest_name="l1dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_l2dbol(self):
"""
Getter method for l2dbol, mapped from YANG variable /counts_state/l2dbol (uint32)
YANG Description: Level-2 Database Overload
"""
return self.__l2dbol
def _set_l2dbol(self, v, load=False):
"""
Setter method for l2dbol, mapped from YANG variable /counts_state/l2dbol (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_l2dbol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_l2dbol() directly.
YANG Description: Level-2 Database Overload
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """l2dbol must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__l2dbol = t
if hasattr(self, '_set'):
self._set()
def _unset_l2dbol(self):
self.__l2dbol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="l2dbol", rest_name="l2dbol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_ownpurge(self):
"""
Getter method for ownpurge, mapped from YANG variable /counts_state/ownpurge (uint32)
YANG Description: Our LSP Purged
"""
return self.__ownpurge
def _set_ownpurge(self, v, load=False):
"""
Setter method for ownpurge, mapped from YANG variable /counts_state/ownpurge (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_ownpurge is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ownpurge() directly.
YANG Description: Our LSP Purged
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ownpurge must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__ownpurge = t
if hasattr(self, '_set'):
self._set()
def _unset_ownpurge(self):
self.__ownpurge = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ownpurge", rest_name="ownpurge", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_csnp_l1authfail(self):
"""
Getter method for csnp_l1authfail, mapped from YANG variable /counts_state/csnp_l1authfail (uint32)
YANG Description: L1 CSNP PDU auth failures
"""
return self.__csnp_l1authfail
def _set_csnp_l1authfail(self, v, load=False):
"""
Setter method for csnp_l1authfail, mapped from YANG variable /counts_state/csnp_l1authfail (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_csnp_l1authfail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_csnp_l1authfail() directly.
YANG Description: L1 CSNP PDU auth failures
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """csnp_l1authfail must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__csnp_l1authfail = t
if hasattr(self, '_set'):
self._set()
def _unset_csnp_l1authfail(self):
self.__csnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l1authfail", rest_name="csnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_csnp_l2authfail(self):
"""
Getter method for csnp_l2authfail, mapped from YANG variable /counts_state/csnp_l2authfail (uint32)
YANG Description: L2 CSNP PDU auth failures
"""
return self.__csnp_l2authfail
def _set_csnp_l2authfail(self, v, load=False):
"""
Setter method for csnp_l2authfail, mapped from YANG variable /counts_state/csnp_l2authfail (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_csnp_l2authfail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_csnp_l2authfail() directly.
YANG Description: L2 CSNP PDU auth failures
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """csnp_l2authfail must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__csnp_l2authfail = t
if hasattr(self, '_set'):
self._set()
def _unset_csnp_l2authfail(self):
self.__csnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="csnp-l2authfail", rest_name="csnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_psnp_l1authfail(self):
"""
Getter method for psnp_l1authfail, mapped from YANG variable /counts_state/psnp_l1authfail (uint32)
YANG Description: L1 PSNP PDU auth failures
"""
return self.__psnp_l1authfail
def _set_psnp_l1authfail(self, v, load=False):
"""
Setter method for psnp_l1authfail, mapped from YANG variable /counts_state/psnp_l1authfail (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_psnp_l1authfail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_psnp_l1authfail() directly.
YANG Description: L1 PSNP PDU auth failures
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """psnp_l1authfail must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__psnp_l1authfail = t
if hasattr(self, '_set'):
self._set()
def _unset_psnp_l1authfail(self):
self.__psnp_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l1authfail", rest_name="psnp-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_psnp_l2authfail(self):
"""
Getter method for psnp_l2authfail, mapped from YANG variable /counts_state/psnp_l2authfail (uint32)
YANG Description: L2 PSNP PDU auth failures
"""
return self.__psnp_l2authfail
def _set_psnp_l2authfail(self, v, load=False):
"""
Setter method for psnp_l2authfail, mapped from YANG variable /counts_state/psnp_l2authfail (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_psnp_l2authfail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_psnp_l2authfail() directly.
YANG Description: L2 PSNP PDU auth failures
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """psnp_l2authfail must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__psnp_l2authfail = t
if hasattr(self, '_set'):
self._set()
def _unset_psnp_l2authfail(self):
self.__psnp_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="psnp-l2authfail", rest_name="psnp-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_circ_l1authfail(self):
"""
Getter method for circ_l1authfail, mapped from YANG variable /counts_state/circ_l1authfail (uint32)
YANG Description: Total L1 Hello PDU auth failures
"""
return self.__circ_l1authfail
def _set_circ_l1authfail(self, v, load=False):
"""
Setter method for circ_l1authfail, mapped from YANG variable /counts_state/circ_l1authfail (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_circ_l1authfail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_circ_l1authfail() directly.
YANG Description: Total L1 Hello PDU auth failures
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """circ_l1authfail must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__circ_l1authfail = t
if hasattr(self, '_set'):
self._set()
def _unset_circ_l1authfail(self):
self.__circ_l1authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l1authfail", rest_name="circ-l1authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_circ_l2authfail(self):
"""
Getter method for circ_l2authfail, mapped from YANG variable /counts_state/circ_l2authfail (uint32)
YANG Description: Total L2 Hello PDU auth failures
"""
return self.__circ_l2authfail
def _set_circ_l2authfail(self, v, load=False):
"""
Setter method for circ_l2authfail, mapped from YANG variable /counts_state/circ_l2authfail (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_circ_l2authfail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_circ_l2authfail() directly.
YANG Description: Total L2 Hello PDU auth failures
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """circ_l2authfail must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__circ_l2authfail = t
if hasattr(self, '_set'):
self._set()
def _unset_circ_l2authfail(self):
self.__circ_l2authfail = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-l2authfail", rest_name="circ-l2authfail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_bad_lsp_log(self):
"""
Getter method for bad_lsp_log, mapped from YANG variable /counts_state/bad_lsp_log (list)
YANG Description: LSP Name
"""
return self.__bad_lsp_log
def _set_bad_lsp_log(self, v, load=False):
"""
Setter method for bad_lsp_log, mapped from YANG variable /counts_state/bad_lsp_log (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_bad_lsp_log is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bad_lsp_log() directly.
YANG Description: LSP Name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bad_lsp_log must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)""",
})
self.__bad_lsp_log = t
if hasattr(self, '_set'):
self._set()
def _unset_bad_lsp_log(self):
self.__bad_lsp_log = YANGDynClass(base=YANGListType("type_index",bad_lsp_log.bad_lsp_log, yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type-index', extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}), is_container='list', yang_name="bad-lsp-log", rest_name="bad-lsp-log", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-bad-lsp-log-entry', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
# Read-only property wrappers exposing the generated _get_* accessors.
# NOTE(review): ``__builtin__`` is the Python 2 builtins module — this is
# legacy pyangbind output that presumably targets Python 2; confirm before
# porting.
areamis = __builtin__.property(_get_areamis)
maxareamis = __builtin__.property(_get_maxareamis)
badidlen = __builtin__.property(_get_badidlen)
seqskip = __builtin__.property(_get_seqskip)
seqerr = __builtin__.property(_get_seqerr)
l1dbol = __builtin__.property(_get_l1dbol)
l2dbol = __builtin__.property(_get_l2dbol)
ownpurge = __builtin__.property(_get_ownpurge)
csnp_l1authfail = __builtin__.property(_get_csnp_l1authfail)
csnp_l2authfail = __builtin__.property(_get_csnp_l2authfail)
psnp_l1authfail = __builtin__.property(_get_psnp_l1authfail)
psnp_l2authfail = __builtin__.property(_get_psnp_l2authfail)
circ_l1authfail = __builtin__.property(_get_circ_l1authfail)
circ_l2authfail = __builtin__.property(_get_circ_l2authfail)
bad_lsp_log = __builtin__.property(_get_bad_lsp_log)

# Mapping of element name -> property, used by pyangbind for introspection.
# (Fixed: removed non-Python dataset-extraction residue that had been fused
# onto the end of this line.)
_pyangbind_elements = {'areamis': areamis, 'maxareamis': maxareamis, 'badidlen': badidlen, 'seqskip': seqskip, 'seqerr': seqerr, 'l1dbol': l1dbol, 'l2dbol': l2dbol, 'ownpurge': ownpurge, 'csnp_l1authfail': csnp_l1authfail, 'csnp_l2authfail': csnp_l2authfail, 'psnp_l1authfail': psnp_l1authfail, 'psnp_l2authfail': psnp_l2authfail, 'circ_l1authfail': circ_l1authfail, 'circ_l2authfail': circ_l2authfail, 'bad_lsp_log': bad_lsp_log, }
import unittest
from py_sc_fermi.defect_charge_state import DefectChargeState
from py_sc_fermi.defect_charge_state import FrozenDefectChargeState
class TestDefectChargeStateInit(unittest.TestCase):
    """Tests for DefectChargeState construction."""

    def test_defect_charge_state_is_initialised(self):
        # Constructor arguments should land on the corresponding private
        # attributes, and a freshly built charge state must not be
        # concentration-fixed.
        charge = 1.0
        energy = 123.4
        degeneracy = 2
        defect_charge_state = DefectChargeState(charge=charge,
                                                energy=energy,
                                                degeneracy=degeneracy)
        self.assertEqual(defect_charge_state._charge, charge)
        self.assertEqual(defect_charge_state._energy, energy)
        self.assertEqual(defect_charge_state._degeneracy, degeneracy)
        # assertIs pins the flag to the boolean False; the previous
        # assertEqual(x, False) would also have accepted 0.
        self.assertIs(defect_charge_state._fixed_concentration, False)
class TestDefectChargeState(unittest.TestCase):
    """Unit tests for DefectChargeState properties and derived quantities."""

    def setUp(self):
        # One shared instance per test: q=+1.0, E=0.1234, degeneracy 2.
        self.defect_charge_state = DefectChargeState(charge=1.0,
                                                     energy=0.1234,
                                                     degeneracy=2)

    def test_charge_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.charge, dcs._charge)

    def test_energy_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.energy, dcs._energy)

    def test_degeneracy_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.degeneracy, dcs._degeneracy)

    def test_concentration_is_fixed_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.concentration_is_fixed, dcs._fixed_concentration)

    def test_get_formation_energy(self):
        # Formation energy at e_fermi = 1.2 is E + q * e_fermi.
        formation_energy = self.defect_charge_state.get_formation_energy(1.2)
        self.assertEqual(formation_energy, 0.1234 + 1.0 * 1.2)

    def test_get_concentration(self):
        conc = self.defect_charge_state.get_concentration(e_fermi=1.2,
                                                          temperature=298.0)
        self.assertEqual(conc, 8.311501552630706e-23)
class TestFrozenDefectChargeStateInit(unittest.TestCase):
    """Tests for FrozenDefectChargeState construction."""

    def test_frozen_defect_charge_state_is_initialised(self):
        charge = 1.0
        concentration = 123.4
        defect_charge_state = FrozenDefectChargeState(charge=charge,
                                                      concentration=concentration)
        self.assertEqual(defect_charge_state._charge, charge)
        self.assertEqual(defect_charge_state._concentration, concentration)
        # assertIs pins the flag to the boolean True; the previous
        # assertEqual(x, True) would also have accepted 1.
        self.assertIs(defect_charge_state._fixed_concentration, True)
class TestFrozenDefectChargeState(unittest.TestCase):
    """Tests for FrozenDefectChargeState concentration behaviour."""

    def setUp(self):
        self.defect_charge_state = FrozenDefectChargeState(charge=1.0,
                                                           concentration=0.1234)

    def test_get_concentration(self):
        # The fixed concentration is returned regardless of e_fermi and
        # temperature.
        conc = self.defect_charge_state.get_concentration(e_fermi=1.2,
                                                          temperature=298.0)
        self.assertEqual(conc, 0.1234)


if __name__ == '__main__':
    # Fixed: removed non-Python dataset-extraction residue that had been
    # fused onto the end of this line.
    unittest.main()
from py_sc_fermi.defect_charge_state import DefectChargeState
from py_sc_fermi.defect_charge_state import FrozenDefectChargeState
class TestDefectChargeStateInit(unittest.TestCase):
    """Tests for DefectChargeState construction."""

    def test_defect_charge_state_is_initialised(self):
        # Constructor arguments should land on the corresponding private
        # attributes, and a freshly built charge state must not be
        # concentration-fixed.
        charge = 1.0
        energy = 123.4
        degeneracy = 2
        defect_charge_state = DefectChargeState(charge=charge,
                                                energy=energy,
                                                degeneracy=degeneracy)
        self.assertEqual(defect_charge_state._charge, charge)
        self.assertEqual(defect_charge_state._energy, energy)
        self.assertEqual(defect_charge_state._degeneracy, degeneracy)
        # assertIs pins the flag to the boolean False; the previous
        # assertEqual(x, False) would also have accepted 0.
        self.assertIs(defect_charge_state._fixed_concentration, False)
class TestDefectChargeState(unittest.TestCase):
    """Unit tests for DefectChargeState properties and derived quantities."""

    def setUp(self):
        # One shared instance per test: q=+1.0, E=0.1234, degeneracy 2.
        self.defect_charge_state = DefectChargeState(charge=1.0,
                                                     energy=0.1234,
                                                     degeneracy=2)

    def test_charge_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.charge, dcs._charge)

    def test_energy_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.energy, dcs._energy)

    def test_degeneracy_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.degeneracy, dcs._degeneracy)

    def test_concentration_is_fixed_property(self):
        dcs = self.defect_charge_state
        self.assertEqual(dcs.concentration_is_fixed, dcs._fixed_concentration)

    def test_get_formation_energy(self):
        # Formation energy at e_fermi = 1.2 is E + q * e_fermi.
        formation_energy = self.defect_charge_state.get_formation_energy(1.2)
        self.assertEqual(formation_energy, 0.1234 + 1.0 * 1.2)

    def test_get_concentration(self):
        conc = self.defect_charge_state.get_concentration(e_fermi=1.2,
                                                          temperature=298.0)
        self.assertEqual(conc, 8.311501552630706e-23)
class TestFrozenDefectChargeStateInit(unittest.TestCase):
    """Tests for FrozenDefectChargeState construction."""

    def test_frozen_defect_charge_state_is_initialised(self):
        charge = 1.0
        concentration = 123.4
        defect_charge_state = FrozenDefectChargeState(charge=charge,
                                                      concentration=concentration)
        self.assertEqual(defect_charge_state._charge, charge)
        self.assertEqual(defect_charge_state._concentration, concentration)
        # assertIs pins the flag to the boolean True; the previous
        # assertEqual(x, True) would also have accepted 1.
        self.assertIs(defect_charge_state._fixed_concentration, True)
class TestFrozenDefectChargeState(unittest.TestCase):
    """Checks that a frozen charge state always reports its fixed concentration."""

    def setUp(self):
        self.defect_charge_state = FrozenDefectChargeState(charge=1.0,
                                                           concentration=0.1234)

    def test_get_concentration(self):
        result = self.defect_charge_state.get_concentration(e_fermi=1.2,
                                                            temperature=298.0)
        self.assertEqual(result, 0.1234)


if __name__ == '__main__':
    unittest.main()
import sys
import json
import datetime
import requests
import html
import tldextract
from bs4 import BeautifulSoup, Comment
import re
import signal
from urllib.parse import urlparse
from urllib.parse import parse_qs
'''
This contains utilities used by other functions in the YoutubeDataApi class, as well as a few convenience functions for data analysis.
'''
# Public API of this module. The leading-underscore helpers are exported
# deliberately so sibling modules in the package can import them.
__all__ = [
    '_chunker',
    '_load_response',
    '_text_from_html',
    'parse_yt_datetime',
    'strip_video_id_from_url',
    'get_upload_playlist_id',
    'get_liked_playlist_id',
    'is_user',
    'strip_youtube_id',
    'get_channel_id_from_custom_url',
    'get_url_from_video_id'
]
class TimeoutError(Exception):
    """Raised by the `timeout` context manager when the alarm fires.

    NOTE(review): this shadows the builtin ``TimeoutError`` (Python 3.3+);
    renaming would break existing callers, so it is left as-is — confirm
    before changing.
    """
    pass
class timeout:
    """Context manager that raises ``TimeoutError`` after ``seconds``.

    Implemented with ``SIGALRM``, so it is Unix-only and must be entered
    from the main thread (the only thread allowed to set signal handlers).
    """
    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message
    def handle_timeout(self, signum, frame):
        # Installed by __enter__; aborts whatever the guarded block is doing.
        raise TimeoutError(self.error_message)
    def __enter__(self):
        signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.seconds)
    def __exit__(self, type, value, traceback):
        # Cancel the pending alarm whether or not the block timed out.
        signal.alarm(0)
def _chunker(l, chunksize):
"""Yield successive ``chunksize``-sized chunks from l."""
for i in range(0, len(l), chunksize):
yield l[i:i + chunksize]
def _load_response(response):
'''
Loads the response to json, and checks for errors.
'''
response.raise_for_status()
response_json = json.loads(response.text)
return response_json
def _text_from_html(html_body):
    """Extract the human-visible text from an HTML document.

    Skips script/style/metadata content and HTML comments, strips any
    leftover tags, unescapes entities, and collapses whitespace runs to
    single spaces.
    """
    _invisible_parents = {'style', 'script', 'head', 'title', 'meta', '[document]'}

    def _tag_visible(element):
        """Keep only text nodes that render as page text."""
        if element.parent.name in _invisible_parents:
            return False
        return not isinstance(element, Comment)

    soup = BeautifulSoup(html_body, 'html.parser')
    visible_text = (t for t in soup.findAll(text=True) if _tag_visible(t))
    text = u" ".join(t.strip() for t in visible_text)
    text = re.sub(r"[\n\t]", ' ', text)
    text = re.sub(r'<.+?>', '', text)
    text = html.unescape(text)
    return ' '.join(text.split())
def parse_yt_datetime(date_str):
    '''
    Parses a date string returned from YouTube's API into a Python datetime.

    Parameters
    ----------
    date_str : str or None
        Timestamp as returned by the Data API,
        e.g. ``"2019-01-31T12:34:56.000Z"``.

    Returns
    -------
    datetime.datetime or None
        The parsed datetime (naive; the trailing ``Z`` implies UTC), or
        ``None`` when ``date_str`` is falsy or not in the expected format.
    '''
    if not date_str:
        return None
    try:
        return datetime.datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.%fZ")
    except (ValueError, TypeError):
        # ValueError: wrong format; TypeError: non-string input.
        # Narrowed from a bare `except`, which also silently swallowed
        # KeyboardInterrupt/SystemExit.
        return None
def strip_video_id_from_url(url):
    '''Strips the video_id from YouTube URL.

    Handles short links (youtu.be), /embed/ urls, attribution_link
    redirect urls, and plain /watch urls. Returns None when no video id
    can be found.
    '''
    # registered_domain distinguishes youtu.be short links from youtube.com.
    domain = tldextract.extract(url).registered_domain
    url_ = None
    if 'youtu.be' in domain:
        # Short link: the id is the last path segment, minus any query string.
        url_ = url[url.rindex('/') + 1:]
        if '?' in url_:
            url_ = url_[:url_.rindex('?')]
    elif "youtube.com" in domain and "embed" in url:
        # /embed/<id>?...: last path segment before the query string.
        url_ = url.rpartition("/")[-1].partition("?")[0]
    elif "youtube.com" in domain and "attribution_link" in url:
        u = urlparse(url)
        # Get and parse the query string, which will look like:
        #. a=--oPiH1x0pU&u=%2Fwatch%3Fv%3DHR1Ta25HkBM%26feature%3Dshare
        q = parse_qs(u.query)
        # Now we have a decoded query string, e.g., 'u':['/watch?v=HR1Ta25HkBM&feature=share']
        if ( 'u' in q ):
            # Parse like it was a normal /watch url
            q = parse_qs(urlparse(q['u'][0]).query)
            if ( 'v' in q ):
                url_ = q['v'][0]
            elif ( 'video_id' in q ):
                url_ = q['video_id'][0]
    elif "youtube.com" in domain:
        # Plain /watch url: id is in the `v` (or legacy `video_id`) parameter.
        u = urlparse(url)
        q = parse_qs(u.query)
        if ( 'v' in q ):
            url_ = q['v'][0]
        elif ( 'video_id' in q ):
            url_ = q['video_id'][0]
    return url_
def get_upload_playlist_id(channel_id):
    '''Given a channel_id, returns the user uploaded playlist id.

    YouTube encodes the uploads playlist as the channel id with its
    two-character prefix replaced by ``UU``.
    '''
    return 'UU' + channel_id[2:]
def get_liked_playlist_id(channel_id):
    '''Given a channel_id, returns the user liked playlist id.

    YouTube encodes the liked-videos playlist as the channel id with its
    two-character prefix replaced by ``LL``.
    '''
    return 'LL' + channel_id[2:]
def is_user(channel_url):
    '''
    Checks if url is channel or user.

    Returns
    -------
    bool
        True for a ``/user/`` url, False for a ``/channel/`` url.

    Raises
    ------
    ValueError
        If the url is neither form. (ValueError subclasses Exception, so
        callers catching the previous bare ``Exception`` still work.)
    '''
    if 'youtube.com/user/' in channel_url:
        return True
    if 'youtube.com/channel/' in channel_url:
        return False
    raise ValueError("Didn't recognize url {}".format(channel_url))
def strip_youtube_id(channel_url):
    '''
    From a URL returns the YT ID.

    Drops a trailing slash and any ``/featured`` suffix, then takes the
    last path segment with its ``#fragment`` removed.
    '''
    cleaned = channel_url.rstrip('/').replace('/featured', '')
    last_segment = cleaned.split('/')[-1]
    return last_segment.split('#')[0]
def get_channel_id_from_custom_url(url):
    '''
    Gets channel id from a url of a custom url IE: http://youtube.com/stefbot
    returns a channel_id IE: UCuMo0RRtnNDuMB8DV5stEag

    Performs a live HTTP GET and scrapes the subscribe button for its
    data-channel-external-id attribute.

    NOTE(review): this depends on YouTube's exact markup/class names and may
    break if the page layout changes; `soup.find` can return None, which
    would raise AttributeError here — confirm desired failure behaviour.
    '''
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'lxml')
    class_ = ('yt-uix-button yt-uix-button-size-default '
              'yt-uix-button-subscribe-branded '
              'yt-uix-button-has-icon no-icon-markup '
              'yt-uix-subscription-button yt-can-buffer')
    channel_id = soup.find('button', class_=class_).get('data-channel-external-id')
    return channel_id
def get_url_from_video_id(video_id):
    '''
    Given a video id, this function returns the full URL.
    '''
    # Builds a canonical watch URL; `video_id` is not validated.
    url = "https://youtube.com/watch?v={}".format(video_id)
    return url | youtube_api/youtube_api_utils.py | import sys
import json
import datetime
import requests
import html
import tldextract
from bs4 import BeautifulSoup, Comment
import re
import signal
from urllib.parse import urlparse
from urllib.parse import parse_qs
'''
This contains utilities used by other functions in the YoutubeDataApi class, as well as a few convenience functions for data analysis.
'''
__all__ = [
'_chunker',
'_load_response',
'_text_from_html',
'parse_yt_datetime',
'strip_video_id_from_url',
'get_upload_playlist_id',
'get_liked_playlist_id',
'is_user',
'strip_youtube_id',
'get_channel_id_from_custom_url',
'get_url_from_video_id'
]
class TimeoutError(Exception):
pass
class timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
def _chunker(l, chunksize):
"""Yield successive ``chunksize``-sized chunks from l."""
for i in range(0, len(l), chunksize):
yield l[i:i + chunksize]
def _load_response(response):
'''
Loads the response to json, and checks for errors.
'''
response.raise_for_status()
response_json = json.loads(response.text)
return response_json
def _text_from_html(html_body):
'''
Gets clean text from html.
'''
def _tag_visible(element):
'''Gets the text elements we're interested in'''
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
soup = BeautifulSoup(html_body, 'html.parser')
raw_text = soup.findAll(text=True)
visible_text = filter(_tag_visible, raw_text)
text = u" ".join(t.strip() for t in visible_text)
text = re.sub(r"[\n\t]", ' ', text)
text = re.sub(r'<.+?>', '', text)
text = html.unescape(text)
text = ' '.join(text.split())
return text
def parse_yt_datetime(date_str):
'''
Parses a date string returned from YouTube's API into a Python datetime.
'''
date = None
if date_str:
try:
date = datetime.datetime.strptime(date_str,"%Y-%m-%dT%H:%M:%S.%fZ")
except:
pass
return date
def strip_video_id_from_url(url):
'''Strips the video_id from YouTube URL.'''
domain = tldextract.extract(url).registered_domain
url_ = None
if 'youtu.be' in domain:
url_ = url[url.rindex('/') + 1:]
if '?' in url_:
url_ = url_[:url_.rindex('?')]
elif "youtube.com" in domain and "embed" in url:
url_ = url.rpartition("/")[-1].partition("?")[0]
elif "youtube.com" in domain and "attribution_link" in url:
u = urlparse(url)
# Get and parse the query string, which will look like:
#. a=--oPiH1x0pU&u=%2Fwatch%3Fv%3DHR1Ta25HkBM%26feature%3Dshare
q = parse_qs(u.query)
# Now we have a decoded query string, e.g., 'u':['/watch?v=HR1Ta25HkBM&feature=share']
if ( 'u' in q ):
# Parse like it was a normal /watch url
q = parse_qs(urlparse(q['u'][0]).query)
if ( 'v' in q ):
url_ = q['v'][0]
elif ( 'video_id' in q ):
url_ = q['video_id'][0]
elif "youtube.com" in domain:
u = urlparse(url)
q = parse_qs(u.query)
if ( 'v' in q ):
url_ = q['v'][0]
elif ( 'video_id' in q ):
url_ = q['video_id'][0]
return url_
def get_upload_playlist_id(channel_id):
'''Given a channel_id, returns the user uploaded playlist id.'''
playlist_id = 'UU' + channel_id[2:]
return playlist_id
def get_liked_playlist_id(channel_id):
'''Given a channel_id, returns the user liked playlist id.'''
playlist_id = 'LL' + channel_id[2:]
return playlist_id
def is_user(channel_url):
'''
Checks if url is channel or user
'''
if 'youtube.com/user/' in channel_url:
return True
elif 'youtube.com/channel/' in channel_url:
return False
else:
raise Exception("Didn't recognize url {}".format(channel_url))
def strip_youtube_id(channel_url):
'''
From a URL returns the YT ID.
'''
return (channel_url.rstrip('/').replace('/featured', '')
.split('/')[-1].split('#')[0])
def get_channel_id_from_custom_url(url):
'''
Gets channel id from a url of a custom url IE: http://youtube.com/stefbot
returns a channel_id IE: UCuMo0RRtnNDuMB8DV5stEag
'''
r = requests.get(url)
soup = BeautifulSoup(r.content, 'lxml')
class_ = ('yt-uix-button yt-uix-button-size-default '
'yt-uix-button-subscribe-branded '
'yt-uix-button-has-icon no-icon-markup '
'yt-uix-subscription-button yt-can-buffer')
channel_id = soup.find('button', class_=class_).get('data-channel-external-id')
return channel_id
def get_url_from_video_id(video_id):
'''
Given a video id, this function returns the full URL.
'''
url = "https://youtube.com/watch?v={}".format(video_id)
return url | 0.368974 | 0.102799 |
from __future__ import absolute_import
import json
import os
import time
from typing import *
import requests
import six
from .exc import PIXError, PIXLoginError
from .factory import Factory
from .model import PIXProject
from .utils import import_modules
if TYPE_CHECKING:
import requests.cookies
# Public API: only the Session entry point is exported.
__all__ = [
    'Session',
]
class SessionHeader(object):
    """
    Temporarily overlay extra headers on a :class:`Session`.

    On entry the session's headers are snapshotted and updated with the
    supplied ones; on exit the snapshot is restored unconditionally.
    """
    def __init__(self, session, headers):
        # type: (Session, Dict[str, str]) -> None
        """
        Parameters
        ----------
        session : Session
            Session whose headers are temporarily modified.
        headers : Dict[str, str]
            Header entries to overlay while the context is active.
        """
        super(SessionHeader, self).__init__()
        self._session = session
        self.headers = headers
        self.original = None

    def __enter__(self):
        # Snapshot a *copy* so mutating the live dict cannot corrupt it.
        self.original = dict(self._session.headers)
        self._session.headers.update(self.headers)

    def __exit__(self, exception_type, exception_value, traceback):
        # Restore the pre-entry headers even when the body raised.
        self._session.headers = self.original
class Expiry(object):
    """
    Truthy until ``seconds`` have elapsed since construction.

    Examples
    --------
    >>> import time
    >>> e = Expiry(5)
    >>> i = 0
    >>> while True:
    ...     if not e:
    ...         break
    ...     print(i)
    ...     i += 1
    ...     time.sleep(1)
    """
    def __init__(self, seconds):
        # type: (Union[int, float]) -> None
        """
        Parameters
        ----------
        seconds : Union[int, float]
            Lifetime of the object; the absolute deadline is recorded now.
        """
        self.expires = time.time() + seconds

    def __bool__(self):
        # type: () -> bool
        """True while the current time is still before the deadline."""
        return time.time() < self.expires

    # Python 2 truthiness hook.
    __nonzero__ = __bool__
class Session(object):
    """
    A Session manages all API calls to the PIX REST endpoints. It manages
    the current PIX session including the user logging in and the current
    active project. PIX REST calls return results differently depending on
    the active user and project. It also handles refreshing the active
    session if it expires.

    Examples
    --------
    >>> session = Session()
    ... for project in session.get_projects():
    ...     for feed in project.iter_unread():
    ...         for attachment in feed.iter_attachments():
    ...             for note in attachment.get_notes():
    ...                 image_bytes = note.get_media('composite')
    """
    def __init__(self, api_url=None, app_key=None, username=None,
                 password=None, plugin_paths=None):
        # type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[Union[str, Iterable[str]]]) -> None
        """
        Parameters
        ----------
        api_url : Optional[str]
            The host PIX API url; defaults to $PIX_API_URL.
        app_key : Optional[str]
            The host PIX API key; defaults to $PIX_APP_KEY.
        username : Optional[str]
            The PIX login username; defaults to $PIX_USERNAME.
        password : Optional[str]
            The PIX password for `username`; defaults to $PIX_PASSWORD.
        plugin_paths : Optional[Union[str, Iterable[str]]]
            Paths to custom modules or packages loaded before any objects
            are built via the factory (registers custom bases); defaults
            to $PIX_PLUGIN_PATH.

        Raises
        ------
        PIXLoginError
            If any credential is still missing after environment fallback.
        """
        if api_url is None:
            api_url = os.environ.get('PIX_API_URL')
        if app_key is None:
            app_key = os.environ.get('PIX_APP_KEY')
        if username is None:
            username = os.environ.get('PIX_USERNAME')
        if password is None:
            password = os.environ.get('PIX_PASSWORD')
        _missing_creds = [
            name for name, value in [
                ('api_url', api_url),
                ('app_key', app_key),
                ('username', username),
                ('password', password),
            ] if not value
        ]
        if _missing_creds:
            raise PIXLoginError('Missing login credentials: {}'.format(
                ', '.join(_missing_creds)))
        # Narrow Optional[str] -> str for static type checkers.
        assert api_url, 'Invalid credentials: api_url'
        assert app_key, 'Invalid credentials: app_key'
        assert username, 'Invalid credentials: username'
        assert password, 'Invalid credentials: password'
        self.api_url = api_url  # type: str
        self.app_key = app_key  # type: str
        self.username = username  # type: str
        self.password = password  # type: str
        plugin_paths = plugin_paths or os.environ.get('PIX_PLUGIN_PATH')
        if plugin_paths:
            import_modules(plugin_paths)
        self.factory = Factory(self)
        self.headers = {
            'X-PIX-App-Key': self.app_key,
            'Content-type': 'application/json;charset=utf-8',
            'Accept': 'application/json;charset=utf-8'
        }
        self.cookies = None  # type: Optional[requests.cookies.RequestsCookieJar]
        # Time-out token for the current server-side session; falsy when the
        # session is missing or expired, which triggers a re-login.
        self._session = None  # type: Optional[Expiry]
        # Current active project, set by `load_project`.
        self.active_project = None  # type: Optional[PIXProject]

    def __enter__(self):
        # type: () -> Session
        """
        Use session as a context manager to log out when it exits.

        Examples
        --------
        >>> with Session() as session:
        ...     project = session.load_project('FooBar')
        ...     label = project.label

        Returns
        -------
        Session
        """
        return self

    def __exit__(self, *args):
        self.logout()

    def login(self):
        """
        Log into PIX and start a new server-side session.

        Raises
        ------
        PIXLoginError
            If the server does not answer with 201 Created.
        """
        result = requests.put(
            url=self.api_url + '/session/',
            headers=self.headers,
            data=json.dumps(
                {'username': self.username, 'password': self.password}))
        if result.status_code != 201:
            raise PIXLoginError(result.reason)
        self.cookies = result.cookies
        # Track the server-advertised lifetime so expiry triggers re-login.
        self._session = Expiry(self.time_remaining())

    def logout(self):
        """
        Log out of PIX and invalidate the local session token.
        """
        self.delete_session()
        # BUGFIX: previously the requests.Response returned by
        # delete_session() was stored in self._session. A 2xx Response is
        # truthy, so the session still looked alive after logging out and
        # subsequent calls skipped re-login. Reset to None instead.
        self._session = None

    def time_remaining(self):
        # type: () -> int
        """
        Get the time remaining for current session.

        Returns
        -------
        int
        """
        # Not using self.get here is intentional to avoid recursive
        # self.login calls.
        response = requests.get(
            url=self.api_url + '/session/time_remaining',
            cookies=self.cookies,
            headers=self.headers)
        return json.loads(response.text)

    def delete_session(self):
        # type: () -> requests.Response
        """
        End a PIX session.

        Returns
        -------
        requests.Response
        """
        return requests.delete(
            url=self.api_url + '/session/', cookies=self.cookies,
            headers=self.headers)

    def _prepare(self, url, payload=None):
        # type: (str, Optional[Any]) -> Tuple[str, Optional[str]]
        """
        Shared request plumbing for put/post/delete/get.

        Logs in when there is no live session, prefixes relative urls with
        `api_url`, and JSON-encodes the payload when one is given.

        Returns
        -------
        Tuple[str, Optional[str]]
            The absolute url and the serialized payload (or None).
        """
        if not self._session:
            self.login()
        if self.api_url not in url:
            url = self.api_url + url
        data = None if payload is None else json.dumps(payload)
        return url, data

    def put(self, url, payload=None):
        # type: (str, Optional[Any]) -> requests.Response
        """
        PUT REST call.

        Parameters
        ----------
        url : str
        payload : Optional[Any]

        Returns
        -------
        requests.Response
        """
        url, data = self._prepare(url, payload)
        return requests.put(
            url=url, cookies=self.cookies, headers=self.headers, data=data)

    def post(self, url, payload=None):
        # type: (str, Optional[Any]) -> requests.Response
        """
        POST REST call.

        Parameters
        ----------
        url : str
        payload : Optional[Any]

        Returns
        -------
        requests.Response
        """
        url, data = self._prepare(url, payload)
        return requests.post(
            url=url, cookies=self.cookies, headers=self.headers, data=data)

    def delete(self, url, payload=None):
        # type: (str, Optional[Any]) -> requests.Response
        """
        DELETE REST call.

        Parameters
        ----------
        url : str
        payload : Optional[Any]

        Returns
        -------
        requests.Response
        """
        url, data = self._prepare(url, payload)
        return requests.delete(
            url=url, cookies=self.cookies, headers=self.headers, data=data)

    def get(self, url):
        # type: (str) -> Any
        """
        GET REST call; the raw response is run through `process_result`.

        Parameters
        ----------
        url : str

        Returns
        -------
        Any
        """
        url, _ = self._prepare(url)
        return self.process_result(
            requests.get(url=url, cookies=self.cookies, headers=self.headers))

    def process_result(self, raw_result):
        # type: (requests.Response) -> Union[requests.Response, Any]
        """
        Process request results. This utilizes the `Factory` to promote
        certain elements within the raw results to dict-like objects. These
        objects may be built with base classes registered with the factory
        to offer additional helper methods.

        Parameters
        ----------
        raw_result : requests.Response

        Returns
        -------
        Union[requests.Response, Any]
        """
        # Non-JSON responses (e.g. when headers were temporarily swapped
        # via `header`) are returned untouched.
        if self.headers['Accept'] != 'application/json;charset=utf-8':
            return raw_result
        return self.factory.objectfy(json.loads(raw_result.text))

    def header(self, headers):
        # type: (Dict[str, str]) -> SessionHeader
        """
        Context manager for temporarily setting the session headers.

        Examples
        --------
        >>> session = Session()
        >>> with session.header({'Accept': 'text/xml'}):
        ...     response = session.get('/media/1234/markup')

        Parameters
        ----------
        headers : Dict[str, str]

        Returns
        -------
        SessionHeader
        """
        return SessionHeader(self, headers)

    def get_projects(self, limit=None):
        # type: (Optional[int]) -> List[PIXProject]
        """
        Load all projects user has access to.

        Parameters
        ----------
        limit : Optional[int]
            Maximum number of projects to return; all when None.

        Returns
        -------
        List[PIXProject]

        Raises
        ------
        PIXError
            If the server reports a bad request.
        """
        url = '/projects'
        if limit is not None:
            url += '?limit={}'.format(limit)
        response = self.get(url)
        if isinstance(response, dict):
            if response.get('type') == 'bad_request':
                raise PIXError(
                    'Error fetching projects: {}'.format(
                        response.get('user_message')))
        response = cast(List[PIXProject], response)
        return response

    def load_project(self, project):
        # type: (Union[str, PIXProject]) -> PIXProject
        """
        Load a project as the active project.

        Parameters
        ----------
        project : Union[str, PIXProject]
            A project object, or a project label / id to look up.

        Returns
        -------
        PIXProject

        Raises
        ------
        PIXError
            If no project matches, or the server rejects the switch.
        """
        if isinstance(project, six.string_types):
            for candidate in self.get_projects():
                if candidate['label'] == project or candidate['id'] == project:
                    project = candidate
                    break
            else:
                raise PIXError(
                    'No project found {!r}'.format(project))
        result = self.put(
            '/session/active_project', payload={'id': project['id']})
        if result.status_code != 200:
            raise PIXError(result.reason)
        self.active_project = project
        return project
import json
import os
import time
from typing import *
import requests
import six
from .exc import PIXError, PIXLoginError
from .factory import Factory
from .model import PIXProject
from .utils import import_modules
if TYPE_CHECKING:
import requests.cookies
__all__ = [
'Session',
]
class SessionHeader(object):
"""
Context manager for temporarily changing the session headers.
"""
def __init__(self, session, headers):
# type: (Session, Dict[str, str]) -> None
"""
Parameters
----------
session : Session
headers : Dict[str, str]
"""
super(SessionHeader, self).__init__()
self._session = session
self.original = None
self.headers = headers
def __enter__(self):
# copy here!
self.original = dict(self._session.headers)
self._session.headers.update(self.headers)
def __exit__(self, exception_type, exception_value, traceback):
self._session.headers = self.original
class Expiry(object):
"""
Object that existence check fails for after a set duration in seconds.
Examples
--------
>>> import time
>>> e = Expiry(5)
>>> i = 0
>>> while True:
... if not e:
... break
... print(i)
... i += 1
... time.sleep(1)
"""
def __init__(self, seconds):
# type: (Union[int, float]) -> None
"""
Parameters
----------
seconds : Union[int, float]
"""
self.expires = time.time() + seconds
def __bool__(self):
# type: () -> bool
"""
Returns
-------
bool
"""
return time.time() < self.expires
__nonzero__ = __bool__
class Session(object):
"""
A Session manages all API calls to the PIX REST endpoints. It manages
the current PIX session including the user logging in and the current
active project. PIX REST calls return results differently depending on
the active user and project. It also handles refreshing the active session
if it expires
Examples
--------
>>> session = Session()
... for project in session.get_projects():
... for feed in project.iter_unread():
... for attachment in feed.iter_attachments():
... for note in attachment.get_notes():
... image_bytes = note.get_media('composite')
"""
def __init__(self, api_url=None, app_key=None, username=None,
password=None, plugin_paths=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[Union[str, Iterable[str]]]) -> None
"""
Parameters
----------
api_url : Optional[str]
The host PIX API url. If None, then the environment variable
PIX_API_URL will be used.
app_key : Optional[str]
The host PIX API KEY. If None, then the environment variable
PIX_APP_KEY will be used.
username : Optional[str]
The PIX username used for logging in. If None, then the environment
variable PIX_USERNAME will be used.
password : Optional[str]
The PIX password associated with `username` used for logging in.
If None, then the environment variable PIX_PASSWORD will be used.
plugin_paths : Optional[Union[str, Iterable[str]]]
Paths to custom modules or packages that should be loaded prior to
constructing any objects via the factory. This allows for
registration of any custom bases within the factory. If None,
the environment variable PIX_PLUGIN_PATH will be used.
"""
if api_url is None:
api_url = os.environ.get('PIX_API_URL')
if app_key is None:
app_key = os.environ.get('PIX_APP_KEY')
if username is None:
username = os.environ.get('PIX_USERNAME')
if password is None:
password = os.environ.get('PIX_PASSWORD')
_missing_creds = [
x[0] for x in [
('api_url', api_url),
('app_key', app_key),
('username', username),
('password', password),
] if not x[1]
]
if _missing_creds:
raise PIXLoginError('Missing login credentials: {}'.format(
', '.join(_missing_creds)))
assert api_url, 'Invalid credentials: api_url'
assert app_key, 'Invalid credentials: app_key'
assert username, 'Invalid credentials: username'
assert password, 'Invalid credentials: password'
self.api_url = api_url # type: str
self.app_key = app_key # type: str
self.username = username # type: str
self.password = password # type: str
plugin_paths = plugin_paths or os.environ.get('PIX_PLUGIN_PATH')
if plugin_paths:
import_modules(plugin_paths)
self.factory = Factory(self)
self.headers = {
'X-PIX-App-Key': self.app_key,
'Content-type': 'application/json;charset=utf-8',
'Accept': 'application/json;charset=utf-8'
}
self.cookies = None # type: Optional[requests.cookies.RequestsCookieJar]
# A time-out object representing the current session. Expires after a
# set duration and is then refreshed.
self._session = None # type: Optional[Expiry]
# current active project
self.active_project = None # type: Optional[PIXProject]
def __enter__(self):
# type: () -> Session
"""
Use session as a context manager to log out when it exits.
Examples
--------
>>> with Session() as session:
... project = session.load_project('FooBar')
... label = project.label
Returns
-------
Session
"""
return self
def __exit__(self, *args):
self.logout()
def login(self):
"""
Log into PIX
"""
result = requests.put(
url=self.api_url + '/session/',
headers=self.headers,
data=json.dumps(
{'username': self.username, 'password': self.password}))
if result.status_code != 201:
raise PIXLoginError(result.reason)
self.cookies = result.cookies
self._session = Expiry(self.time_remaining())
def logout(self):
"""
Log out of PIX
"""
result = self.delete_session()
self._session = result
def time_remaining(self):
# type: () -> int
"""
Get the time remaining for current session.
Returns
-------
int
"""
# Not using self.get here is intentional to avoid recursive self.login
# calls.
response = requests.get(
url=self.api_url + '/session/time_remaining',
cookies=self.cookies,
headers=self.headers)
return json.loads(response.text)
def delete_session(self):
# type: () -> requests.Response
"""
End a PIX session.
Returns
-------
requests.Response
"""
return requests.delete(
url=self.api_url + '/session/', cookies=self.cookies,
headers=self.headers)
def put(self, url, payload=None):
# type: (str, Optional[Any]) -> requests.Response
"""
PUT REST call
Parameters
----------
url : str
payload : Optional[Any]
Returns
-------
requests.Response
"""
if not self._session:
self.login()
if self.api_url not in url:
url = self.api_url + url
if payload is not None:
payload = json.dumps(payload)
return requests.put(
url=url, cookies=self.cookies, headers=self.headers, data=payload)
def post(self, url, payload=None):
# type: (str, Optional[Any]) -> requests.Response
"""
POST REST call
Parameters
----------
url : str
payload : Optional[Any]
Returns
-------
requests.Response
"""
if not self._session:
self.login()
if self.api_url not in url:
url = self.api_url + url
if payload is not None:
payload = json.dumps(payload)
return requests.post(
url=url, cookies=self.cookies, headers=self.headers, data=payload)
def delete(self, url, payload=None):
# type: (str, Optional[Any]) -> requests.Response
"""
DELETE REST call
Parameters
----------
url : str
payload : Optional[Any]
Returns
-------
requests.Response
"""
if not self._session:
self.login()
if self.api_url not in url:
url = self.api_url + url
if payload is not None:
payload = json.dumps(payload)
return requests.delete(
url=url, cookies=self.cookies, headers=self.headers, data=payload)
def get(self, url):
# type: (str) -> Any
"""
GET REST call
Parameters
----------
url : str
Returns
-------
Any
"""
if not self._session:
self.login()
if self.api_url not in url:
url = self.api_url + url
return self.process_result(
requests.get(url=url, cookies=self.cookies, headers=self.headers))
def process_result(self, raw_result):
# type: (requests.Response) -> Union[requests.Response, Any]
"""
Process request results. This utilizes the `Factory` to premote
certain elements within the raw results to dict-like objects. These
objects may be built with base classes registered with the factory to
offer additional helper methods.
Parameters
----------
raw_result : requests.Response
Returns
-------
Union[requests.Response, Any]
"""
if self.headers['Accept'] != 'application/json;charset=utf-8':
return raw_result
return self.factory.objectfy(json.loads(raw_result.text))
def header(self, headers):
# type: (Dict[str, str]) -> SessionHeader
"""
Context manager for temporarily setting the session headers.
Examples
--------
>>> session = Session()
>>> with session.header({'Accept': 'text/xml'}):
... response = session.get('/media/1234/markup')
Parameters
----------
headers : Dict[str, str]
Returns
-------
SessionHeader
"""
return SessionHeader(self, headers)
def get_projects(self, limit=None):
# type: (Optional[int]) -> List[PIXProject]
"""
Load all projects user has access to.
Parameters
----------
limit : Optional[int]
Returns
-------
List[PIXProject]
"""
url = '/projects'
if limit is not None:
url += '?limit={}'.format(limit)
response = self.get(url)
if isinstance(response, dict):
if response.get('type') == 'bad_request':
raise PIXError(
'Error fetching projects: {}'.format(
response.get('user_message')))
response = cast(List[PIXProject], response)
return response
def load_project(self, project):
# type: (Union[str, PIXProject]) -> PIXProject
"""
Load a project as the active project.
Parameters
----------
project : Union[str, PIXProject]
Returns
-------
PIXProject
"""
if isinstance(project, six.string_types):
for p in self.get_projects():
if p['label'] == project or p['id'] == project:
project = p
break
else:
raise PIXError(
'No project found {!r}'.format(project))
result = self.put(
'/session/active_project', payload={'id': project['id']})
if result.status_code == 200:
self.active_project = project
return project
raise PIXError(result.reason) | 0.761095 | 0.122707 |
import sys
import os
# Make the repository root importable so the RFEM package resolves when this
# test module is executed directly from the UnitTests directory.
PROJECT_ROOT = os.path.abspath(os.path.join(
    os.path.dirname(__file__),
    os.pardir)
)
sys.path.append(PROJECT_ROOT)
## Import the relevant Libraries
from RFEM.enums import *
from RFEM.initModel import Model
from RFEM.TypesForMembers.memberHinge import MemberHinge
from RFEM.TypesForMembers.memberResultIntermediatePoints import MemberResultIntermediatePoint
from RFEM.TypesForMembers.memberSupport import MemberSupport
from RFEM.dataTypes import inf
from RFEM.TypesForMembers.memberDefinableStiffness import MemberDefinableStiffness
from RFEM.TypesForMembers.memberEccentricity import MemberEccentricity
from RFEM.TypesForMembers.memberNonlinearity import MemberNonlinearity
from RFEM.TypesForMembers.memberStiffnessModification import MemberStiffnessModification
# Lazily create the shared Model connection used by every test in this
# module; Model caches the client on the class, so this runs only once.
if Model.clientModel is None:
    Model()
def test_memberDefinableStiffness():
    """A member definable stiffness round-trips its torsional stiffness."""
    service = Model.clientModel.service
    service.delete_all()
    service.begin_modification()
    MemberDefinableStiffness(1, [False], "", 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12)
    stiffness = service.get_member_definable_stiffness(1)
    assert stiffness.torsional_stiffness == 1
    service.finish_modification()
def test_memberEccentricity():
    """A default MemberEccentricity uses the relative specification type."""
    service = Model.clientModel.service
    service.delete_all()
    service.begin_modification()
    MemberEccentricity()
    eccentricity = service.get_member_eccentricity(1)
    assert eccentricity.specification_type == "TYPE_RELATIVE"
    service.finish_modification()
def test_memberHinge():
    """Member hinges with several nonlinearity types round-trip correctly."""
    service = Model.clientModel.service
    service.delete_all()
    service.begin_modification()
    # Four variants: complete partial-activity, friction, diagram, and
    # fixed partial-activity nonlinearities.
    MemberHinge(1, "Local", "", 4000, translational_release_n_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_PARTIAL_ACTIVITY, [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_COMPLETE, 8000], [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_COMPLETE, 9000]])
    MemberHinge(2, "Local", "", 2000, translational_release_n_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_FRICTION_DIRECTION_1, [0.5]])
    MemberHinge(3, "Local", "", translational_release_vy=0, translational_release_vy_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_DIAGRAM, [MemberHingeDiagramType.DIAGRAM_ENDING_TYPE_CONTINUOUS, MemberHingeDiagramType.DIAGRAM_ENDING_TYPE_CONTINUOUS, [[1, 2, 3], [3, 4, 5]]]])
    MemberHinge(4, "Local", "", translational_release_vz=0, translational_release_vz_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_PARTIAL_ACTIVITY, [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_FIXED, 0.004], [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_FIXED, 0.005]])
    assert service.get_member_hinge(1).no == 1
    assert service.get_member_hinge(2).axial_release_n == 2000
    assert service.get_member_hinge(3).axial_release_vy_nonlinearity == "NONLINEARITY_TYPE_DIAGRAM"
    assert service.get_member_hinge(4).axial_release_vy == inf
    service.finish_modification()
def test_memberNonlinearity():
    """A default MemberNonlinearity is of the failure-if-tension type."""
    service = Model.clientModel.service
    service.delete_all()
    service.begin_modification()
    MemberNonlinearity()
    nonlinearity = service.get_member_nonlinearity(1)
    assert nonlinearity.type == "TYPE_FAILURE_IF_TENSION"
    service.finish_modification()
def test_memberResultIntermediatePoint():
    """An intermediate result point stores the requested point count."""
    service = Model.clientModel.service
    service.delete_all()
    service.begin_modification()
    MemberResultIntermediatePoint(1, "", 5)
    result_point = service.get_member_result_intermediate_point(1)
    assert result_point.point_count == 5
    service.finish_modification()
def test_memberStiffnessModification():
    """A default stiffness modification keeps the bending-z factor at 1."""
    service = Model.clientModel.service
    service.delete_all()
    service.begin_modification()
    MemberStiffnessModification()
    modification = service.get_member_stiffness_modification(1)
    assert modification.factor_of_bending_z_stiffness == 1
    service.finish_modification()
def test_memberSupport():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberSupport()
MemberSupport(2, '', 1,2, [inf, MemberSupportNonlinearity.NONLINEARITY_FAILURE_IF_NEGATIVE_CONTACT_STRESS_Z], 3, 4, 5, 6)
memberSupport_1 = Model.clientModel.service.get_member_support(1)
memberSupport_2 = Model.clientModel.service.get_member_support(2)
assert memberSupport_1.no == 1
assert memberSupport_2.spring_translation_y == 2
Model.clientModel.service.finish_modification() | UnitTests/test_typesForMembers.py |
import sys
import os
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir)
)
sys.path.append(PROJECT_ROOT)
## Import the relevant Libraries
from RFEM.enums import *
from RFEM.initModel import Model
from RFEM.TypesForMembers.memberHinge import MemberHinge
from RFEM.TypesForMembers.memberResultIntermediatePoints import MemberResultIntermediatePoint
from RFEM.TypesForMembers.memberSupport import MemberSupport
from RFEM.dataTypes import inf
from RFEM.TypesForMembers.memberDefinableStiffness import MemberDefinableStiffness
from RFEM.TypesForMembers.memberEccentricity import MemberEccentricity
from RFEM.TypesForMembers.memberNonlinearity import MemberNonlinearity
from RFEM.TypesForMembers.memberStiffnessModification import MemberStiffnessModification
if Model.clientModel is None:
Model()
def test_memberDefinableStiffness():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberDefinableStiffness(1, [False], "", 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12)
memberStiffness_1 = Model.clientModel.service.get_member_definable_stiffness(1)
assert memberStiffness_1.torsional_stiffness == 1
Model.clientModel.service.finish_modification()
def test_memberEccentricity():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberEccentricity()
memberEccentricitiy_1 = Model.clientModel.service.get_member_eccentricity(1)
assert memberEccentricitiy_1.specification_type == "TYPE_RELATIVE"
Model.clientModel.service.finish_modification()
def test_memberHinge():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberHinge(1, "Local", "", 4000, translational_release_n_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_PARTIAL_ACTIVITY, [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_COMPLETE, 8000], [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_COMPLETE, 9000]])
MemberHinge(2, "Local", "", 2000, translational_release_n_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_FRICTION_DIRECTION_1, [0.5]])
MemberHinge(3, "Local", "", translational_release_vy=0, translational_release_vy_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_DIAGRAM, [MemberHingeDiagramType.DIAGRAM_ENDING_TYPE_CONTINUOUS, MemberHingeDiagramType.DIAGRAM_ENDING_TYPE_CONTINUOUS, [[1,2, 3], [3,4, 5]]]])
MemberHinge(4, "Local", "", translational_release_vz=0, translational_release_vz_nonlinearity=[MemberHingeNonlineartiy.NONLINEARITY_TYPE_PARTIAL_ACTIVITY, [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_FIXED, 0.004], [MemberHingePartialActivityType.PARTIAL_ACTIVITY_TYPE_FIXED, 0.005]])
memberHinge_1 = Model.clientModel.service.get_member_hinge(1)
memberHinge_2 = Model.clientModel.service.get_member_hinge(2)
memberHinge_3 = Model.clientModel.service.get_member_hinge(3)
memberHinge_4 = Model.clientModel.service.get_member_hinge(4)
assert memberHinge_1.no == 1
assert memberHinge_2.axial_release_n == 2000
assert memberHinge_3.axial_release_vy_nonlinearity == "NONLINEARITY_TYPE_DIAGRAM"
assert memberHinge_4.axial_release_vy == inf
Model.clientModel.service.finish_modification()
def test_memberNonlinearity():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberNonlinearity()
memberNonlinearity_1 = Model.clientModel.service.get_member_nonlinearity(1)
assert memberNonlinearity_1.type == "TYPE_FAILURE_IF_TENSION"
Model.clientModel.service.finish_modification()
def test_memberResultIntermediatePoint():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberResultIntermediatePoint(1, "", 5)
memberResultIntermediatePoint_1 = Model.clientModel.service.get_member_result_intermediate_point(1)
assert memberResultIntermediatePoint_1.point_count == 5
Model.clientModel.service.finish_modification()
def test_memberStiffnessModification():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberStiffnessModification()
memberStiffnessModification_1 = Model.clientModel.service.get_member_stiffness_modification(1)
assert memberStiffnessModification_1.factor_of_bending_z_stiffness == 1
Model.clientModel.service.finish_modification()
def test_memberSupport():
Model.clientModel.service.delete_all()
Model.clientModel.service.begin_modification()
MemberSupport()
MemberSupport(2, '', 1,2, [inf, MemberSupportNonlinearity.NONLINEARITY_FAILURE_IF_NEGATIVE_CONTACT_STRESS_Z], 3, 4, 5, 6)
memberSupport_1 = Model.clientModel.service.get_member_support(1)
memberSupport_2 = Model.clientModel.service.get_member_support(2)
assert memberSupport_1.no == 1
assert memberSupport_2.spring_translation_y == 2
Model.clientModel.service.finish_modification() | 0.414662 | 0.285982 |
import base64
from pathlib import Path
import dash_bootstrap_components as dbc
import networkx as nx
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import pydot
from dash import dcc, html
from dash.dependencies import Input, Output, State
from jupyter_dash import JupyterDash
from plotly.subplots import make_subplots
from pyvis.network import Network
class View:
def __init__(self, model):
self.model = model
self.network_svg_path = str(self._create_network_svg(return_b64=False))
self.network_svg_b64 = (
"data:image/svg+xml;base64," + str(self._create_network_svg(return_b64=True))[2:-1]
)
self.network_iframe_path = str(self._create_network_iframe())
self.app = self._build_app()
def plot(self, params=None, layout_dict: dict = None, show: bool = False):
"""
Plots the course of the parameter development over time.
Returns a plotly express figure
"""
if not params:
classes = [k for k in self.model.controller.classes_keys]
shape_params = self.model.get_params(["t", "J", "K"])
t, J, K = shape_params["t"], shape_params["J"], shape_params["K"]
params = self.model.controller.classes_data.reshape((len(classes), t, J, K))
params = dict(zip(classes, params))
if "AnzahlFall" in params:
fig = go.Figure()
fig = make_subplots(specs=[[{"secondary_y": True}]])
dates = list(params["AnzahlFall"].keys())
for key, values in params.items():
if key != "AnzahlAnfälligen":
fig.add_trace(go.Line(x=dates, y=params[key], name=key))
else:
fig.add_trace(
go.Line(x=dates, y=params[key], name=key), secondary_y=True,
)
fig.update_yaxes(range=[0, 83240000], secondary_y=True)
else:
params = {k: np.sum(v, axis=(1, 2)) for k, v in params.items() if k in classes}
df = pd.DataFrame(params)
layout = {
"title": "Simulation",
# "xaxis_title": r"$\text{Time } t \text{ in days}$",
# "yaxis_title": r"$\text{Number of people } n$",
"xaxis_title": "Time t in days",
"yaxis_title": "Number of people n",
"legend_title_text": "Classes",
"plot_bgcolor": "rgba(255, 255, 255, 0.1)",
"paper_bgcolor": "rgba(0, 0, 0, 0)",
}
if layout_dict:
for k, v in layout_dict.items():
layout[k] = v
fig = px.line(df)
fig.update_layout(go.Layout(layout))
if show:
fig.show()
return fig
def _create_network_iframe(
self,
network_iframe_path=Path("assets/network.html"),
dot_path=Path("data/param_graph.dot"),
):
G = nx.DiGraph(nx.drawing.nx_pydot.read_dot(dot_path))
net = Network(directed=True, notebook=True)
net.from_nx(G)
options = [
"""
var options = \
{
"nodes": {
"font": {
"background": "rgba(255,125,104,0.77)"
}
},
"edges": {
"color": {
"inherit": true
},
"scaling": {
"max": 100
},
"font": {
"size": 9,
"background": "rgba(255,255,255,0.90)"
},
"smooth": {
"forceDirection": "none"
}
},
"layout": {
"hierarchical": {
"enabled": true,
"direction": "LR",
"sortMethod": "directed"
}
},
"interaction": {
"multiselect": true
},
"physics": {
"hierarchicalRepulsion": {
"centralGravity": 0
}
}
}
""",
"""
var options = \
{
"nodes":{
"font":{
"background":"rgba(255,125,104,0.77)"
}
},
"edges":{
"color":{
"inherit":true
},
"scaling":{
"max":100
},
"font":{
"size":9,
"background":"rgba(255,255,255,0.90)"
},
"smooth":{
"forceDirection":"none"
}
},
"physics":{
"minVelocity":0.75,
"solver":"repulsion"
}
}
""",
]
net.set_options(options[1])
# net.show_buttons(filter_=True)
# net.show(network_iframe_path)
net.write_html(str(network_iframe_path), notebook=True)
return network_iframe_path
def _create_network_svg(
self,
network_svg_path=Path("assets/network.svg"),
dot_path=Path("data/param_graph.dot"),
return_b64=False,
) -> str:
graphs = pydot.graph_from_dot_file(dot_path)
graph = graphs[0]
graph.set_bgcolor("transparent")
graph.set_size(8)
graph.write_svg(network_svg_path)
if return_b64:
return base64.b64encode(graph.create_svg())
else:
return network_svg_path
def _build_app(self):
"""
returns Jupyter-Dash Webapp
"""
def build_slider(kwargs=None):
"""
Build a slider from kwargs and default settings
"""
sliderparams = {
"min": 0,
"max": 1,
"value": 0.65,
"step": 0.05,
"marks": {
0: {"label": "0", "style": {"color": "#0b4f6c"}},
0.25: {"label": "0.25", "style": {"color": colors["text"]}},
0.5: {"label": "0.5", "style": {"color": colors["text"]}},
0.75: {"label": "0.75", "style": {"color": colors["text"]}},
1: {"label": "1", "style": {"color": "#f50"}},
},
"included": True,
"disabled": False, # Handles can't be moved if True
# "vertical":True,
# "verticalHeight":400,
}
if kwargs:
for k, v in kwargs.items():
sliderparams[k] = v
return dcc.Slider(**sliderparams)
# Build App
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP],)
colors = {
"background": "#577590",
"text": "#7FDBFF",
}
header = html.Div(
className="row navbar navbar-dark bg-dark shadow-sm",
id="header",
children=[
html.H1(
style={"text-align": "left", "color": colors["background"]},
children=["MVSEIQRD - Modell",],
),
],
)
# key: [min, max, value, step, marks]
slider_dict = {
"sigma": [0, 1, 0.5, 0.05, {}],
"rho_mat": [0, 1, 0.5, 0.05, {}],
"rho_vac": [0, 1, 0.5, 0.05, {}],
"rho_rec": [0, 1, 0.5, 0.05, {}],
"nu": [0, 1, 0.5, 0.05, {}],
"beta_asym": [0, 1, 0.5, 0.05, {}],
"beta_sym": [0, 1, 0.5, 0.05, {}],
"beta_sev": [0, 1, 0.5, 0.05, {}],
"psi": [0, 1, 0.5, 0.05, {}],
"epsilon": [0, 1, 0.5, 0.05, {}],
"gamma_asym": [0, 1, 0.5, 0.05, {}],
"gamma_sym": [0, 1, 0.5, 0.05, {}],
"gamma_sev": [0, 1, 0.5, 0.05, {}],
"gamma_sev_r": [0, 1, 0.5, 0.05, {}],
"gamma_sev_d": [0, 1, 0.5, 0.05, {}],
"mu_sym": [0, 1, 0.5, 0.05, {}],
"mu_sev": [0, 1, 0.5, 0.05, {}],
"tau_asym": [0, 1, 0.5, 0.05, {}],
"tau_sym": [0, 1, 0.5, 0.05, {}],
"tau_sev": [0, 1, 0.5, 0.05, {}],
}
params = self.model.get_params()
for k in slider_dict:
# min
min_ = self.model.controller.default_domains[k][0]
slider_dict[k][0] = min_
# make sure limits are not 1.0, 2.0, 0.0 etc.
if min_ == round(min_):
min_ = int(min_)
# max
max_ = self.model.controller.default_domains[k][1]
slider_dict[k][1] = max_
# make sure limits are not 1.0, 2.0, 0.0 etc.
if max_ == round(max_):
max_ = int(max_)
# value
slider_dict[k][2] = np.round(np.median(params[k]), 4)
# step
if slider_dict[k][2] == 0.0:
slider_dict[k][3] = max_ / 10
else:
slider_dict[k][3] = slider_dict[k][2] / 10
# marks
marks = np.linspace(min_, max_, 5)
slider_dict[k][4] = {
min_: {"label": str(int(marks[0])), "style": {"color": "#0B4F6C"}},
marks[1]: {"label": str(marks[1]), "style": {"color": colors["text"]}},
marks[2]: {"label": str(marks[2]), "style": {"color": colors["text"]}},
marks[3]: {"label": str(marks[3]), "style": {"color": colors["text"]}},
max_: {"label": str(int(marks[4])), "style": {"color": "#F50"}},
}
sliders = []
for slider_key in slider_dict:
slider_output_id = slider_key + "_output"
slider_id = slider_key + "_slider"
# Text label
sliders.append(
html.P(
children=slider_key,
className="mt-2 mb-0 ms-3",
style={"text-align": "left",},
id=slider_output_id,
)
)
# Slider
sliders.append(
build_slider(
{
"id": slider_id,
"min": slider_dict[slider_key][0],
"max": slider_dict[slider_key][1],
"value": slider_dict[slider_key][2],
"step": slider_dict[slider_key][3],
"marks": slider_dict[slider_key][4],
}
)
)
slider_col_1 = html.Div(
className="col-2 my-auto align-middle",
id="slider-col-1",
children=sliders[: len(sliders) // 2],
)
slider_col_2 = html.Div(
className="col-2 my-auto align-middle",
id="slider-col-2",
children=sliders[len(sliders) // 2 :],
)
button_col = html.Div(
className="col-1 my-auto align-middle",
style={"text-align": "center",},
children=[
dbc.Button(
id="loading-button",
className="my-auto",
n_clicks=0,
children=["Run Simulation"],
),
],
)
plot_col = html.Div(
className="col-7 my-auto px-0 mx-0",
id="sim-graph",
children=[
html.Div(
style={"text-align": "center",},
children=[
dcc.Loading(
children=[
html.Img(
src=self.network_svg_b64,
id="network-output",
className="mx-auto mb-1 mt-5 pt-5",
style={
"width": 600,
"height": 250,
"text-align": "center",
"background-color": "transparent",
},
)
],
color="#119DFF",
type="default",
fullscreen=False,
),
],
),
html.Div(
dcc.Loading(
children=[
dcc.Graph(
id="loading-output",
figure=px.line(width=800, height=500),
className="mx-auto my-auto",
style={"width": 800, "height": 500, "text-align": "center"},
)
],
color="#119DFF",
type="default",
fullscreen=False,
className="mx-0 px-0 my-auto",
)
),
],
)
main_row = html.Div(
# className = "row flex-fill d-flex justify-content-center",
className="row justify-content-center",
style={"height": "900"},
id="main-row",
children=[slider_col_1, slider_col_2, button_col, plot_col],
)
footer = html.Div(
className="row navbar fixed-bottom navbar-dark bg-dark shadow-sm",
id="footer",
children=[],
)
# Build Layout
app.layout = html.Div(
style={"background-color": colors["background"]},
className="container-fluid d-flex h-100 flex-column",
children=[header, main_row, footer,],
)
# HACK could be done a lot cleaner...
global update_params
update_params = self.model.controller.update_params
# Slider Functionality
for slider_key in slider_dict:
slider_output_id = slider_key + "_output"
slider_id = slider_key + "_slider"
exec(
"@app.callback(Output('"
+ slider_output_id
+ "', 'children'), [Input('"
+ slider_id
+ "', 'value')])\n"
+ "def "
+ "update_output_"
+ slider_key
+ "(value):\n\t"
+ "update_params(params={'"
+ slider_key
+ "':value}, fill_missing_values=False, reset=False)\n\t"
+ "return "
+ "'"
+ slider_key
+ "'"
+ " + f' = {value}'"
)
# Button functionality
@app.callback(Output("loading-output", "figure"), [Input("loading-button", "n_clicks")])
def load_output(n_clicks):
self.model.run()
return self.plot(layout_dict={"width": 800, "height": 500})
return app
def run_app(self):
"""Run app and display result inline in the notebook"""
return self.app.run_server(mode="inline", width="1600", height="880") | app/view.py | import base64
from pathlib import Path
import dash_bootstrap_components as dbc
import networkx as nx
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import pydot
from dash import dcc, html
from dash.dependencies import Input, Output, State
from jupyter_dash import JupyterDash
from plotly.subplots import make_subplots
from pyvis.network import Network
class View:
def __init__(self, model):
self.model = model
self.network_svg_path = str(self._create_network_svg(return_b64=False))
self.network_svg_b64 = (
"data:image/svg+xml;base64," + str(self._create_network_svg(return_b64=True))[2:-1]
)
self.network_iframe_path = str(self._create_network_iframe())
self.app = self._build_app()
def plot(self, params=None, layout_dict: dict = None, show: bool = False):
"""
Plots the course of the parameter development over time.
Returns a plotly express figure
"""
if not params:
classes = [k for k in self.model.controller.classes_keys]
shape_params = self.model.get_params(["t", "J", "K"])
t, J, K = shape_params["t"], shape_params["J"], shape_params["K"]
params = self.model.controller.classes_data.reshape((len(classes), t, J, K))
params = dict(zip(classes, params))
if "AnzahlFall" in params:
fig = go.Figure()
fig = make_subplots(specs=[[{"secondary_y": True}]])
dates = list(params["AnzahlFall"].keys())
for key, values in params.items():
if key != "AnzahlAnfälligen":
fig.add_trace(go.Line(x=dates, y=params[key], name=key))
else:
fig.add_trace(
go.Line(x=dates, y=params[key], name=key), secondary_y=True,
)
fig.update_yaxes(range=[0, 83240000], secondary_y=True)
else:
params = {k: np.sum(v, axis=(1, 2)) for k, v in params.items() if k in classes}
df = pd.DataFrame(params)
layout = {
"title": "Simulation",
# "xaxis_title": r"$\text{Time } t \text{ in days}$",
# "yaxis_title": r"$\text{Number of people } n$",
"xaxis_title": "Time t in days",
"yaxis_title": "Number of people n",
"legend_title_text": "Classes",
"plot_bgcolor": "rgba(255, 255, 255, 0.1)",
"paper_bgcolor": "rgba(0, 0, 0, 0)",
}
if layout_dict:
for k, v in layout_dict.items():
layout[k] = v
fig = px.line(df)
fig.update_layout(go.Layout(layout))
if show:
fig.show()
return fig
def _create_network_iframe(
self,
network_iframe_path=Path("assets/network.html"),
dot_path=Path("data/param_graph.dot"),
):
G = nx.DiGraph(nx.drawing.nx_pydot.read_dot(dot_path))
net = Network(directed=True, notebook=True)
net.from_nx(G)
options = [
"""
var options = \
{
"nodes": {
"font": {
"background": "rgba(255,125,104,0.77)"
}
},
"edges": {
"color": {
"inherit": true
},
"scaling": {
"max": 100
},
"font": {
"size": 9,
"background": "rgba(255,255,255,0.90)"
},
"smooth": {
"forceDirection": "none"
}
},
"layout": {
"hierarchical": {
"enabled": true,
"direction": "LR",
"sortMethod": "directed"
}
},
"interaction": {
"multiselect": true
},
"physics": {
"hierarchicalRepulsion": {
"centralGravity": 0
}
}
}
""",
"""
var options = \
{
"nodes":{
"font":{
"background":"rgba(255,125,104,0.77)"
}
},
"edges":{
"color":{
"inherit":true
},
"scaling":{
"max":100
},
"font":{
"size":9,
"background":"rgba(255,255,255,0.90)"
},
"smooth":{
"forceDirection":"none"
}
},
"physics":{
"minVelocity":0.75,
"solver":"repulsion"
}
}
""",
]
net.set_options(options[1])
# net.show_buttons(filter_=True)
# net.show(network_iframe_path)
net.write_html(str(network_iframe_path), notebook=True)
return network_iframe_path
def _create_network_svg(
self,
network_svg_path=Path("assets/network.svg"),
dot_path=Path("data/param_graph.dot"),
return_b64=False,
) -> str:
graphs = pydot.graph_from_dot_file(dot_path)
graph = graphs[0]
graph.set_bgcolor("transparent")
graph.set_size(8)
graph.write_svg(network_svg_path)
if return_b64:
return base64.b64encode(graph.create_svg())
else:
return network_svg_path
def _build_app(self):
"""
returns Jupyter-Dash Webapp
"""
def build_slider(kwargs=None):
"""
Build a slider from kwargs and default settings
"""
sliderparams = {
"min": 0,
"max": 1,
"value": 0.65,
"step": 0.05,
"marks": {
0: {"label": "0", "style": {"color": "#0b4f6c"}},
0.25: {"label": "0.25", "style": {"color": colors["text"]}},
0.5: {"label": "0.5", "style": {"color": colors["text"]}},
0.75: {"label": "0.75", "style": {"color": colors["text"]}},
1: {"label": "1", "style": {"color": "#f50"}},
},
"included": True,
"disabled": False, # Handles can't be moved if True
# "vertical":True,
# "verticalHeight":400,
}
if kwargs:
for k, v in kwargs.items():
sliderparams[k] = v
return dcc.Slider(**sliderparams)
# Build App
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP],)
colors = {
"background": "#577590",
"text": "#7FDBFF",
}
header = html.Div(
className="row navbar navbar-dark bg-dark shadow-sm",
id="header",
children=[
html.H1(
style={"text-align": "left", "color": colors["background"]},
children=["MVSEIQRD - Modell",],
),
],
)
# key: [min, max, value, step, marks]
slider_dict = {
"sigma": [0, 1, 0.5, 0.05, {}],
"rho_mat": [0, 1, 0.5, 0.05, {}],
"rho_vac": [0, 1, 0.5, 0.05, {}],
"rho_rec": [0, 1, 0.5, 0.05, {}],
"nu": [0, 1, 0.5, 0.05, {}],
"beta_asym": [0, 1, 0.5, 0.05, {}],
"beta_sym": [0, 1, 0.5, 0.05, {}],
"beta_sev": [0, 1, 0.5, 0.05, {}],
"psi": [0, 1, 0.5, 0.05, {}],
"epsilon": [0, 1, 0.5, 0.05, {}],
"gamma_asym": [0, 1, 0.5, 0.05, {}],
"gamma_sym": [0, 1, 0.5, 0.05, {}],
"gamma_sev": [0, 1, 0.5, 0.05, {}],
"gamma_sev_r": [0, 1, 0.5, 0.05, {}],
"gamma_sev_d": [0, 1, 0.5, 0.05, {}],
"mu_sym": [0, 1, 0.5, 0.05, {}],
"mu_sev": [0, 1, 0.5, 0.05, {}],
"tau_asym": [0, 1, 0.5, 0.05, {}],
"tau_sym": [0, 1, 0.5, 0.05, {}],
"tau_sev": [0, 1, 0.5, 0.05, {}],
}
params = self.model.get_params()
for k in slider_dict:
# min
min_ = self.model.controller.default_domains[k][0]
slider_dict[k][0] = min_
# make sure limits are not 1.0, 2.0, 0.0 etc.
if min_ == round(min_):
min_ = int(min_)
# max
max_ = self.model.controller.default_domains[k][1]
slider_dict[k][1] = max_
# make sure limits are not 1.0, 2.0, 0.0 etc.
if max_ == round(max_):
max_ = int(max_)
# value
slider_dict[k][2] = np.round(np.median(params[k]), 4)
# step
if slider_dict[k][2] == 0.0:
slider_dict[k][3] = max_ / 10
else:
slider_dict[k][3] = slider_dict[k][2] / 10
# marks
marks = np.linspace(min_, max_, 5)
slider_dict[k][4] = {
min_: {"label": str(int(marks[0])), "style": {"color": "#0B4F6C"}},
marks[1]: {"label": str(marks[1]), "style": {"color": colors["text"]}},
marks[2]: {"label": str(marks[2]), "style": {"color": colors["text"]}},
marks[3]: {"label": str(marks[3]), "style": {"color": colors["text"]}},
max_: {"label": str(int(marks[4])), "style": {"color": "#F50"}},
}
sliders = []
for slider_key in slider_dict:
slider_output_id = slider_key + "_output"
slider_id = slider_key + "_slider"
# Text label
sliders.append(
html.P(
children=slider_key,
className="mt-2 mb-0 ms-3",
style={"text-align": "left",},
id=slider_output_id,
)
)
# Slider
sliders.append(
build_slider(
{
"id": slider_id,
"min": slider_dict[slider_key][0],
"max": slider_dict[slider_key][1],
"value": slider_dict[slider_key][2],
"step": slider_dict[slider_key][3],
"marks": slider_dict[slider_key][4],
}
)
)
slider_col_1 = html.Div(
className="col-2 my-auto align-middle",
id="slider-col-1",
children=sliders[: len(sliders) // 2],
)
slider_col_2 = html.Div(
className="col-2 my-auto align-middle",
id="slider-col-2",
children=sliders[len(sliders) // 2 :],
)
button_col = html.Div(
className="col-1 my-auto align-middle",
style={"text-align": "center",},
children=[
dbc.Button(
id="loading-button",
className="my-auto",
n_clicks=0,
children=["Run Simulation"],
),
],
)
plot_col = html.Div(
className="col-7 my-auto px-0 mx-0",
id="sim-graph",
children=[
html.Div(
style={"text-align": "center",},
children=[
dcc.Loading(
children=[
html.Img(
src=self.network_svg_b64,
id="network-output",
className="mx-auto mb-1 mt-5 pt-5",
style={
"width": 600,
"height": 250,
"text-align": "center",
"background-color": "transparent",
},
)
],
color="#119DFF",
type="default",
fullscreen=False,
),
],
),
html.Div(
dcc.Loading(
children=[
dcc.Graph(
id="loading-output",
figure=px.line(width=800, height=500),
className="mx-auto my-auto",
style={"width": 800, "height": 500, "text-align": "center"},
)
],
color="#119DFF",
type="default",
fullscreen=False,
className="mx-0 px-0 my-auto",
)
),
],
)
main_row = html.Div(
# className = "row flex-fill d-flex justify-content-center",
className="row justify-content-center",
style={"height": "900"},
id="main-row",
children=[slider_col_1, slider_col_2, button_col, plot_col],
)
footer = html.Div(
className="row navbar fixed-bottom navbar-dark bg-dark shadow-sm",
id="footer",
children=[],
)
# Build Layout
app.layout = html.Div(
style={"background-color": colors["background"]},
className="container-fluid d-flex h-100 flex-column",
children=[header, main_row, footer,],
)
# HACK could be done a lot cleaner...
global update_params
update_params = self.model.controller.update_params
# Slider Functionality
for slider_key in slider_dict:
slider_output_id = slider_key + "_output"
slider_id = slider_key + "_slider"
exec(
"@app.callback(Output('"
+ slider_output_id
+ "', 'children'), [Input('"
+ slider_id
+ "', 'value')])\n"
+ "def "
+ "update_output_"
+ slider_key
+ "(value):\n\t"
+ "update_params(params={'"
+ slider_key
+ "':value}, fill_missing_values=False, reset=False)\n\t"
+ "return "
+ "'"
+ slider_key
+ "'"
+ " + f' = {value}'"
)
# Button functionality
@app.callback(Output("loading-output", "figure"), [Input("loading-button", "n_clicks")])
def load_output(n_clicks):
self.model.run()
return self.plot(layout_dict={"width": 800, "height": 500})
return app
def run_app(self):
"""Run app and display result inline in the notebook"""
return self.app.run_server(mode="inline", width="1600", height="880") | 0.760651 | 0.262245 |
import time
import logging
import sys
import argparse
from decouple import config
import urllib.parse
import re
from utils.qa import grab_qa_for, search_string_in_everything
from utils.levenshtein import levenshtein_ratio_and_distance
from utils.browser_opts import browser_options
from utils.utillities import wait_until, strtobool
from selenium.webdriver.common.by import By
# --- Command-line interface -------------------------------------------------
# -s/--show : run with a visible browser window (default is headless)
# -l/--live : take the real assessment instead of practice mode
my_parser = argparse.ArgumentParser()
my_parser.add_argument('-s', '--show',
                       action='store_true',
                       help='Show browser. Default is headless.')
my_parser.add_argument('-l', '--live',
                       action='store_true',
                       help='Run real test, not just practice.')
options = vars(my_parser.parse_args())

# --- Configuration ----------------------------------------------------------
# Credentials and the profile URL come from the environment (python-decouple).
LINKED_IN_USER = config('LINKED_IN_USER')
LINKED_IN_PASS = config('LINKED_IN_PASS')
LINKED_IN_PROFILE = config('LINKED_IN_PROFILE')

QUESTION_COUNT = 15  # expected number of questions per assessment
EXAM_NAME = ""       # set to a quiz name to run a single exam; empty runs all
def main():
    """Entry point: open the browser, sign in, and take the assessments.

    When EXAM_NAME is empty every available assessment is attempted;
    otherwise only the named exam is considered (single-exam runner is
    currently disabled).
    """
    driver = browser_options(options)

    if not EXAM_NAME:
        # Default path: sign in and walk every assessment on the hub page.
        login(driver)
        wait_until(driver, page_loaded=True)
        run_all(driver)
        return

    # Single-exam path: only proceed if we actually have answers for it.
    clean_qa, qa_pairs = grab_qa_for(quiz=EXAM_NAME)
    if clean_qa[0]:
        login(driver)
        wait_until(driver, page_loaded=True)
        # run_single(EXAM_NAME)  # single-exam mode is currently disabled
def run_all(driver):
    """Take every assessment on the hub page for which we have Q&A data.

    For each listed exam, try to load its question/answer set; when the
    full exam name misses, fall back to looking up each individual word
    of the name. Returns False when the skill-assessment hub page cannot
    be reached, otherwise None.
    """
    on_page = to_quiz_page(driver)
    if not on_page:
        return False
    exam_list = get_all_test(driver)
    for exam in exam_list:
        exam_s = urllib.parse.quote(exam)
        exam_link = f"https://www.linkedin.com/skill-assessments/{exam_s}/quiz-intro/"
        selected_qa = 0  # falsy sentinel: no Q&A set found yet
        exam = exam.replace("assessment", "").replace("microsoft", "").strip()
        words = exam.split(" ")
        clean_qa, qa_pairs = grab_qa_for(quiz=exam)
        if not clean_qa[0]:
            # Full name missed: retry with each individual word of the name.
            for word in words:
                clean_qa, qa_pairs = grab_qa_for(quiz=word.replace("(", "").replace(")", ""))
                if clean_qa[0]:
                    selected_qa = clean_qa
                    # BUGFIX: stop at the first hit. The original kept
                    # iterating, so a later miss overwrote clean_qa/qa_pairs
                    # with data for the wrong (or no) quiz while selected_qa
                    # still pointed at the earlier hit.
                    break
        else:
            selected_qa = clean_qa
        if selected_qa:
            driver.get(exam_link)
            wait_until(driver, page_loaded=True)
            wait_until(driver, page_loaded=True)
            time.sleep(3)
            if "quiz-intro" in driver.current_url:
                # Start for a real (scored) run, Practice otherwise.
                if options.get("live"):
                    driver.execute_script("""document.querySelector("button[title='Start']").click()""")
                else:
                    driver.execute_script("""document.querySelector("button[title='Practice']").click()""")
                time.sleep(2)
                driver.execute_script("""Array.from(document.querySelectorAll('button')).find(el => el.innerText === 'Next').click()""")
                wait_until(driver, page_loaded=True)
                wait_until(driver, page_loaded=True)
                time.sleep(2)
                # handles our exam taking
                during_the_exam(exam, driver, selected_qa, qa_pairs)
                time.sleep(60)
                # after exam go back to profile
                # maybe we add more here we wanna do after
                #result, score = after_exam()
"""
def run_single(exam_name,driver):
# basic usage
to_quiz_page(driver)
# has to run so we get all the tests visible
options_list = get_all_test(driver)
# pick exam to take - leave blank for all
pick_and_go(exam_name,options_list)
# handles our exam taking
during_the_exam(exam_name)
# after exam go back to profile
# maybe we add more here we wanna do after
result, score = after_exam()
"""
def login(driver):
    """Sign in to LinkedIn with the credentials read from the environment.

    Exits the process if the login page cannot be loaded. After submitting
    the form, waits for the logged-in navigation bar and then forces the
    UI language to English so later CSS/text selectors match.
    """
    try:
        driver.get("https://www.linkedin.com/login")
        wait_until(driver, page_loaded=True)
    except Exception as e:
        logging.critical("login page could not be loaded " + str(e))
        sys.exit()
    wait_until(driver, page_loaded=True)
    username = driver.find_element(By.ID, "username")
    password = driver.find_element(By.ID, "password")
    username.send_keys(LINKED_IN_USER)
    # BUGFIX: the password field was fed the literal placeholder `<PASSWORD>`
    # (a syntax error left by credential scrubbing); send the configured
    # password instead.
    password.send_keys(LINKED_IN_PASS)
    wait_until(driver, page_loaded=True)
    time.sleep(3)
    driver.find_element(By.CSS_SELECTOR, ".btn__primary--large").click()
    wait_until(driver, page_loaded=True)
    # The global nav search box only exists once we are actually logged in.
    wait_until(driver, js='document.querySelector("#global-nav-typeahead").id.length > 0')
    change_lang(driver)
def change_lang(driver):
    """Switch the profile page's footer language selector to English (US)."""
    driver.get(LINKED_IN_PROFILE)
    wait_until(driver, page_loaded=True)
    # Scroll to the bottom twice so the lazily loaded footer is rendered.
    for _ in range(2):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(1)
    # Select "en_US" in the footer <select> and fire a change event so the
    # page actually applies it.
    driver.execute_script('''
    var languages = document.querySelector("#globalfooter-select_language")
    for(var i = 0; i < languages.length; i++){
        if (languages[i].value === "en_US"){
            languages.value = "en_US";
            languages.lang = "en_US";
            languages.dispatchEvent(new Event('change'));
        };
    };
    ''')
    wait_until(driver, page_loaded=True)
    wait_until(driver, page_loaded=True)
def to_quiz_page(driver):
    """Navigate to the skill-assessments hub page.

    Returns True when the hub is confirmed (the page header reads
    "Skill Assessments"), retrying the navigation once; returns False
    on failure or on any scripting error.
    """

    def _on_hub():
        # The <h2> header reads "Skill Assessments" only on the hub page.
        return strtobool(driver.execute_script(
            'return document.querySelector("header > div > h2").innerText === "Skill Assessments"'
        ))

    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    driver.get("https://www.linkedin.com/skill-assessments/hub/quizzes/")
    wait_until(driver, page_loaded=True)
    wait_until(driver, page_loaded=True)
    time.sleep(2)
    try:
        if _on_hub():
            return True
        # One retry: reload the hub and re-check the header.
        driver.get("https://www.linkedin.com/skill-assessments/hub/quizzes/")
        wait_until(driver, page_loaded=True)
        return bool(_on_hub())
    except Exception as e:
        # BUGFIX: the original printed the error and fell off the end,
        # implicitly returning None; return False explicitly so callers
        # always receive a real boolean.
        print(e)
        return False
def get_all_test(driver):
    """Expand the assessment list and return every quiz title, lower-cased."""
    # Click "Show more assessments" up to six times to reveal the full list.
    for _ in range(6):
        # First scroll the "more" button into view so it becomes clickable.
        try:
            driver.execute_script('var buttons = document.querySelectorAll("button");for (var i = 0; i < buttons.length; i++){if(buttons[i].innerText.includes("more")){buttons[i].scrollIntoView();};};')
            time.sleep(2)
        except Exception:
            pass
        # Then press it; once it disappears the list is fully expanded.
        try:
            driver.execute_script('document.querySelector(".pv-detail-assessments__pager-row > button").click()')
            time.sleep(2)
        except Exception:
            break
    return driver.execute_script('return (function assesments(){var l=[];document.querySelectorAll(".pv-assessment-item__title").forEach((e)=>{l.push(e.innerText.toLowerCase())});return l})()')
def during_the_exam(exam_name, driver, selected_qa, qa_pairs):
still_questions = True
counter = 0
while still_questions:
picked_quest = False
wait_until(driver, page_loaded=True)
wait_until(driver, page_loaded=True)
time.sleep(4)
try:
question_heading = driver.execute_script('return document.querySelector("h3").innerText')
except Exception as e:
print(e)
question_heading = exam_name
try:
asessment_name = driver.execute_script('return document.querySelector("h3").innerText.split(" Q")[0]')
except Exception as e:
print(e)
asessment_name = exam_name
try:
try:
question_number = driver.execute_script("return document.querySelector('h3').innerText.split(' Q')[1].split('/')[0]")
except Exception as e:
print(e)
try:
question_number = driver.execute_script('return document.querySelector("span.t-16").innerText.replace("Q","").split("/")[0]')
except Exception as e:
print(e)
question_number = driver.execute_script('return document.querySelector("footer > div > div > span").innerText.replace("Q","").split("/")[0]')
except Exception as e:
print(e)
counter += 1
question_number = counter
try:
try:
question_text = driver.execute_script("return document.querySelector('#assessment-a11y-title > span').children.length > 1 ? document.querySelector('#assessment-a11y-title > span > span').innerText : document.querySelector('#assessment-a11y-title > span').innerText")
if not question_text:
try:
question_text = driver.execute_script('return document.querySelector("section > p").innerText.split("\n")[0]')
except Exception as e:
print(e)
except Exception as e:
print(e)
question_text = driver.execute_script('return document.querySelector("section > p").innerText.split("\\n")[0]')
still_questions = question_text
except Exception:
still_questions = False
return
answers_list = 0
try:
try:
answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span[aria-hidden]").innerText)});return l})();""")
except Exception as e:
try:
answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span").innerText.split("\n")[0])});return l})();""")
except Exception as e:
answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("div > ul > li").forEach((e)=>{l.push(e.innerText.split("\n")[0])});return l})();""")
except Exception as e:
time.sleep(2)
try:
answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span[aria-hidden]").innerText)});return l})();""")
except Exception as e:
try:
answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span").innerText.split("\n")[0])});return l})();""")
except Exception as e:
try:
answers_list = driver.execute_script('return (function answers(){var l=[];document.querySelectorAll("div > ul > li").forEach((e)=>{l.push(e.innerText.split("\n")[0])});return l})();')
except Exception as e:
print(e)
try:
time_left = driver.execute_script('return document.querySelector("footer > div > div > span > div").innerText')
except Exception as e:
print(e)
try:
if not answers_list:
try:
answers_list = driver.execute_script('return (function answers(){var l=[];document.querySelectorAll("div > ul > li").forEach((e)=>{l.push(e.innerText.split("\n")[0])});return l})();')
except Exception as e:
try:
answers_list = driver.execute_script('''
function answers(){
var l=[];
document.querySelectorAll("div > ul > li").forEach((e) => {
l.push(e.innerText.split("\n")[0])
});
return l
}; return answers()''')
except Exception as e:
try:
answers_list = driver.execute_script('var testlist = [];document.querySelectorAll("p[id*=skill-assessment-quiz]").forEach((ti)=>{testlist.push(ti.innerText.split("\n")[0])}); return testlist')
except Exception as e:
print(e)
if not answers_list:
driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(1)+'].click()')
time.sleep(2)
else:
found = False
question_answers_pair = {}
for question in qa_pairs:
clean_q = re.sub(r'[^\w]', '', question_text.lower())
if list(question.keys())[0] == clean_q:
question_answers_pair = question
print("We found the question:")
print(question_answers_pair)
print("------------------------")
found = True
break
else:
search_term = question_text
multiple_qa = []
question_answers_pair = next((item for item in qa_pairs if item == search_term), None)
if not question_answers_pair:
for quest in qa_pairs:
if search_term in list(quest.keys())[0]:
multiple_qa.append(quest)
if len(multiple_qa) > 1:
best_index = 0
curr = 0
for num, pair in enumerate(multiple_qa, start=0):
found_qa = levenshtein_ratio_and_distance(search_term,list(pair.keys())[0])
if num == 0:
curr = found_qa
if found_qa < curr and search_term in list(pair.keys())[0]:
best_index = num
question_answers_pair = multiple_qa[best_index]
break
else:
if not question_answers_pair:
question_answers_pair = multiple_qa[0] if len(multiple_qa) > 0 else None
if not question_answers_pair:
# do harder search
for question in selected_qa[0]:
clean_q = re.sub(r'[^\w]', '', question_text.lower())
if question == clean_q:
print("We found the question:")
print(question)
print("------------------------")
found = True
if not found:
print("We dont have that question:")
print(question_text)
print("------------------------")
found = False
pick_answers = []
if not question_answers_pair:
for answer in selected_qa[1]:
for i,ans in enumerate(answers_list, start=0):
clean_a = re.sub(r'[^\w]', '', ans.lower())
if answer == clean_a:
pick_answers.append((i,answer))
# TODO: if multiple answers this will fail
# we have to do a better search see qa.py for startingpoitn
if len(pick_answers) > 1:
pick_answers = [pick_answers[0]]
else:
for answ in list(question_answers_pair.values())[0]:
for i, ans in enumerate(answers_list, start=0):
clean_a = re.sub(r'[^\w]', '', ans.lower())
if answ == clean_a:
pick_answers.append((i,answ))
# make better random answer if multiple choice
if not pick_answers:
found = 0
a_match = ()
for i, answ_opt in enumerate(answers_list, start=0):
found = search_string_in_everything(answ_opt)
if found:
a_match = (i, answ_opt)
break
if a_match:
driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(a_match[0])+'].click()')
time.sleep(2)
picked_quest = True
else:
driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(1)+'].click()')
time.sleep(2)
picked_quest = True
print("We do not have answers:")
print(answers_list)
print("------------------------")
else:
print("We found the answer/s:")
print(pick_answers)
print("------------------------")
# pick answers
for i in pick_answers:
driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(i[0])+'].click()')
time.sleep(2)
picked_quest = True
except Exception as e:
print(e)
if not picked_quest:
driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(1)+'].click()')
time.sleep(2)
picked_quest = False
try:
next_button = driver.execute_script("Array.from(document.querySelectorAll('button')).find(el => el.innerText === 'Next').click();")
except Exception as e:
#print(e)
next_button = driver.execute_script('document.querySelector("footer").querySelector("button").click()')
"""
def pick_and_go(exam_name,options_list):
if exam_name in options_list:
driver.execute_script('''
var availableTests = []
var buttons = document.querySelectorAll('button > span.visually-hidden');
for (var i = 0; i < buttons.length; i++){
var btnTxt = buttons[i].innerText.trim();
var btnArr = btnTxt.split(' ');
btnArr.shift();
btnArr.shift();
var cleanArr = btnArr;
btnTxt = cleanArr.join(',');
btnTxt = btnTxt.replace(/,/g, ' ');
availableTests.push(btnTxt);''' + "if (btnTxt.toLowerCase() === '"+exam_name.lower()+"') {buttons[i].click();};};")
if exam_name:
logging.info('Attempting to take: '+exam_name)
else:
logging.warning('Trying to start all exams..DANGERZONE')
else:
time.sleep(1)
if len(options_list) > 0:
exam_name = options_list[0] # -> exam is the first in the available list
driver.execute_script('''
var availableTests = []
var buttons = document.querySelectorAll('button > span.visually-hidden');
for (var i = 0; i < buttons.length; i++){
var btnTxt = buttons[i].innerText.trim();
var btnArr = btnTxt.split(' ');
btnArr.shift();
btnArr.shift();
var cleanArr = btnArr;
btnTxt = cleanArr.join(',');
btnTxt = btnTxt.replace(/,/g, ' ');
availableTests.push(btnTxt);''' + "if (btnTxt.toLowerCase() === '"+exam_name.lower()+"') {buttons[i].click();};};")
if exam_name:
logging.info('Attempting to take: '+exam_name)
else:
logging.warning('Trying to start all exams..DANGERZONE')
else:
print("We did all exams. Bye")
sys.exit()
time.sleep(1)
logging.info('Selected Exam: '+exam_name)
time.sleep(2)
driver.execute_script('''
(function (el) {
var buttons = document.querySelectorAll('button');
for (var i = 0; i < buttons.length; i++){
var btnTxt = buttons[i].innerText.trim();
if (btnTxt.toLowerCase() === el.toLowerCase()) {
buttons[i].click()
};};}'''+ "('Start'));")
logging.info('Starting Exam: '+exam_name)
time.sleep(2)
print("The choice:{0} is not available.".format(exam_name))
logging.error(exam_name+' is not a valid exam name.')
print('Here is a list of choices')
for x in options_list:
print(str(x))
def after_exam():
time.sleep(8)
# when you actrually pass an exam make this return score
# we only know the selectors when not passed
result = driver.execute_script("var result = document.querySelector('section > div > h3').innerText;return result;")
score = driver.execute_script("var score = document.querySelector('section > div > p').innerText;return score;")
logging.info('Test is done, going back to profile')
time.sleep(1)
driver.execute_script('''
var buttons = document.querySelectorAll('button');
for (var i = 0; i < buttons.length; i++){'''+"if(buttons[i].innerText.includes('Profile')){buttons[i].click();};};")
print(result)
print(score)
return result,score
"""
if __name__ == '__main__':
try:
main()
except:
time.sleep(120) | linkedin_parser.py | import time
import logging
import sys
import argparse
from decouple import config
import urllib.parse
import re
from utils.qa import grab_qa_for, search_string_in_everything
from utils.levenshtein import levenshtein_ratio_and_distance
from utils.browser_opts import browser_options
from utils.utillities import wait_until, strtobool
from selenium.webdriver.common.by import By
# --- command-line flags ----------------------------------------------------
my_parser = argparse.ArgumentParser()
my_parser.add_argument('-s', '--show',
                    action='store_true',
                    help='Show browser. Default is headless.')
my_parser.add_argument('-l', '--live',
                    action='store_true',
                    help='Run real test, not just practice.')
options = my_parser.parse_args()
# downstream code reads flags with dict access (options.get("live")), so convert
options = vars(options)
# credentials / profile URL are read from the environment via python-decouple
LINKED_IN_USER = config('LINKED_IN_USER')
LINKED_IN_PASS = config('LINKED_IN_PASS')
LINKED_IN_PROFILE = config('LINKED_IN_PROFILE')
QUESTION_COUNT = 15  # expected questions per assessment (not referenced in this chunk)
EXAM_NAME = ""  # set to a specific exam name to run only that exam; empty -> run all
def main():
    """Entry point: start the browser, log in, and run the assessments.

    With EXAM_NAME empty (the default) every available assessment is taken;
    with EXAM_NAME set, only that exam's Q&A data is loaded (single-exam
    mode itself is currently disabled).
    """
    driver = browser_options(options)
    if not EXAM_NAME:
        login(driver)
        wait_until(driver, page_loaded=True)
        run_all(driver)
        return
    clean_qa, qa_pairs = grab_qa_for(quiz=EXAM_NAME)
    if clean_qa[0]:
        login(driver)
        wait_until(driver, page_loaded=True)
        #run_single(EXAM_NAME)
def run_all(driver):
    """Iterate over every assessment listed on the hub page and take each one
    we have Q&A data for.

    Returns False when the skill-assessment hub page could not be reached;
    otherwise returns None after looping over all exams.
    """
    on_page = to_quiz_page(driver)
    if not on_page:
        return False
    exam_list = get_all_test(driver)
    for exam in exam_list:
        # raw title is used for the quiz URL; a cleaned name for the Q&A lookup
        exam_s = urllib.parse.quote(exam)
        exam_link = f"https://www.linkedin.com/skill-assessments/{exam_s}/quiz-intro/"
        selected_qa = 0
        exam = exam.replace("assessment","").replace("microsoft","").strip()
        l = exam.split(" ")
        clean_qa, qa_pairs = grab_qa_for(quiz=exam)
        if not clean_qa[0]:
            # no data for the full name: retry with each individual word
            # (last word that yields data wins)
            for x in l:
                clean_qa,qa_pairs = grab_qa_for(quiz=x.replace("(","").replace(")",""))
                if clean_qa[0]:
                    selected_qa = clean_qa
        else:
            selected_qa = clean_qa
        if selected_qa:
            driver.get(exam_link)
            wait_until(driver, page_loaded=True)
            wait_until(driver, page_loaded=True)
            time.sleep(3)
            if "quiz-intro" in driver.current_url:
                # --live starts the real assessment, otherwise practice mode
                if options.get("live"):
                    driver.execute_script("""document.querySelector("button[title='Start']").click()""")
                else:
                    driver.execute_script("""document.querySelector("button[title='Practice']").click()""")
                time.sleep(2)
                driver.execute_script("""Array.from(document.querySelectorAll('button')).find(el => el.innerText === 'Next').click()""")
                wait_until(driver, page_loaded=True)
                wait_until(driver, page_loaded=True)
                time.sleep(2)
                # handles our exam taking
                during_the_exam(exam, driver,selected_qa,qa_pairs)
                time.sleep(60)
            # after exam go back to profile
            # maybe we add more here we wanna do after
            #result, score = after_exam()
"""
def run_single(exam_name,driver):
# basic usage
to_quiz_page(driver)
# has to run so we get all the tests visible
options_list = get_all_test(driver)
# pick exam to take - leave blank for all
pick_and_go(exam_name,options_list)
# handles our exam taking
during_the_exam(exam_name)
# after exam go back to profile
# maybe we add more here we wanna do after
result, score = after_exam()
"""
def login(driver):
    """Log in to LinkedIn with the credentials loaded from the environment.

    Exits the process if the login page cannot be loaded. On success the UI
    language is forced to en_US via change_lang().
    """
    try:
        driver.get("https://www.linkedin.com/login")
        wait_until(driver, page_loaded=True)
    except Exception as e:
        logging.critical("login page could not be loaded " + str(e))
        sys.exit()
    wait_until(driver, page_loaded=True)
    username = driver.find_element(By.ID, "username")
    password = driver.find_element(By.ID, "password")
    username.send_keys(LINKED_IN_USER)
    # BUG FIX: the password field was previously sent the literal placeholder
    # <PASSWORD> (a syntax error left by credential redaction); send the
    # configured credential instead.
    password.send_keys(LINKED_IN_PASS)
    wait_until(driver, page_loaded=True)
    time.sleep(3)
    driver.find_element(By.CSS_SELECTOR,".btn__primary--large").click()
    wait_until(driver, page_loaded=True)
    # wait until the global nav search box exists -> we are authenticated
    wait_until(driver, js='document.querySelector("#global-nav-typeahead").id.length > 0')
    change_lang(driver)
def change_lang(driver):
    """Open the profile page and force the UI language to en_US via the
    footer language selector."""
    driver.get(LINKED_IN_PROFILE)
    wait_until(driver, page_loaded=True)
    # scroll to the bottom twice so the lazy-loaded footer is rendered
    for _ in range(2):
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(1)
    driver.execute_script('''
    var languages = document.querySelector("#globalfooter-select_language")
    for(var i = 0; i < languages.length; i++){
        if (languages[i].value === "en_US"){
            languages.value = "en_US";
            languages.lang = "en_US";
            languages.dispatchEvent(new Event('change'));
        };
    };
    ''')
    wait_until(driver, page_loaded=True)
    wait_until(driver, page_loaded=True)
def to_quiz_page(driver):
    """Navigate to the skill-assessment hub.

    Returns True when the page header confirms we are on the hub (retrying
    the navigation once), False when both checks fail, and None if the
    header check itself raised.
    """
    hub_url = "https://www.linkedin.com/skill-assessments/hub/quizzes/"
    check_js = 'return document.querySelector("header > div > h2").innerText === "Skill Assessments"'
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    driver.get(hub_url)
    wait_until(driver, page_loaded=True)
    wait_until(driver, page_loaded=True)
    time.sleep(2)
    try:
        if strtobool(driver.execute_script(check_js)):
            return True
        # one retry: reload the hub and re-check the header text
        driver.get(hub_url)
        wait_until(driver, page_loaded=True)
        return bool(strtobool(driver.execute_script(check_js)))
    except Exception as e:
        print(e)
def get_all_test(driver):
    """Expand the assessment list (clicking 'show more' up to six times) and
    return every assessment title, lowercased."""
    for _ in range(6):
        # bring the "Show more assessments" button into view
        try:
            driver.execute_script('var buttons = document.querySelectorAll("button");for (var i = 0; i < buttons.length; i++){if(buttons[i].innerText.includes("more")){buttons[i].scrollIntoView();};};')
            time.sleep(2)
        except Exception:
            pass
        # click it; once the pager button is gone the list is complete
        try:
            driver.execute_script('document.querySelector(".pv-detail-assessments__pager-row > button").click()')
            time.sleep(2)
        except Exception:
            break
    return driver.execute_script('return (function assesments(){var l=[];document.querySelectorAll(".pv-assessment-item__title").forEach((e)=>{l.push(e.innerText.toLowerCase())});return l})()')
def during_the_exam(exam_name, driver, selected_qa, qa_pairs):
    """Main question loop for one assessment.

    Repeatedly scrapes the current question and its answer options, looks the
    question up in the loaded Q&A data (qa_pairs / selected_qa), clicks the
    best-matching option — falling back to option index 1 when nothing is
    known — and advances with the Next button. Returns when no question text
    can be scraped any more, i.e. the quiz is over.
    """
    still_questions = True
    counter = 0  # fallback question counter when the page's "Q x/y" can't be read
    while still_questions:
        picked_quest = False
        wait_until(driver, page_loaded=True)
        wait_until(driver, page_loaded=True)
        time.sleep(4)
        # heading / assessment name (scraped for debugging; fall back to exam_name)
        try:
            question_heading = driver.execute_script('return document.querySelector("h3").innerText')
        except Exception as e:
            print(e)
            question_heading = exam_name
        try:
            asessment_name = driver.execute_script('return document.querySelector("h3").innerText.split(" Q")[0]')
        except Exception as e:
            print(e)
            asessment_name = exam_name
        # question number: several selector fallbacks, else the local counter
        try:
            try:
                question_number = driver.execute_script("return document.querySelector('h3').innerText.split(' Q')[1].split('/')[0]")
            except Exception as e:
                print(e)
                try:
                    question_number = driver.execute_script('return document.querySelector("span.t-16").innerText.replace("Q","").split("/")[0]')
                except Exception as e:
                    print(e)
                    question_number = driver.execute_script('return document.querySelector("footer > div > div > span").innerText.replace("Q","").split("/")[0]')
        except Exception as e:
            print(e)
            counter += 1
            question_number = counter
        # question text: when no variant can be scraped, the quiz is over
        try:
            try:
                question_text = driver.execute_script("return document.querySelector('#assessment-a11y-title > span').children.length > 1 ? document.querySelector('#assessment-a11y-title > span > span').innerText : document.querySelector('#assessment-a11y-title > span').innerText")
                if not question_text:
                    try:
                        question_text = driver.execute_script('return document.querySelector("section > p").innerText.split("\n")[0]')
                    except Exception as e:
                        print(e)
            except Exception as e:
                print(e)
                question_text = driver.execute_script('return document.querySelector("section > p").innerText.split("\\n")[0]')
            still_questions = question_text
        except Exception:
            still_questions = False
            return
        # answer options: aria-hidden span first, then progressively plainer selectors
        answers_list = 0
        try:
            try:
                answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span[aria-hidden]").innerText)});return l})();""")
            except Exception as e:
                try:
                    answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span").innerText.split("\n")[0])});return l})();""")
                except Exception as e:
                    answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("div > ul > li").forEach((e)=>{l.push(e.innerText.split("\n")[0])});return l})();""")
        except Exception as e:
            # retry the same selector cascade once after a short wait
            time.sleep(2)
            try:
                answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span[aria-hidden]").innerText)});return l})();""")
            except Exception as e:
                try:
                    answers_list = driver.execute_script("""return (function answers(){var l=[];document.querySelectorAll("p[id^=skill-assessment]").forEach((e)=>{l.push(e.querySelector("span").innerText.split("\n")[0])});return l})();""")
                except Exception as e:
                    try:
                        answers_list = driver.execute_script('return (function answers(){var l=[];document.querySelectorAll("div > ul > li").forEach((e)=>{l.push(e.innerText.split("\n")[0])});return l})();')
                    except Exception as e:
                        print(e)
        # countdown timer (scraped but not used further in this function)
        try:
            time_left = driver.execute_script('return document.querySelector("footer > div > div > span > div").innerText')
        except Exception as e:
            print(e)
        try:
            if not answers_list:
                # last-resort answer scraping variants
                try:
                    answers_list = driver.execute_script('return (function answers(){var l=[];document.querySelectorAll("div > ul > li").forEach((e)=>{l.push(e.innerText.split("\n")[0])});return l})();')
                except Exception as e:
                    try:
                        answers_list = driver.execute_script('''
                        function answers(){
                            var l=[];
                            document.querySelectorAll("div > ul > li").forEach((e) => {
                                l.push(e.innerText.split("\n")[0])
                            });
                            return l
                        }; return answers()''')
                    except Exception as e:
                        try:
                            answers_list = driver.execute_script('var testlist = [];document.querySelectorAll("p[id*=skill-assessment-quiz]").forEach((ti)=>{testlist.push(ti.innerText.split("\n")[0])}); return testlist')
                        except Exception as e:
                            print(e)
            if not answers_list:
                # nothing scraped at all: blindly click option index 1
                driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(1)+'].click()')
                time.sleep(2)
            else:
                found = False
                question_answers_pair = {}
                for question in qa_pairs:
                    # exact match on the normalized (alphanumeric, lowercased) question
                    clean_q = re.sub(r'[^\w]', '', question_text.lower())
                    if list(question.keys())[0] == clean_q:
                        question_answers_pair = question
                        print("We found the question:")
                        print(question_answers_pair)
                        print("------------------------")
                        found = True
                        break
                    else:
                        # fuzzy match: substring candidates, ranked by Levenshtein score
                        search_term = question_text
                        multiple_qa = []
                        # NOTE(review): this compares whole qa_pairs entries (dicts) to the
                        # question string, so the next() is presumably always None — confirm
                        question_answers_pair = next((item for item in qa_pairs if item == search_term), None)
                        if not question_answers_pair:
                            for quest in qa_pairs:
                                if search_term in list(quest.keys())[0]:
                                    multiple_qa.append(quest)
                                    if len(multiple_qa) > 1:
                                        best_index = 0
                                        curr = 0
                                        for num, pair in enumerate(multiple_qa, start=0):
                                            found_qa = levenshtein_ratio_and_distance(search_term,list(pair.keys())[0])
                                            if num == 0:
                                                curr = found_qa
                                            if found_qa < curr and search_term in list(pair.keys())[0]:
                                                best_index = num
                                        question_answers_pair = multiple_qa[best_index]
                                        break
                                    else:
                                        if not question_answers_pair:
                                            question_answers_pair = multiple_qa[0] if len(multiple_qa) > 0 else None
                        if not question_answers_pair:
                            # do harder search
                            for question in selected_qa[0]:
                                clean_q = re.sub(r'[^\w]', '', question_text.lower())
                                if question == clean_q:
                                    print("We found the question:")
                                    print(question)
                                    print("------------------------")
                                    found = True
                        if not found:
                            print("We dont have that question:")
                            print(question_text)
                            print("------------------------")
                            found = False
                    # NOTE(review): this answer-picking section sits INSIDE the question
                    # loop (runs once per qa_pairs entry) and an exact match above breaks
                    # past it entirely — looks unintended; confirm against original intent
                    pick_answers = []
                    if not question_answers_pair:
                        for answer in selected_qa[1]:
                            for i,ans in enumerate(answers_list, start=0):
                                clean_a = re.sub(r'[^\w]', '', ans.lower())
                                if answer == clean_a:
                                    pick_answers.append((i,answer))
                    # TODO: if multiple answers this will fail
                    # we have to do a better search see qa.py for startingpoitn
                    if len(pick_answers) > 1:
                        pick_answers = [pick_answers[0]]
                    else:
                        # NOTE(review): this else attaches to the len() check above, not to
                        # "if not question_answers_pair"; with an empty/None pair the next
                        # line raises and falls through to the outer except — confirm intent
                        for answ in list(question_answers_pair.values())[0]:
                            for i, ans in enumerate(answers_list, start=0):
                                clean_a = re.sub(r'[^\w]', '', ans.lower())
                                if answ == clean_a:
                                    pick_answers.append((i,answ))
                    # make better random answer if multiple choice
                    if not pick_answers:
                        found = 0
                        a_match = ()
                        for i, answ_opt in enumerate(answers_list, start=0):
                            found = search_string_in_everything(answ_opt)
                            if found:
                                a_match = (i, answ_opt)
                                break
                        if a_match:
                            driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(a_match[0])+'].click()')
                            time.sleep(2)
                            picked_quest = True
                        else:
                            driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(1)+'].click()')
                            time.sleep(2)
                            picked_quest = True
                            print("We do not have answers:")
                            print(answers_list)
                            print("------------------------")
                    else:
                        print("We found the answer/s:")
                        print(pick_answers)
                        print("------------------------")
                        # pick answers
                        for i in pick_answers:
                            driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(i[0])+'].click()')
                            time.sleep(2)
                            picked_quest = True
        except Exception as e:
            print(e)
        # safety net: always click something so the quiz can advance
        if not picked_quest:
            driver.execute_script('document.querySelectorAll("p[id^=skill-assessment]")['+str(1)+'].click()')
            time.sleep(2)
            picked_quest = False
        # advance to the next question
        try:
            next_button = driver.execute_script("Array.from(document.querySelectorAll('button')).find(el => el.innerText === 'Next').click();")
        except Exception as e:
            #print(e)
            next_button = driver.execute_script('document.querySelector("footer").querySelector("button").click()')
"""
def pick_and_go(exam_name,options_list):
if exam_name in options_list:
driver.execute_script('''
var availableTests = []
var buttons = document.querySelectorAll('button > span.visually-hidden');
for (var i = 0; i < buttons.length; i++){
var btnTxt = buttons[i].innerText.trim();
var btnArr = btnTxt.split(' ');
btnArr.shift();
btnArr.shift();
var cleanArr = btnArr;
btnTxt = cleanArr.join(',');
btnTxt = btnTxt.replace(/,/g, ' ');
availableTests.push(btnTxt);''' + "if (btnTxt.toLowerCase() === '"+exam_name.lower()+"') {buttons[i].click();};};")
if exam_name:
logging.info('Attempting to take: '+exam_name)
else:
logging.warning('Trying to start all exams..DANGERZONE')
else:
time.sleep(1)
if len(options_list) > 0:
exam_name = options_list[0] # -> exam is the first in the available list
driver.execute_script('''
var availableTests = []
var buttons = document.querySelectorAll('button > span.visually-hidden');
for (var i = 0; i < buttons.length; i++){
var btnTxt = buttons[i].innerText.trim();
var btnArr = btnTxt.split(' ');
btnArr.shift();
btnArr.shift();
var cleanArr = btnArr;
btnTxt = cleanArr.join(',');
btnTxt = btnTxt.replace(/,/g, ' ');
availableTests.push(btnTxt);''' + "if (btnTxt.toLowerCase() === '"+exam_name.lower()+"') {buttons[i].click();};};")
if exam_name:
logging.info('Attempting to take: '+exam_name)
else:
logging.warning('Trying to start all exams..DANGERZONE')
else:
print("We did all exams. Bye")
sys.exit()
time.sleep(1)
logging.info('Selected Exam: '+exam_name)
time.sleep(2)
driver.execute_script('''
(function (el) {
var buttons = document.querySelectorAll('button');
for (var i = 0; i < buttons.length; i++){
var btnTxt = buttons[i].innerText.trim();
if (btnTxt.toLowerCase() === el.toLowerCase()) {
buttons[i].click()
};};}'''+ "('Start'));")
logging.info('Starting Exam: '+exam_name)
time.sleep(2)
print("The choice:{0} is not available.".format(exam_name))
logging.error(exam_name+' is not a valid exam name.')
print('Here is a list of choices')
for x in options_list:
print(str(x))
def after_exam():
time.sleep(8)
# when you actrually pass an exam make this return score
# we only know the selectors when not passed
result = driver.execute_script("var result = document.querySelector('section > div > h3').innerText;return result;")
score = driver.execute_script("var score = document.querySelector('section > div > p').innerText;return score;")
logging.info('Test is done, going back to profile')
time.sleep(1)
driver.execute_script('''
var buttons = document.querySelectorAll('button');
for (var i = 0; i < buttons.length; i++){'''+"if(buttons[i].innerText.includes('Profile')){buttons[i].click();};};")
print(result)
print(score)
return result,score
"""
if __name__ == '__main__':
try:
main()
except:
time.sleep(120) | 0.072633 | 0.060947 |
import discord
import random
from helpcommands import *
from elo import *
from datetime import datetime
from random import shuffle,randint
# Discord client and global game configuration.
# NOTE(review): discord.py >= 2.0 requires Client(intents=...) — confirm library version
client = discord.Client()
busyChannels = []  # channels currently running a game (one activity per channel)
game = discord.Game(name="Perudo")  # presence shown as "Playing Perudo"
diefaces = 6 #Number of faces on a die
startingdice = 5 #How many dice each player starts with
maxplayers = 6 #Maximum number of players
@client.event
async def on_ready():
    """Log connection details and set the 'Playing Perudo' presence."""
    info = ("Connected!", "Username: " + client.user.name, "ID: " + str(client.user.id))
    for line in info:
        print(line)
    await client.change_presence(activity=game)
@client.event
async def on_message(message):
    """Route chat commands to their handlers; the bot ignores its own messages."""
    if message.author == client.user:
        return
    content = message.content
    if content == "!hello":
        await message.channel.send("Greetings {0.author.mention}".format(message))
    if content == "!perudo":
        if message.channel in busyChannels:
            await message.channel.send("Channel busy with another activity.")
        else:
            # mark the channel busy for the duration of the game
            busyChannels.append(message.channel)
            await message.channel.send("Starting **Perudo** in `#"+message.channel.name+"`...")
            await perudo(client,message)
            busyChannels.remove(message.channel)
    if content == "!help":
        await helpcommand(client, message)
    if content == "!help abilities":
        await helpabilities(client, message)
    if content == "!rules":
        await rules(client, message)
    if content.startswith("!elo"):
        # "!elo @player" looks up that player; bare "!elo" looks up the sender
        words = content.split()
        raw_mention = words[1] if len(words) > 1 else message.author.mention
        messagemention = removeExclamation(raw_mention)
        elovalue = fetchelo(messagemention)
        if elovalue == 0:
            await message.channel.send("Could not find an elo for this player.")
        else:
            await message.channel.send("{} has elo {}.".format(messagemention,elovalue))
async def perudo(client,message):
    """Run one complete game of Perudo in message.channel.

    State machine: 0 = login phase (or stopped), 2 = roll dice for a new
    round, 3 = players taking turns, 4 = game over.
    """
    #Declarations
    gamestate = 0 #State of game
    playerlist = [] #List of players
    freebidding = False #Can players bid freely, or are they restricted to BGA rules?
    calza = False #Can players call calza to gain dice?
    abilities = False #Do players have their special abilities?
    playerposition = [] #Position of each player (1 for first, 2 for second, and so on)
    activeplayerlist = [] #List of players who are still in
    playerdicenum = [] #Number of dice each player has
    playerdice = [] #Each player's dice
    playerabilities = [] #Each player's abilities (0 is not unlocked, 1 is unlocked, 2 is used)
    curplayer = 0 #Current player
    firstturn = True #Is this the first turn of the round?
    palifico = False #Is this a palifico round?
    currentbid = [] #Current bid (e.g.[5,6] means five sixes)
    if gamestate == 0: #Login phase
        gamestate,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerabilities = await login(client,message)
        curplayer = 0
    while gamestate == 2 or gamestate == 3:
        if gamestate == 2: #Rolling dice at start of round
            playerdice = await rolldice(client,message,activeplayerlist,playerdicenum,palifico)
            firstturn = True
            currentbid = []
            gamestate = 3
        if gamestate == 3: #Taking a turn
            gamestate,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid = await taketurn(client,message,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid)
    if gamestate == 4: #End of game
        await gameend(client,message,playerlist,playerposition)
#Login phase
async def login(client,message):
    """Login phase: collect players via chat commands until !start or !stop.

    Commands: !join, !quit, !stop, and !start [freebidding] [calza] [abilities].
    Returns (gamestate, playerlist, freebidding, calza, abilities,
    playerposition, activeplayerlist, playerdicenum, playerabilities);
    gamestate is 2 on a successful start, 0 if the game was stopped.
    """
    gamestate = 1
    playerlist = []
    freebidding = False
    calza = False
    abilities = False
    playerdicenum = []
    playerabilities = []
    # BUG FIX: initialize the start-branch results up front — previously !stop
    # raised NameError because these were only assigned in the !start branch
    # yet always appear in the return statement.
    playerposition = []
    activeplayerlist = []
    await message.channel.send("```Login Phase Triggered```\nThe game of Perudo is about to begin.\n*Type !join to enter the game. (2-{} players only.)*\n".format(maxplayers))
    while gamestate == 1:
        def channel_check(m):
            # only react to messages from the channel the game was started in
            return m.channel == message.channel
        reply = await client.wait_for("message", check=channel_check)
        if reply.content == "!join":
            # BUG FIX: was `<= maxplayers`, which allowed maxplayers+1 players
            if len(playerlist) < maxplayers:
                if reply.author not in playerlist:
                    await message.channel.send("{} has joined the game.".format(reply.author.display_name))
                    playerlist.append(reply.author)
                else:
                    await message.channel.send("{} is already in the game.".format(reply.author.display_name))
            else:
                await message.channel.send("The game is full.")
        if reply.content == "!quit":
            if reply.author in playerlist:
                await message.channel.send("{} has left the game.".format(reply.author.display_name))
                playerlist.remove(reply.author)
            else:
                await message.channel.send("{} wasn't in the game.".format(reply.author.display_name))
        if reply.content == "!stop" and reply.author in playerlist:
            await message.channel.send("The game has been stopped.")
            gamestate = 0
        replylist = reply.content.split()
        # BUG FIX: guard against empty message content (e.g. attachment-only
        # messages), which previously raised IndexError on replylist[0]
        if replylist and replylist[0] == "!start" and reply.author in playerlist:
            if len(playerlist) < 2:
                await message.channel.send("Not enough players.")
            else:
                # collect optional game modes from the !start arguments
                gamemodestr = ""
                if "freebidding" in replylist:
                    freebidding = True
                    gamemodestr = gamemodestr + "Free bidding, "
                if "calza" in replylist:
                    calza = True
                    gamemodestr = gamemodestr + "Calza, "
                if "abilities" in replylist:
                    abilities = True
                    gamemodestr = gamemodestr + "Special abilities, "
                    for i in range(len(playerlist)):
                        playerabilities.append([0,0,0])
                gamemodestr = gamemodestr.rstrip(", ")
                if gamemodestr != "":
                    gamemodestr = " ({})".format(gamemodestr)
                # randomize seating order before the game begins
                random.seed(datetime.now())
                shuffle(playerlist)
                playerposition = [1] * len(playerlist)
                activeplayerlist = playerlist.copy()
                playerdicenum = [startingdice] * len(playerlist)
                await message.channel.send("Game started!{}".format(gamemodestr))
                gamestate = 2
    return gamestate,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerabilities
async def rolldice(client,message,activeplayerlist,playerdicenum,palifico):
    """Roll dice for every active player, DM each player their own roll, and
    announce everyone's dice counts (plus palifico status) in the channel.

    Returns the list of sorted dice lists, parallel to activeplayerlist.
    """
    playerdice = []
    for idx, player in enumerate(activeplayerlist):
        roll = [randint(1, diefaces) for _ in range(playerdicenum[idx])]
        roll.sort()
        playerdice.append(roll)
        # each player only ever sees their own dice
        await player.send("You rolled:\n{}".format(", ".join(str(die) for die in roll)))
    announcement = "Dice have been rolled for the start of this round.\n"
    for idx, player in enumerate(activeplayerlist):
        announcement = announcement + "{} has {} dice\n".format(player.display_name,playerdicenum[idx])
    if palifico:
        announcement = announcement + "This is a palifico round!"
    await message.channel.send(announcement)
    return playerdice
async def taketurn(client,message,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid):
def player_check(m):
if m.channel == message.channel and m.author in activeplayerlist:
return True
return False
#Function for revealing counting dice at end of round
async def diecount(client,message,activeplayerlist,playerdice,palifico,currentbid):
messagestring = ""
numdice = [0] * diefaces
for i in range(len(activeplayerlist)):
playerstr = ""
for j in range(len(playerdice[i])):
playerstr = playerstr + str(playerdice[i][j]) + ", "
numdice[playerdice[i][j]-1] += 1
playerstr = playerstr.rstrip(", ")
messagestring = messagestring + "{} had {}\n".format(activeplayerlist[i].display_name,playerstr)
await message.channel.send(messagestring)
if currentbid[1] == 1 or palifico:
numofbid = numdice[currentbid[1]-1]
else:
numofbid = numdice[0] + numdice[currentbid[1]-1]
await message.channel.send("There were {} {}s!".format(numofbid,currentbid[1]))
return numofbid
await message.channel.send("It is {}'s turn.".format(activeplayerlist[curplayer].display_name))
waiting = True
while waiting:
command = await client.wait_for('message', check=player_check)
losingplayer = 0
playerlosedie = False #Did someone lose a die?
#Check for calza
if command.content == "calza":
if not calza:
await message.channel.send("Calling calza is disabled.")
continue
if firstturn:
await message.channel.send("You cannot call calza on the first turn.")
continue
if command.author == activeplayerlist[curplayer-1]:
await message.channel.send("You cannot call calza on your own bid.")
continue
numofbid = await diecount(client,message,activeplayerlist,playerdice,palifico,currentbid)
prevplayer = curplayer - 1
for i in range(len(activeplayerlist)):
if command.author == activeplayerlist[i]:
curplayer = i
if currentbid[0] == numofbid:
await message.channel.send("{} called calza successfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
newdicenum = playerdicenum[curplayer]+1
if newdicenum > startingdice:
newdicenum = startingdice
await message.channel.send("{} was at the maximum number of dice and thus has {} dice.".format(activeplayerlist[curplayer].display_name,newdicenum))
else:
await message.channel.send("{} has gained a die and now has {} dice.".format(activeplayerlist[curplayer].display_name,newdicenum))
playerdicenum[curplayer] = newdicenum
gamestate = 2
palifico = False
break
else:
losingplayer = curplayer
await message.channel.send("{} called calza unsuccessfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
playerlosedie = True
if command.author != activeplayerlist[curplayer]:
await message.channel.send("It is not your turn!")
continue
#Check for dudo
if command.content == "dudo":
if firstturn:
await message.channel.send("You cannot call dudo on the first turn.")
continue
#Dudo has been called! Reveal everyone's dice
numofbid = await diecount(client,message,activeplayerlist,playerdice,palifico,currentbid)
prevplayer = curplayer - 1
if currentbid[0] > numofbid:
losingplayer = prevplayer
await message.channel.send("{} called dudo successfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
else:
losingplayer = curplayer
await message.channel.send("{} called dudo unsuccessfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
playerlosedie = True
#If someone lost a die
if playerlosedie:
newdicenum = playerdicenum[losingplayer]-1
playerdicenum[losingplayer] = newdicenum
await message.channel.send("{} has lost a die and now has {} dice.".format(activeplayerlist[losingplayer].display_name,newdicenum))
if newdicenum == 1:
palifico = True
else:
palifico = False
#Do they gain abilities?
if abilities:
if newdicenum == 3 and playerabilities[losingplayer][0] == 0:
await message.channel.send("{} unlocked the ability to match the previous bid! (Type \"match\" to use.)".format(activeplayerlist[losingplayer].display_name))
playerabilities[losingplayer][0] = 1
if newdicenum == 2 and playerabilities[losingplayer][1] == 0:
await message.channel.send("{} unlocked the ability to reroll their own dice! (Type \"reroll\" to use.)".format(activeplayerlist[losingplayer].display_name))
playerabilities[losingplayer][1] = 1
if newdicenum == 1 and playerabilities[losingplayer][2] == 0:
await message.channel.send("{} unlocked the ability to see another player's dice! (Type \"see @playername\" to use.)".format(activeplayerlist[losingplayer].display_name))
playerabilities[losingplayer][2] = 1
curplayer = losingplayer
if newdicenum == 0:
await message.channel.send("{} is out of the game!".format(activeplayerlist[losingplayer].display_name))
actualplayer = 0
for i in range(len(playerlist)):
if playerlist[i] == activeplayerlist[curplayer]:
actualplayer = i
playerposition[actualplayer] = len(activeplayerlist)
activeplayerlist.pop(curplayer)
playerdicenum.pop(curplayer)
if abilities:
playerabilities.pop(curplayer)
if curplayer == len(activeplayerlist) or curplayer == -1:
curplayer = 0
if len(activeplayerlist) == 1:
gamestate = 4
else:
gamestate = 2
break
commandlist = command.content.split()
#Check for use of abilities
#Match bid:
if command.content == "match":
if not abilities:
await message.channel.send("Abilities are disabled.")
continue
if playerabilities[curplayer][0] == 0:
await message.channel.send("You have not yet unlocked this ability.")
continue
if playerabilities[curplayer][0] == 2:
await message.channel.send("You have already used this ability.")
continue
if firstturn:
await message.channel.send("You cannot match on the first turn.")
continue
await message.channel.send("{} matched the previous bid!".format(activeplayerlist[curplayer].display_name))
playerabilities[curplayer][0] = 2
gamestate = 3
firstturn = False
curplayer += 1
if curplayer == len(activeplayerlist):
curplayer = 0
break
#Reroll dice:
if command.content == "reroll":
if not abilities:
await message.channel.send("Abilities are disabled.")
continue
if playerabilities[curplayer][1] == 0:
await message.channel.send("You have not yet unlocked this ability.")
continue
if playerabilities[curplayer][1] == 2:
await message.channel.send("You have already used this ability.")
continue
await message.channel.send("{} rerolled!".format(activeplayerlist[curplayer].display_name))
playerabilities[curplayer][1] = 2
for i in range(len(playerdice[curplayer])):
playerdice[curplayer][i] = randint(1,diefaces)
playerdice[curplayer].sort()
dicestr = ""
for i in range(len(playerdice[curplayer])):
dicestr = dicestr + str(playerdice[curplayer][i]) + ", "
dicestr = dicestr.rstrip(", ")
await activeplayerlist[curplayer].send("You rolled:\n{}".format(dicestr))
continue
#See other player's dice:
if commandlist[0] == "see":
if not abilities:
await message.channel.send("Abilities are disabled.")
continue
if playerabilities[curplayer][2] == 0:
await message.channel.send("You have not yet unlocked this ability.")
continue
if playerabilities[curplayer][2] == 2:
await message.channel.send("You have already used this ability.")
continue
targetstr = commandlist[1]
if len(targetstr) > 2:
if targetstr[2]=="!":
targetstr="<@"+targetstr[3:]
targetnum = -1
for i in range(len(activeplayerlist)):
mentionstr = activeplayerlist[i].mention
if len(mentionstr) > 2:
if mentionstr[2]=="!":
mentionstr="<@"+mentionstr[3:]
if mentionstr == targetstr:
targetnum = i
if targetnum == -1:
await message.channel.send("Invalid command.\nType \"see @playername\" to see their dice.")
continue
await message.channel.send("{} saw {}'s dice!.".format(activeplayerlist[curplayer].display_name,activeplayerlist[targetnum].display_name))
playerabilities[curplayer][2] = 2
dicestr = ""
for i in range(len(playerdice[targetnum])):
dicestr = dicestr + str(playerdice[targetnum][i]) + ", "
dicestr = dicestr.rstrip(", ")
await activeplayerlist[curplayer].send("{} has:\n{}".format(activeplayerlist[targetnum].display_name,dicestr))
continue
badcommand = False
if len(commandlist) != 2:
badcommand = True
else:
try:
numdicebid = int(commandlist[0])
valdicebid = int(commandlist[1])
assert numdicebid > 0
assert valdicebid > 0
assert valdicebid <= diefaces
except:
badcommand = True
if badcommand:
await message.channel.send("Invalid command.\nType the number of dice you wish to bid followed by the value you wish to bid.\n(e.g. Type \"5 6\" to bid 5 sixes)")
continue
#Now the bid is well-formed (though not necessarily legal)
if not firstturn and palifico and valdicebid != currentbid[1]:
await message.channel.send("In palifico, you cannot change the number.")
continue
if freebidding:
if not firstturn:
if currentbid[1] == 1:
oldvalue = (currentbid[0]*2+1)*diefaces + 1
else:
oldvalue = currentbid[0]*diefaces + currentbid[1]
if valdicebid == 1:
newvalue = (numdicebid*2+1)*diefaces + 1
else:
newvalue = numdicebid*diefaces + valdicebid
if newvalue <= oldvalue:
await message.channel.send("You must increase the value of the bid.")
continue
else:
if firstturn:
if valdicebid == 1 and not palifico:
await message.channel.send("You cannot bid ones on the first turn, except in palifico.")
continue
else:
if valdicebid < currentbid[1] and valdicebid != 1:
await message.channel.send("You cannot bid a lower number, unless you are bidding 1.")
continue
if valdicebid == currentbid[1] and numdicebid <= currentbid[0]:
await message.channel.send("If you are bidding the same number, you must increase the number of dice.")
continue
if valdicebid > currentbid[1] and currentbid[1] != 1 and numdicebid != currentbid[0]:
await message.channel.send("If you are bidding a higher number, you must bid the same number of dice.")
continue
if valdicebid != 1 and currentbid[1] == 1 and numdicebid <= currentbid[0]*2:
await message.channel.send("You must bid strictly more than twice the previous bid.")
continue
if valdicebid == 1 and currentbid[1] != 1 and currentbid[0] > numdicebid*2:
await message.channel.send("If you are bidding 1, you must bid at least half the previous bid.")
continue
#Bid should be legal now.
currentbid = [numdicebid,valdicebid]
gamestate = 3
firstturn = False
curplayer += 1
if curplayer == len(activeplayerlist):
curplayer = 0
waiting = False
return gamestate,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid
#End of game
async def gameend(client,message,playerlist,playerposition):
    """Announce the final leaderboard and apply elo changes.

    Args:
        client: the Discord client (unused here; kept for signature symmetry).
        message: the message whose channel results are posted to.
        playerlist: all players who took part in the game.
        playerposition: finishing position per player (1 = winner).
    """
    await message.channel.send("The game has ended! The leaderboard is:")
    messagestring = ""
    sortedplayerlist = []
    # Build the leaderboard in finishing order (position 1 first).
    for i in range(len(playerlist)):
        for j in range(len(playerlist)):
            if playerposition[j] == i+1:
                sortedplayerlist.append(playerlist[j])
                messagestring = messagestring + "{}: {}\n".format(i+1,playerlist[j].display_name)
    messagestring = messagestring + "Congratulations to {}, the winner!".format(sortedplayerlist[0].display_name)
    await message.channel.send(messagestring)
    #Deal with elos
    # Points are inversely proportional to finishing position: winner highest.
    playerpoints = []
    for i in range(len(playerlist)):
        playerpoints.append(len(playerlist)-playerposition[i])
    playerelochange,newplayerelo = updateelos(playerlist,playerpoints)
    messagestring = ""
    for i in range(len(playerlist)):
        if playerelochange[i] >= 0:
            messagestring = messagestring + "{} has gained {} elo points and has a new elo of {}.\n".format(playerlist[i].display_name,playerelochange[i],newplayerelo[i])
        else:
            messagestring = messagestring + "{} has lost {} elo points and has a new elo of {}.\n".format(playerlist[i].display_name,-playerelochange[i],newplayerelo[i])
    await message.channel.send(messagestring)
client.run('TOKEN') | Perudo/Perudo.py | import discord
import random
from helpcommands import *
from elo import *
from datetime import datetime
from random import shuffle,randint
# Global Discord client and bot-wide state.
client = discord.Client()
busyChannels = []  # Channels currently running a game (prevents double-starts).
game = discord.Game(name="Perudo")  # Presence text shown for the bot.
diefaces = 6 #Number of faces on a die
startingdice = 5 #How many dice each player starts with
maxplayers = 6 #Maximum number of players
@client.event
async def on_ready():
    """Log connection details once the bot is ready and set its presence."""
    print("Connected!")
    print("Username: " + client.user.name)
    print("ID: " + str(client.user.id))
    await client.change_presence(activity = game)
@client.event
async def on_message(message):
    """Dispatch chat commands: !hello, !perudo, !help, !rules and !elo."""
    # Ignore the bot's own messages to avoid feedback loops.
    if message.author == client.user:
        return
    if message.content == "!hello":
        msg = "Greetings {0.author.mention}".format(message)
        await message.channel.send(msg)
    if message.content == "!perudo":
        # Only one activity per channel at a time.
        if message.channel in busyChannels:
            await message.channel.send("Channel busy with another activity.")
        else:
            busyChannels.append(message.channel)
            await message.channel.send("Starting **Perudo** in `#"+message.channel.name+"`...")
            await perudo(client,message)
            busyChannels.remove(message.channel)
    if message.content == "!help":
        await helpcommand(client, message)
    if message.content == "!help abilities":
        await helpabilities(client, message)
    if message.content == "!rules":
        await rules(client, message)
    if message.content.startswith("!elo"):
        messagelist = message.content.split()
        # "!elo @player" looks up another player; bare "!elo" looks up the author.
        if len(messagelist) > 1:
            messagemention = removeExclamation(messagelist[1])
        else:
            messagemention = removeExclamation(message.author.mention)
        elovalue = fetchelo(messagemention)
        if elovalue == 0:
            await message.channel.send("Could not find an elo for this player.")
        else:
            await message.channel.send("{} has elo {}.".format(messagemention,elovalue))
async def perudo(client,message):
    """Run one full game of Perudo in the channel of ``message``.

    Drives the game state machine:
    0 = login phase, 2 = roll dice for a new round, 3 = take a turn,
    4 = game over (show leaderboard and update elos).
    """
    #Declarations
    gamestate = 0 #State of game
    playerlist = [] #List of players
    freebidding = False #Can players bid freely, or are they restricted to BGA rules?
    calza = False #Can players call calza to gain dice?
    abilities = False #Do players have their special abilities?
    playerposition = [] #Position of each player (1 for first, 2 for second, and so on)
    activeplayerlist = [] #List of players who are still in
    playerdicenum = [] #Number of dice each player has
    playerdice = [] #Each player's dice
    playerabilities = [] #Each player's abilities (0 is not unlocked, 1 is unlocked, 2 is used)
    curplayer = 0 #Current player
    firstturn = True #Is this the first turn of the round?
    palifico = False #Is this a palifico round?
    currentbid = [] #Current bid (e.g.[5,6] means five sixes)
    if gamestate == 0: #Login phase
        gamestate,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerabilities = await login(client,message)
        curplayer = 0
    while gamestate == 2 or gamestate == 3:
        if gamestate == 2: #Rolling dice at start of round
            playerdice = await rolldice(client,message,activeplayerlist,playerdicenum,palifico)
            firstturn = True
            currentbid = []
            gamestate = 3
        if gamestate == 3: #Taking a turn
            gamestate,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid = await taketurn(client,message,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid)
    if gamestate == 4: #End of game
        await gameend(client,message,playerlist,playerposition)
#Login phase
async def login(client,message):
    """Run the login phase: collect players and game options, then start.

    Players type !join / !quit; any joined player may type !stop to abort, or
    "!start [freebidding] [calza] [abilities]" to begin the game.

    Returns:
        Tuple of (gamestate, playerlist, freebidding, calza, abilities,
        playerposition, activeplayerlist, playerdicenum, playerabilities).
        gamestate is 2 on a successful start and 0 if the game was stopped.
    """
    gamestate = 1
    playerlist = []
    freebidding = False
    calza = False
    abilities = False
    playerdicenum = []
    playerabilities = []
    # Initialised up-front so the return statement is safe even when the game
    # is stopped before it starts (previously raised UnboundLocalError).
    playerposition = []
    activeplayerlist = []
    await message.channel.send("```Login Phase Triggered```\nThe game of Perudo is about to begin.\n*Type !join to enter the game. (2-{} players only.)*\n".format(maxplayers))
    while gamestate == 1:
        def channel_check(m):
            # Only consider messages from the channel the game was started in.
            return m.channel == message.channel
        reply = await client.wait_for("message", check=channel_check)
        if reply.content == "!join":
            # Strict < : with <= a (maxplayers+1)-th player could join.
            if len(playerlist) < maxplayers:
                if reply.author not in playerlist:
                    await message.channel.send("{} has joined the game.".format(reply.author.display_name))
                    playerlist.append(reply.author)
                else:
                    await message.channel.send("{} is already in the game.".format(reply.author.display_name))
            else:
                await message.channel.send("The game is full.")
        if reply.content == "!quit":
            if reply.author in playerlist:
                await message.channel.send("{} has left the game.".format(reply.author.display_name))
                playerlist.remove(reply.author)
            else:
                await message.channel.send("{} wasn't in the game.".format(reply.author.display_name))
        if reply.content == "!stop" and reply.author in playerlist:
            await message.channel.send("The game has been stopped.")
            gamestate = 0
        replylist = reply.content.split()
        # Guard against empty message content (e.g. attachment-only messages)
        # before indexing replylist[0].
        if replylist and replylist[0] == "!start" and reply.author in playerlist:
            if len(playerlist) < 2:
                await message.channel.send("Not enough players.")
            else:
                gamemodestr = ""
                if "freebidding" in replylist:
                    freebidding = True
                    gamemodestr = gamemodestr + "Free bidding, "
                if "calza" in replylist:
                    calza = True
                    gamemodestr = gamemodestr + "Calza, "
                if "abilities" in replylist:
                    abilities = True
                    gamemodestr = gamemodestr + "Special abilities, "
                    for i in range(len(playerlist)):
                        playerabilities.append([0,0,0])
                gamemodestr = gamemodestr.rstrip(", ")
                if gamemodestr != "":
                    gamemodestr = " ({})".format(gamemodestr)
                # Randomise turn order for the game.
                random.seed(datetime.now())
                shuffle(playerlist)
                playerposition = [1] * len(playerlist)
                activeplayerlist = playerlist.copy()
                playerdicenum = [startingdice] * len(playerlist)
                await message.channel.send("Game started!{}".format(gamemodestr))
                gamestate = 2
    return gamestate,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerabilities
async def rolldice(client,message,activeplayerlist,playerdicenum,palifico):
    """Roll a fresh, sorted set of dice for every active player.

    Each player is sent their own roll via direct message, then a summary of
    how many dice everyone holds is posted to the game channel.

    Returns:
        List of sorted dice lists, one per active player.
    """
    playerdice = []
    for player, dicecount in zip(activeplayerlist, playerdicenum):
        roll = sorted(randint(1, diefaces) for _ in range(dicecount))
        playerdice.append(roll)
        # Privately show the player their own dice.
        await player.send("You rolled:\n{}".format(", ".join(str(die) for die in roll)))
    # Announce the start of the round in the shared channel.
    messagestring = "Dice have been rolled for the start of this round.\n"
    for player, dicecount in zip(activeplayerlist, playerdicenum):
        messagestring = messagestring + "{} has {} dice\n".format(player.display_name, dicecount)
    if palifico:
        messagestring = messagestring + "This is a palifico round!"
    await message.channel.send(messagestring)
    return playerdice
async def taketurn(client,message,playerlist,freebidding,calza,abilities,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid):
    """Process chat messages until the current turn resolves.

    Accepts a bid ("<count> <value>"), "dudo", "calza" or an ability command
    ("match", "reroll", "see @player") from active players and updates the
    game state accordingly.

    Returns:
        The updated (gamestate, playerposition, activeplayerlist,
        playerdicenum, playerdice, playerabilities, curplayer, firstturn,
        palifico, currentbid) tuple.  gamestate is 2 when a new round should
        be rolled, 3 when the next player should act, 4 when the game is over.
    """
    def player_check(m):
        # Only accept messages from active players in the game channel.
        if m.channel == message.channel and m.author in activeplayerlist:
            return True
        return False
    #Function for revealing counting dice at end of round
    async def diecount(client,message,activeplayerlist,playerdice,palifico,currentbid):
        # Reveals everyone's dice and counts how many match the current bid.
        # Ones are wild and count towards any value, except in palifico
        # rounds or when the bid itself is on ones.
        messagestring = ""
        numdice = [0] * diefaces
        for i in range(len(activeplayerlist)):
            playerstr = ""
            for j in range(len(playerdice[i])):
                playerstr = playerstr + str(playerdice[i][j]) + ", "
                numdice[playerdice[i][j]-1] += 1
            playerstr = playerstr.rstrip(", ")
            messagestring = messagestring + "{} had {}\n".format(activeplayerlist[i].display_name,playerstr)
        await message.channel.send(messagestring)
        if currentbid[1] == 1 or palifico:
            numofbid = numdice[currentbid[1]-1]
        else:
            numofbid = numdice[0] + numdice[currentbid[1]-1]
        await message.channel.send("There were {} {}s!".format(numofbid,currentbid[1]))
        return numofbid
    await message.channel.send("It is {}'s turn.".format(activeplayerlist[curplayer].display_name))
    waiting = True
    while waiting:
        command = await client.wait_for('message', check=player_check)
        losingplayer = 0
        playerlosedie = False #Did someone lose a die?
        #Check for calza
        # Calza may be called by any player except the last bidder, even out
        # of turn: a correct call gains the caller a die, a wrong one costs
        # them a die.
        if command.content == "calza":
            if not calza:
                await message.channel.send("Calling calza is disabled.")
                continue
            if firstturn:
                await message.channel.send("You cannot call calza on the first turn.")
                continue
            if command.author == activeplayerlist[curplayer-1]:
                await message.channel.send("You cannot call calza on your own bid.")
                continue
            numofbid = await diecount(client,message,activeplayerlist,playerdice,palifico,currentbid)
            prevplayer = curplayer - 1
            # The calza caller becomes the current player.
            for i in range(len(activeplayerlist)):
                if command.author == activeplayerlist[i]:
                    curplayer = i
            if currentbid[0] == numofbid:
                await message.channel.send("{} called calza successfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
                newdicenum = playerdicenum[curplayer]+1
                if newdicenum > startingdice:
                    newdicenum = startingdice
                    await message.channel.send("{} was at the maximum number of dice and thus has {} dice.".format(activeplayerlist[curplayer].display_name,newdicenum))
                else:
                    await message.channel.send("{} has gained a die and now has {} dice.".format(activeplayerlist[curplayer].display_name,newdicenum))
                playerdicenum[curplayer] = newdicenum
                gamestate = 2
                palifico = False
                break
            else:
                losingplayer = curplayer
                await message.channel.send("{} called calza unsuccessfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
                playerlosedie = True
        # All remaining commands are only valid from the player whose turn it
        # is (an unsuccessful calza caller is now the current player, so they
        # fall through to the die-loss handling below).
        if command.author != activeplayerlist[curplayer]:
            await message.channel.send("It is not your turn!")
            continue
        #Check for dudo
        if command.content == "dudo":
            if firstturn:
                await message.channel.send("You cannot call dudo on the first turn.")
                continue
            #Dudo has been called! Reveal everyone's dice
            numofbid = await diecount(client,message,activeplayerlist,playerdice,palifico,currentbid)
            prevplayer = curplayer - 1
            if currentbid[0] > numofbid:
                losingplayer = prevplayer
                await message.channel.send("{} called dudo successfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
            else:
                losingplayer = curplayer
                await message.channel.send("{} called dudo unsuccessfully on {}!".format(activeplayerlist[curplayer].display_name,activeplayerlist[prevplayer].display_name))
            playerlosedie = True
        #If someone lost a die
        if playerlosedie:
            newdicenum = playerdicenum[losingplayer]-1
            playerdicenum[losingplayer] = newdicenum
            await message.channel.send("{} has lost a die and now has {} dice.".format(activeplayerlist[losingplayer].display_name,newdicenum))
            # A player dropping to a single die triggers a palifico round.
            if newdicenum == 1:
                palifico = True
            else:
                palifico = False
            #Do they gain abilities?
            if abilities:
                if newdicenum == 3 and playerabilities[losingplayer][0] == 0:
                    await message.channel.send("{} unlocked the ability to match the previous bid! (Type \"match\" to use.)".format(activeplayerlist[losingplayer].display_name))
                    playerabilities[losingplayer][0] = 1
                if newdicenum == 2 and playerabilities[losingplayer][1] == 0:
                    await message.channel.send("{} unlocked the ability to reroll their own dice! (Type \"reroll\" to use.)".format(activeplayerlist[losingplayer].display_name))
                    playerabilities[losingplayer][1] = 1
                if newdicenum == 1 and playerabilities[losingplayer][2] == 0:
                    await message.channel.send("{} unlocked the ability to see another player's dice! (Type \"see @playername\" to use.)".format(activeplayerlist[losingplayer].display_name))
                    playerabilities[losingplayer][2] = 1
            # The loser starts the next round.
            curplayer = losingplayer
            if newdicenum == 0:
                await message.channel.send("{} is out of the game!".format(activeplayerlist[losingplayer].display_name))
                actualplayer = 0
                for i in range(len(playerlist)):
                    if playerlist[i] == activeplayerlist[curplayer]:
                        actualplayer = i
                # Record the eliminated player's finishing position.
                playerposition[actualplayer] = len(activeplayerlist)
                activeplayerlist.pop(curplayer)
                playerdicenum.pop(curplayer)
                if abilities:
                    playerabilities.pop(curplayer)
                if curplayer == len(activeplayerlist) or curplayer == -1:
                    curplayer = 0
            # Losing a die always ends the round: either the game is over or
            # fresh dice are rolled.
            if len(activeplayerlist) == 1:
                gamestate = 4
            else:
                gamestate = 2
            break
        # NOTE(review): commandlist[0] below assumes non-empty message content
        # (player_check admits attachment-only messages) — verify upstream.
        commandlist = command.content.split()
        #Check for use of abilities
        #Match bid:
        if command.content == "match":
            if not abilities:
                await message.channel.send("Abilities are disabled.")
                continue
            if playerabilities[curplayer][0] == 0:
                await message.channel.send("You have not yet unlocked this ability.")
                continue
            if playerabilities[curplayer][0] == 2:
                await message.channel.send("You have already used this ability.")
                continue
            if firstturn:
                await message.channel.send("You cannot match on the first turn.")
                continue
            await message.channel.send("{} matched the previous bid!".format(activeplayerlist[curplayer].display_name))
            playerabilities[curplayer][0] = 2
            gamestate = 3
            firstturn = False
            curplayer += 1
            if curplayer == len(activeplayerlist):
                curplayer = 0
            break
        #Reroll dice:
        if command.content == "reroll":
            if not abilities:
                await message.channel.send("Abilities are disabled.")
                continue
            if playerabilities[curplayer][1] == 0:
                await message.channel.send("You have not yet unlocked this ability.")
                continue
            if playerabilities[curplayer][1] == 2:
                await message.channel.send("You have already used this ability.")
                continue
            await message.channel.send("{} rerolled!".format(activeplayerlist[curplayer].display_name))
            playerabilities[curplayer][1] = 2
            for i in range(len(playerdice[curplayer])):
                playerdice[curplayer][i] = randint(1,diefaces)
            playerdice[curplayer].sort()
            dicestr = ""
            for i in range(len(playerdice[curplayer])):
                dicestr = dicestr + str(playerdice[curplayer][i]) + ", "
            dicestr = dicestr.rstrip(", ")
            await activeplayerlist[curplayer].send("You rolled:\n{}".format(dicestr))
            continue
        #See other player's dice:
        if commandlist[0] == "see":
            if not abilities:
                await message.channel.send("Abilities are disabled.")
                continue
            if playerabilities[curplayer][2] == 0:
                await message.channel.send("You have not yet unlocked this ability.")
                continue
            if playerabilities[curplayer][2] == 2:
                await message.channel.send("You have already used this ability.")
                continue
            # Normalise nickname mentions (<@!id>) to the plain <@id> form so
            # they compare equal to member.mention.
            targetstr = commandlist[1]
            if len(targetstr) > 2:
                if targetstr[2]=="!":
                    targetstr="<@"+targetstr[3:]
            targetnum = -1
            for i in range(len(activeplayerlist)):
                mentionstr = activeplayerlist[i].mention
                if len(mentionstr) > 2:
                    if mentionstr[2]=="!":
                        mentionstr="<@"+mentionstr[3:]
                if mentionstr == targetstr:
                    targetnum = i
            if targetnum == -1:
                await message.channel.send("Invalid command.\nType \"see @playername\" to see their dice.")
                continue
            await message.channel.send("{} saw {}'s dice!.".format(activeplayerlist[curplayer].display_name,activeplayerlist[targetnum].display_name))
            playerabilities[curplayer][2] = 2
            dicestr = ""
            for i in range(len(playerdice[targetnum])):
                dicestr = dicestr + str(playerdice[targetnum][i]) + ", "
            dicestr = dicestr.rstrip(", ")
            await activeplayerlist[curplayer].send("{} has:\n{}".format(activeplayerlist[targetnum].display_name,dicestr))
            continue
        # Otherwise the message must be a bid: "<count> <value>".
        badcommand = False
        if len(commandlist) != 2:
            badcommand = True
        else:
            try:
                numdicebid = int(commandlist[0])
                valdicebid = int(commandlist[1])
                assert numdicebid > 0
                assert valdicebid > 0
                assert valdicebid <= diefaces
            # Any parse/validation failure is treated as a malformed bid.
            except:
                badcommand = True
        if badcommand:
            await message.channel.send("Invalid command.\nType the number of dice you wish to bid followed by the value you wish to bid.\n(e.g. Type \"5 6\" to bid 5 sixes)")
            continue
        #Now the bid is well-formed (though not necessarily legal)
        if not firstturn and palifico and valdicebid != currentbid[1]:
            await message.channel.send("In palifico, you cannot change the number.")
            continue
        if freebidding:
            # Free bidding: any bid with a strictly higher "value" is legal.
            # Bids on ones are scored on a separate, doubled scale.
            if not firstturn:
                if currentbid[1] == 1:
                    oldvalue = (currentbid[0]*2+1)*diefaces + 1
                else:
                    oldvalue = currentbid[0]*diefaces + currentbid[1]
                if valdicebid == 1:
                    newvalue = (numdicebid*2+1)*diefaces + 1
                else:
                    newvalue = numdicebid*diefaces + valdicebid
                if newvalue <= oldvalue:
                    await message.channel.send("You must increase the value of the bid.")
                    continue
        else:
            # BGA-style bidding restrictions.
            if firstturn:
                if valdicebid == 1 and not palifico:
                    await message.channel.send("You cannot bid ones on the first turn, except in palifico.")
                    continue
            else:
                if valdicebid < currentbid[1] and valdicebid != 1:
                    await message.channel.send("You cannot bid a lower number, unless you are bidding 1.")
                    continue
                if valdicebid == currentbid[1] and numdicebid <= currentbid[0]:
                    await message.channel.send("If you are bidding the same number, you must increase the number of dice.")
                    continue
                if valdicebid > currentbid[1] and currentbid[1] != 1 and numdicebid != currentbid[0]:
                    await message.channel.send("If you are bidding a higher number, you must bid the same number of dice.")
                    continue
                if valdicebid != 1 and currentbid[1] == 1 and numdicebid <= currentbid[0]*2:
                    await message.channel.send("You must bid strictly more than twice the previous bid.")
                    continue
                if valdicebid == 1 and currentbid[1] != 1 and currentbid[0] > numdicebid*2:
                    await message.channel.send("If you are bidding 1, you must bid at least half the previous bid.")
                    continue
        #Bid should be legal now.
        currentbid = [numdicebid,valdicebid]
        gamestate = 3
        firstturn = False
        curplayer += 1
        if curplayer == len(activeplayerlist):
            curplayer = 0
        waiting = False
    return gamestate,playerposition,activeplayerlist,playerdicenum,playerdice,playerabilities,curplayer,firstturn,palifico,currentbid
#End of game
async def gameend(client,message,playerlist,playerposition):
    """Post the final leaderboard, crown the winner and report elo updates.

    Args:
        client: the Discord client (unused here; kept for signature symmetry).
        message: the message whose channel results are posted to.
        playerlist: all players who took part in the game.
        playerposition: finishing position per player (1 = winner).
    """
    await message.channel.send("The game has ended! The leaderboard is:")
    # Order players by finishing position (1 = winner) and build the board.
    ranked = []
    boardlines = []
    for place in range(1, len(playerlist) + 1):
        for player, position in zip(playerlist, playerposition):
            if position == place:
                ranked.append(player)
                boardlines.append("{}: {}\n".format(place, player.display_name))
    messagestring = "".join(boardlines) + "Congratulations to {}, the winner!".format(ranked[0].display_name)
    await message.channel.send(messagestring)
    #Deal with elos
    # Points are inversely proportional to finishing position: winner highest.
    playerpoints = [len(playerlist) - position for position in playerposition]
    playerelochange, newplayerelo = updateelos(playerlist, playerpoints)
    reportlines = []
    for index, player in enumerate(playerlist):
        change = playerelochange[index]
        if change >= 0:
            reportlines.append("{} has gained {} elo points and has a new elo of {}.\n".format(player.display_name, change, newplayerelo[index]))
        else:
            reportlines.append("{} has lost {} elo points and has a new elo of {}.\n".format(player.display_name, -change, newplayerelo[index]))
    await message.channel.send("".join(reportlines))
client.run('TOKEN') | 0.310694 | 0.181807 |
from typing import Any
# Local imports
from crawler.services.config import Config
from crawler.services.intervals import TimeInterval
import crawler.communication as communication
def _do_command(command: str, data: Any = None) -> communication.Response:
    """Helper method for passing a command to the scheduler.

    Args:
        command (str): command type
        data (Any, optional): the data required for the command. Defaults to None.

    Returns:
        communication.Response: response
    """
    # Use a distinct name for the Command object instead of rebinding the
    # ``command`` parameter (annotated as ``str``) to a different type.
    request = communication.Command(
        command=command,
        data=data
    )
    communication.scheduler_queue_input.put(request)
    return communication.scheduler_queue_output.get()
def add_config(config: Config) -> communication.Response:
    """Submit a new execution configuration to the scheduler.

    Args:
        config (Config): configuration of the execution

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_ADD_CONFIG, config)
def remove_config(identifier: str) -> communication.Response:
    """Ask the scheduler to drop the configuration with this identifier.

    Args:
        identifier (str): identifier of the configuration

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_REMOVE_CONFIG, identifier)
def get_schedule() -> communication.Response:
    """Fetch the current TreeWalk schedule from the scheduler.

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_GET_SCHEDULE)
def add_interval(interval: TimeInterval) -> communication.Response:
    """Register a time interval of maximum resource consumption.

    Args:
        interval (TimeInterval): time interval to add

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_ADD_INTERVAL, interval)
def remove_interval(identifier: str) -> communication.Response:
    """Delete a maximum-resource-consumption interval by identifier.

    Args:
        identifier (str): identifier of interval to remove

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_REMOVE_INTERVAL, identifier)
def get_intervals() -> communication.Response:
    """List all registered maximum-resource-consumption intervals.

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_GET_INTERVALS)
def shutdown() -> None:
"""Shutdown the TreeWalk scheduler."""
command = communication.Command(
command=communication.SCHEDULER_SHUTDOWN,
data=None
)
communication.scheduler_queue_input.put(command) | crawler/crawler/treewalk/scheduler/interface.py | from typing import Any
# Local imports
from crawler.services.config import Config
from crawler.services.intervals import TimeInterval
import crawler.communication as communication
def _do_command(command: str, data: Any = None) -> communication.Response:
    """Helper method for passing a command to the scheduler.

    Args:
        command (str): command type
        data (Any, optional): the data required for the command. Defaults to None.

    Returns:
        communication.Response: response
    """
    # Use a distinct name for the Command object instead of rebinding the
    # ``command`` parameter (annotated as ``str``) to a different type.
    request = communication.Command(
        command=command,
        data=data
    )
    communication.scheduler_queue_input.put(request)
    return communication.scheduler_queue_output.get()
def add_config(config: Config) -> communication.Response:
    """Submit a new execution configuration to the scheduler.

    Args:
        config (Config): configuration of the execution

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_ADD_CONFIG, config)
def remove_config(identifier: str) -> communication.Response:
    """Ask the scheduler to drop the configuration with this identifier.

    Args:
        identifier (str): identifier of the configuration

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_REMOVE_CONFIG, identifier)
def get_schedule() -> communication.Response:
    """Fetch the current TreeWalk schedule from the scheduler.

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_GET_SCHEDULE)
def add_interval(interval: TimeInterval) -> communication.Response:
    """Register a time interval of maximum resource consumption.

    Args:
        interval (TimeInterval): time interval to add

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_ADD_INTERVAL, interval)
def remove_interval(identifier: str) -> communication.Response:
    """Delete a maximum-resource-consumption interval by identifier.

    Args:
        identifier (str): identifier of interval to remove

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_REMOVE_INTERVAL, identifier)
def get_intervals() -> communication.Response:
    """List all registered maximum-resource-consumption intervals.

    Returns:
        communication.Response: response object
    """
    return _do_command(communication.SCHEDULER_GET_INTERVALS)
def shutdown() -> None:
"""Shutdown the TreeWalk scheduler."""
command = communication.Command(
command=communication.SCHEDULER_SHUTDOWN,
data=None
)
communication.scheduler_queue_input.put(command) | 0.941405 | 0.188212 |
import torch
from torch import nn as nn
from torch.nn import functional as F
from .initialized_conv1d import Initialized_Conv1d
from .functional import mask_logits
class SelfAttention(nn.Module):
    """Multi-head self-attention over 1-D feature maps (QANet style)."""

    def __init__(self, d_model, num_head, dropout):
        """
        :param d_model: model/feature dimension
        :param num_head: number of attention heads
        :param dropout: dropout probability applied to the attention weights
        """
        super().__init__()
        self.d_model = d_model
        self.num_head = num_head
        self.dropout = dropout
        # One projection yields keys and values (2 * d_model channels),
        # a second one yields the queries.
        self.mem_conv = Initialized_Conv1d(
            in_channels=d_model, out_channels=d_model * 2,
            kernel_size=1, relu=False, bias=False)
        self.query_conv = Initialized_Conv1d(
            in_channels=d_model, out_channels=d_model,
            kernel_size=1, relu=False, bias=False)
        bias = torch.empty(1)
        nn.init.constant_(bias, 0)
        self.bias = nn.Parameter(bias)

    def forward(self, queries, mask):
        """Self-attend *queries*; input/output are (batch, d_model, length)."""
        kv = self.mem_conv(queries).transpose(1, 2)
        q = self.query_conv(queries).transpose(1, 2)
        heads_q = self.split_last_dim(q, self.num_head)
        heads_k, heads_v = [self.split_last_dim(part, self.num_head)
                            for part in torch.split(kv, self.d_model, dim=2)]
        depth_per_head = self.d_model // self.num_head
        heads_q = heads_q * depth_per_head ** -0.5
        attended = self.dot_product_attention(heads_q, heads_k, heads_v, mask=mask)
        return self.combine_last_two_dim(attended.permute(0, 2, 1, 3)).transpose(1, 2)

    def dot_product_attention(self, q, k, v, bias=False, mask=None):
        """Scaled dot-product attention.

        Args:
            q: Tensor [batch, heads, length_q, depth_k] (pre-scaled by caller)
            k: Tensor [batch, heads, length_kv, depth_k]
            v: Tensor [batch, heads, length_kv, depth_v]
            bias: if True, add the learned scalar bias to the logits
            mask: optional mask, reshaped to [batch, 1, 1, length_kv]

        Returns:
            Tensor [batch, heads, length_q, depth_v].
        """
        logits = torch.matmul(q, k.permute(0, 1, 3, 2))
        if bias:
            logits = logits + self.bias
        if mask is not None:
            dims = [d if d is not None else -1 for d in logits.size()]
            reshaped_mask = mask.view(dims[0], 1, 1, dims[-1])
            logits = mask_logits(logits, reshaped_mask)
        weights = F.softmax(logits, dim=-1)
        # Drop attention links independently per head during training.
        weights = F.dropout(weights, p=self.dropout, training=self.training)
        return torch.matmul(weights, v)

    def split_last_dim(self, x, n):
        """Split the last dim of *x* into (n, last/n) and move heads to axis 1.

        Args:
            x: a Tensor with shape [..., m]
            n: number of heads

        Returns:
            a Tensor with shape [batch, n, ..., m/n]
        """
        shape = list(x.size())
        last = shape[-1]
        split = x.view(shape[:-1] + [n] + [last // n if last else None])
        return split.permute(0, 2, 1, 3)

    def combine_last_two_dim(self, x):
        """Merge the trailing two dims: [..., a, b] -> [..., a*b].

        Args:
            x: a Tensor with shape [..., a, b]

        Returns:
            a Tensor with shape [..., ab]
        """
        shape = list(x.size())
        a, b = shape[-2:]
        merged = shape[:-2] + [a * b if a and b else None]
        return x.contiguous().view(merged)
from torch import nn as nn
from torch.nn import functional as F
from .initialized_conv1d import Initialized_Conv1d
from .functional import mask_logits
class SelfAttention(nn.Module):
def __init__(self, d_model, num_head, dropout):
super().__init__()
self.d_model = d_model
self.num_head = num_head
self.dropout = dropout
self.mem_conv = Initialized_Conv1d(in_channels=d_model, out_channels=d_model * 2, kernel_size=1, relu=False, bias=False)
self.query_conv = Initialized_Conv1d(in_channels=d_model, out_channels=d_model, kernel_size=1, relu=False, bias=False)
bias = torch.empty(1)
nn.init.constant_(bias, 0)
self.bias = nn.Parameter(bias)
def forward(self, queries, mask):
memory = queries
memory = self.mem_conv(memory)
query = self.query_conv(queries)
memory = memory.transpose(1, 2)
query = query.transpose(1, 2)
Q = self.split_last_dim(query, self.num_head)
K, V = [self.split_last_dim(tensor, self.num_head) for tensor in torch.split(memory, self.d_model, dim=2)]
key_depth_per_head = self.d_model // self.num_head
Q *= key_depth_per_head ** -0.5
x = self.dot_product_attention(Q, K, V, mask=mask)
return self.combine_last_two_dim(x.permute(0, 2, 1, 3)).transpose(1, 2)
def dot_product_attention(self, q, k, v, bias=False, mask=None):
"""dot-product attention.
Args:
q: a Tensor with shape [batch, heads, length_q, depth_k]
k: a Tensor with shape [batch, heads, length_kv, depth_k]
v: a Tensor with shape [batch, heads, length_kv, depth_v]
bias: bias Tensor (see attention_bias())
is_training: a bool of training
scope: an optional string
Returns:
A Tensor.
"""
logits = torch.matmul(q, k.permute(0, 1, 3, 2))
if bias:
logits += self.bias
if mask is not None:
shapes = [x if x != None else -1 for x in list(logits.size())]
mask = mask.view(shapes[0], 1, 1, shapes[-1])
logits = mask_logits(logits, mask)
weights = F.softmax(logits, dim=-1)
# dropping out the attention links for each of the heads
weights = F.dropout(weights, p=self.dropout, training=self.training)
return torch.matmul(weights, v)
def split_last_dim(self, x, n):
"""Reshape x so that the last dimension becomes two dimensions.
The first of these two dimensions is n.
Args:
x: a Tensor with shape [..., m]
n: an integer.
Returns:
a Tensor with shape [..., n, m/n]
"""
old_shape = list(x.size())
last = old_shape[-1]
new_shape = old_shape[:-1] + [n] + [last // n if last else None]
ret = x.view(new_shape)
return ret.permute(0, 2, 1, 3)
def combine_last_two_dim(self, x):
"""Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab]
"""
old_shape = list(x.size())
a, b = old_shape[-2:]
new_shape = old_shape[:-2] + [a * b if a and b else None]
ret = x.contiguous().view(new_shape)
return ret | 0.965932 | 0.616359 |
import os
import json
import logging
import requests
# NOTE(review): logging.Logger is instantiated directly, bypassing the
# getLogger() hierarchy, so handlers configured on the root logger never see
# these records; logging.getLogger(__name__) is the conventional choice —
# confirm before changing.
logger = logging.Logger('catch_all')
def query_az(query):
    """Run a shell command (an Azure CLI invocation) and parse its JSON stdout.

    Args:
        query: full shell command line, e.g. "az account get-access-token ...".

    Returns:
        The deserialized JSON value the command printed.

    Raises:
        json.JSONDecodeError: if the command printed no/invalid JSON
            (e.g. the ``az`` binary is missing).
    """
    # FIX: close the pipe deterministically instead of leaking it until GC;
    # the object returned by os.popen supports the context-manager protocol.
    with os.popen(query) as stream:
        json_cis = stream.read()
    return json.loads(json_cis)
def check21():
    """CIS check 2.1: cannot be automated through the Azure CLI."""
    print("Processing 21...")
    message = "Check not available with azure CLI"
    return message
def check22(subid):
    """Evaluate CIS Azure checks 2.2-2.19 from the Security Center policy.

    Replaces 18 copy-pasted score/setting stanzas with one data-driven pass;
    output strings, ordering and error behaviour match the original.

    Args:
        subid: Azure subscription id (interpolated into an ``az`` command).

    Returns:
        list: 18 "Current Setting" HTML strings followed by 18
        [verdict_html, points] score pairs, in check order 2.2 .. 2.19.
        On failure, 18 copies of an error string + 18 UNKNOWN scores.
    """
    print("Processing 22...")
    passed = ['<font color="green">Passed</font>', 1]
    failed = ['<font color="red">Failed</font>', 0]
    unknown = ['<font color="orange">UNKNOWN </font>', 0]
    # properties.recommendations keys for checks 2.3-2.15, in report order.
    recommendation_keys = [
        'patch', 'baseline', 'antimalware', 'diskEncryption', 'nsgs',
        'waf', 'ngfw', 'vulnerabilityAssessment', 'storageEncryption',
        'jitNetworkAccess', 'appWhitelisting', 'sqlAuditing', 'sqlTde',
    ]
    # securityContactConfiguration keys for checks 2.16-2.19.
    contact_keys = [
        'securityContactEmails', 'securityContactPhone',
        'areNotificationsOn', 'sendToAdminOn',
    ]
    try:
        # NOTE(review): subid is interpolated into a shell command line
        # (os.popen via query_az); ensure it comes from a trusted source.
        query20 = ('az account get-access-token --subscription %s --query [accessToken]' % subid)
        json_cis20 = query_az(query20)
        access_token = json_cis20[0]
        headers = {"Authorization": 'Bearer ' + access_token}
        request = ('https://management.azure.com/subscriptions/%s/providers/microsoft.Security/policies?api-version=2015-06-01-preview' % subid)
    except Exception as e:
        logger.error("Exception in check2: %s %s" % (type(e), str(e.args)))
        chk = "Failed to Query"
        return [chk] * 18 + [unknown] * 18
    try:
        properties = requests.get(request, headers=headers).json()['value'][0]['properties']
        values = [properties['logCollection']]
        values += [properties['recommendations'][key] for key in recommendation_keys]
        values += [properties['securityContactConfiguration'][key] for key in contact_keys]
        # Checks 2.2-2.15 pass when the setting is literally "On".
        scores = [list(passed) if value == "On" else list(failed)
                  for value in values[:14]]
        # NOTE(review): securityContactEmails is a JSON list, so comparing it
        # to the string "[]" always passes; kept as-is to preserve behaviour.
        scores.append(list(passed) if values[14] != "[]" else list(failed))
        scores.append(list(passed) if values[15] != "" else list(failed))
        scores.append(list(passed) if values[16] else list(failed))
        scores.append(list(passed) if values[17] else list(failed))
        settings = ['Current Setting: <font color="blue"> %s</b></font>' % value
                    for value in values]
        return settings + scores
    except Exception as e:
        logger.error("Exception in check2: %s %s" % (type(e), str(e.args)))
        chk = "Failed to make API call"
        return [chk] * 18 + [unknown] * 18
import os
import json
import logging
import requests
logger = logging.Logger('catch_all')
def query_az(query):
json_cis=os.popen(query).read()
return json.loads(json_cis)
def check21():
print("Processing 21...")
return "Check not available with azure CLI"
def check22(subid):
print("Processing 22...")
try:
query20=('az account get-access-token --subscription %s --query [accessToken]' % subid)
score22=['<font color="red">Failed</font>',0]
score23=['<font color="red">Failed</font>',0]
score24=['<font color="red">Failed</font>',0]
score25=['<font color="red">Failed</font>',0]
score26=['<font color="red">Failed</font>',0]
score27=['<font color="red">Failed</font>',0]
score28=['<font color="red">Failed</font>',0]
score29=['<font color="red">Failed</font>',0]
score210=['<font color="red">Failed</font>',0]
score211=['<font color="red">Failed</font>',0]
score212=['<font color="red">Failed</font>',0]
score213=['<font color="red">Failed</font>',0]
score214=['<font color="red">Failed</font>',0]
score215=['<font color="red">Failed</font>',0]
score216=['<font color="red">Failed</font>',0]
score217=['<font color="red">Failed</font>',0]
score218=['<font color="red">Failed</font>',0]
score219=['<font color="red">Failed</font>',0]
json_cis20=query_az(query20)
access_token=json_cis20[0]
headers = {"Authorization": 'Bearer ' + access_token}
request = ('https://management.azure.com/subscriptions/%s/providers/microsoft.Security/policies?api-version=2015-06-01-preview' % subid)
try:
json_output = requests.get(request, headers=headers).json()
value22=json_output['value'][0]['properties']['logCollection']
value23=json_output['value'][0]['properties']['recommendations']['patch']
value24=json_output['value'][0]['properties']['recommendations']['baseline']
value25=json_output['value'][0]['properties']['recommendations']['antimalware']
value26=json_output['value'][0]['properties']['recommendations']['diskEncryption']
value27=json_output['value'][0]['properties']['recommendations']['nsgs']
value28=json_output['value'][0]['properties']['recommendations']['waf']
value29=json_output['value'][0]['properties']['recommendations']['ngfw']
value210=json_output['value'][0]['properties']['recommendations']['vulnerabilityAssessment']
value211=json_output['value'][0]['properties']['recommendations']['storageEncryption']
value212=json_output['value'][0]['properties']['recommendations']['jitNetworkAccess']
value213=json_output['value'][0]['properties']['recommendations']['appWhitelisting']
value214=json_output['value'][0]['properties']['recommendations']['sqlAuditing']
value215=json_output['value'][0]['properties']['recommendations']['sqlTde']
value216=json_output['value'][0]['properties']['securityContactConfiguration']['securityContactEmails']
value217=json_output['value'][0]['properties']['securityContactConfiguration']['securityContactPhone']
value218=json_output['value'][0]['properties']['securityContactConfiguration']['areNotificationsOn']
value219=json_output['value'][0]['properties']['securityContactConfiguration']['sendToAdminOn']
if (value22=="On"):
score22=['<font color="green">Passed</font>',1]
if (value23=="On"):
score23=['<font color="green">Passed</font>',1]
if (value24=="On"):
score24=['<font color="green">Passed</font>',1]
if (value25=="On"):
score25=['<font color="green">Passed</font>',1]
if (value26=="On"):
score26=['<font color="green">Passed</font>',1]
if (value27=="On"):
score27=['<font color="green">Passed</font>',1]
if (value28=="On"):
score28=['<font color="green">Passed</font>',1]
if (value29=="On"):
score29=['<font color="green">Passed</font>',1]
if (value210=="On"):
score210=['<font color="green">Passed</font>',1]
if (value211=="On"):
score211=['<font color="green">Passed</font>',1]
if (value212=="On"):
score212=['<font color="green">Passed</font>',1]
if (value213=="On"):
score213=['<font color="green">Passed</font>',1]
if (value214=="On"):
score214=['<font color="green">Passed</font>',1]
if (value215=="On"):
score215=['<font color="green">Passed</font>',1]
if (value216!="[]"):
score216=['<font color="green">Passed</font>',1]
if (value217!=""):
score217=['<font color="green">Passed</font>',1]
if (value218):
score218=['<font color="green">Passed</font>',1]
if (value219):
score219=['<font color="green">Passed</font>',1]
chk22=('Current Setting: <font color="blue"> %s</b></font>' % value22)
chk23=('Current Setting: <font color="blue"> %s</b></font>' % value23)
chk24=('Current Setting: <font color="blue"> %s</b></font>' % value24)
chk25=('Current Setting: <font color="blue"> %s</b></font>' % value25)
chk26=('Current Setting: <font color="blue"> %s</b></font>' % value26)
chk27=('Current Setting: <font color="blue"> %s</b></font>' % value27)
chk28=('Current Setting: <font color="blue"> %s</b></font>' % value28)
chk29=('Current Setting: <font color="blue"> %s</b></font>' % value29)
chk210=('Current Setting: <font color="blue"> %s</b></font>' % value210)
chk211=('Current Setting: <font color="blue"> %s</b></font>' % value211)
chk212=('Current Setting: <font color="blue"> %s</b></font>' % value212)
chk213=('Current Setting: <font color="blue"> %s</b></font>' % value213)
chk214=('Current Setting: <font color="blue"> %s</b></font>' % value214)
chk215=('Current Setting: <font color="blue"> %s</b></font>' % value215)
chk216=('Current Setting: <font color="blue"> %s</b></font>' % value216)
chk217=('Current Setting: <font color="blue"> %s</b></font>' % value217)
chk218=('Current Setting: <font color="blue"> %s</b></font>' % value218)
chk219=('Current Setting: <font color="blue"> %s</b></font>' % value219)
return [chk22,chk23,chk24,chk25,chk26,chk27,chk28,chk29,chk210,chk211,chk212,chk213,chk214,chk215,chk216,chk217,chk218,chk219,score22,score23,score24,score25,score26,score27,score28,score29,score210,score211,score212,score213,score214,score215,score216,score217,score218,score219]
except Exception as e:
logger.error("Exception in check2: %s %s" %(type(e), str(e.args)))
unkScore=['<font color="orange">UNKNOWN </font>',0]
chk="Failed to make API call"
return [chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore]
except Exception as e:
logger.error("Exception in check2: %s %s" %(type(e), str(e.args)))
unkScore=['<font color="orange">UNKNOWN </font>',0]
chk="Failed to Query"
return [chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,chk,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore,unkScore] | 0.169612 | 0.064241 |
import numpy as np
import os
import keras
from keras import losses
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K
import re
batch_size = 16
epochs = 5000
def label_to_int(lbl):
    """Translate a shape name into its class index; unknown names map to 0."""
    class_ids = {'line': 0, 'rectangle': 1, 'ellipse': 2, 'triangle': 3}
    return class_ids.get(lbl, 0)
def load_dataset(dataset_dir):
    """Walk *dataset_dir* and split its CSV grids into train and test sets.

    Every 4th CSV file (by discovery order) goes to the test split; the class
    label is derived from the sub-directory name after the first path
    separator.

    Returns:
        ((train_data, train_labels), (test_data, test_labels)) as numpy arrays.
    """
    train_data, train_labels = [], []
    test_data, test_labels = [], []
    count = 0
    for subdir, _, files in os.walk(dataset_dir):
        for name in files:
            filepath = subdir + os.sep + name
            if not filepath.endswith('.csv'):
                continue
            grid = np.loadtxt(filepath, delimiter=',')
            # Everything after the first separator is the class directory.
            label = label_to_int(subdir[subdir.find(os.sep) + 1:])
            if count % 4 == 0:
                test_data.append(grid)
                test_labels.append(label)
            else:
                train_data.append(grid)
                train_labels.append(label)
            count += 1
    return ((np.array(train_data), np.array(train_labels)),
            (np.array(test_data), np.array(test_labels)))
# Load the dataset and reshape
# Every 4th sample lands in the test split (see load_dataset); grids are 10x10.
train_set, test_set = load_dataset('dataset')
#print(train_set)
print(test_set)
input_shape = (1, 10, 10)
train_dataset = ()
test_dataset = ()
# Place the channel axis where the active Keras backend expects it.
if K.image_data_format() == 'channels_first':
train_dataset = train_set[0].reshape(len(train_set[0]), 1, 10, 10)
test_dataset = test_set[0].reshape(len(test_set[0]), 1, 10, 10)
input_shape = (1, 10, 10)
else:
train_dataset = train_set[0].reshape(len(train_set[0]), 10, 10, 1)
test_dataset = test_set[0].reshape(len(test_set[0]), 10, 10, 1)
input_shape = (10, 10, 1)
# One-hot encode the 4 shape classes (line/rectangle/ellipse/triangle).
train_labels = np_utils.to_categorical(train_set[1], 4)
test_labels = np_utils.to_categorical(test_set[1], 4)
print('dataset shape:', train_dataset.shape)
print('labels shape:', train_labels.shape)
# Use tanh instead of ReLU to prevent NaN errors
# Two conv+pool stages, then a softmax head over the 4 classes.
model = Sequential()
model.add(Conv2D(10,
kernel_size=(2, 2),
activation='tanh',
padding='same',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(1, 1),
strides=2,
padding='same',
data_format=None))
model.add(Dropout(0.1))
model.add(Conv2D(10,
kernel_size=(2, 2),
activation='tanh',
padding='same'))
model.add(MaxPooling2D(pool_size=(1, 1),
strides=2,
padding='same',
data_format=None))
model.add(Flatten())
#"Squash" to probabilities
model.add(Dense(4, activation='softmax'))
model.summary()
# Use a Stochastic-Gradient-Descent as a learning optimizer
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Prevent kernel biases from being exactly 0 and giving nan errors
def constrainedCrossEntropy(ytrue, ypred):
    """Categorical cross-entropy with predictions clipped to [1e-7, 1e7].

    Clipping keeps log(0) out of the loss, avoiding NaN gradients.
    """
    safe_pred = K.clip(ypred, 1e-7, 1e7)
    return losses.categorical_crossentropy(ytrue, safe_pred)
model.compile(loss=constrainedCrossEntropy,
optimizer=sgd,
metrics=['accuracy'])
# NOTE(review): `filepath` is assigned but never used below — dead variable.
filepath = 'model.h5'
model.fit(train_dataset, train_labels,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(test_dataset, test_labels))
#Evaluate on other half of dataset
# NOTE(review): the split is actually 1-in-4 (see load_dataset), not half.
score = model.evaluate(test_dataset, test_labels, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#Saves the model
#Serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
#Serialize weights to HDF5
model.save_weights("model.h5")
# NOTE(review): the trailing "| main.py | import numpy as np" is dataset-row
# residue of the dump, not program code.
print("Saved model to disk") | main.py | import numpy as np
import os
import keras
from keras import losses
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K
import re
batch_size = 16
epochs = 5000
def label_to_int(lbl):
if lbl == 'line':
return 0
elif lbl == 'rectangle':
return 1
elif lbl == 'ellipse':
return 2
elif lbl == 'triangle':
return 3
return 0
def load_dataset(dataset_dir):
# Our classifications
dataset = []
labels = []
test = []
test_labels = []
i = 0
for subdir, dirs, files in os.walk(dataset_dir):
for f in files:
filepath = subdir + os.sep + f
if filepath.endswith('.csv'):
data = np.loadtxt(filepath, delimiter=',')
# Get the subdirectory after the path seperator
label = subdir[subdir.find(os.sep) + 1:]
if i % 4 == 0:
test.append(data)
test_labels.append(label_to_int(label))
else:
dataset.append(data)
labels.append(label_to_int(label))
i += 1
return (np.array(dataset), np.array(labels)), (np.array(test), np.array(test_labels))
# Load the dataset and reshape
train_set, test_set = load_dataset('dataset')
#print(train_set)
print(test_set)
input_shape = (1, 10, 10)
train_dataset = ()
test_dataset = ()
if K.image_data_format() == 'channels_first':
train_dataset = train_set[0].reshape(len(train_set[0]), 1, 10, 10)
test_dataset = test_set[0].reshape(len(test_set[0]), 1, 10, 10)
input_shape = (1, 10, 10)
else:
train_dataset = train_set[0].reshape(len(train_set[0]), 10, 10, 1)
test_dataset = test_set[0].reshape(len(test_set[0]), 10, 10, 1)
input_shape = (10, 10, 1)
train_labels = np_utils.to_categorical(train_set[1], 4)
test_labels = np_utils.to_categorical(test_set[1], 4)
print('dataset shape:', train_dataset.shape)
print('labels shape:', train_labels.shape)
# Use tanh instead of ReLU to prevent NaN errors
model = Sequential()
model.add(Conv2D(10,
kernel_size=(2, 2),
activation='tanh',
padding='same',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(1, 1),
strides=2,
padding='same',
data_format=None))
model.add(Dropout(0.1))
model.add(Conv2D(10,
kernel_size=(2, 2),
activation='tanh',
padding='same'))
model.add(MaxPooling2D(pool_size=(1, 1),
strides=2,
padding='same',
data_format=None))
model.add(Flatten())
#"Squash" to probabilities
model.add(Dense(4, activation='softmax'))
model.summary()
# Use a Stochastic-Gradient-Descent as a learning optimizer
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Prevent kernel biases from being exactly 0 and giving nan errors
def constrainedCrossEntropy(ytrue, ypred):
ypred = K.clip(ypred, 1e-7, 1e7)
return losses.categorical_crossentropy(ytrue, ypred)
model.compile(loss=constrainedCrossEntropy,
optimizer=sgd,
metrics=['accuracy'])
filepath = 'model.h5'
model.fit(train_dataset, train_labels,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(test_dataset, test_labels))
#Evaluate on other half of dataset
score = model.evaluate(test_dataset, test_labels, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#Saves the model
#Serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
#Serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk") | 0.811303 | 0.395076 |
import torch
import torch.nn as nn
from torch import sqrt
from torch.distributions.normal import Normal
import sys
sys.path.append("../../")
from popsan_drl.popsan_ppo.popsan import PopSpikeActor
class CriticNet(nn.Module):
    """Critic network: can use for Q Net and V Net"""

    def __init__(self, network_shape, state_shape):
        """
        :param network_shape: list of hidden layer sizes
        :param state_shape: dimensionality of the input state
        """
        super(CriticNet, self).__init__()
        # Input projection, pairwise hidden layers, then a scalar head.
        layers = [nn.Linear(state_shape, network_shape[0]), nn.ReLU()]
        for width_in, width_out in zip(network_shape[:-1], network_shape[1:]):
            layers.append(nn.Linear(width_in, width_out))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(network_shape[-1], 1))
        self.model = nn.Sequential(*layers)

    def forward(self, state):
        """Return the scalar value estimate for *state*."""
        return self.model(state)
class SpikeActorDeepCritic(nn.Module):
    """PopSAN spiking actor paired with a deep critic for PPO.

    Also maintains running state-normalization statistics using Welford's
    online algorithm.
    """

    def __init__(self, state_shape, action_shape, encoder_pop_dim,
                 decoder_pop_dim, mean_range, std, spike_ts, device,
                 hidden_size):
        super(SpikeActorDeepCritic, self).__init__()
        # Welford running stats; lazily re-sized to the state dimensionality
        # on first call to normalize_state (while welford_state_n == 1).
        self.welford_state_mean = torch.zeros(1)
        self.welford_state_mean_diff = torch.ones(1)
        self.welford_state_n = 1
        self.device = device
        self.critic = CriticNet(hidden_size, state_shape)
        self.popsan = PopSpikeActor(state_shape, action_shape, encoder_pop_dim, decoder_pop_dim,
                                    hidden_size, mean_range, std, spike_ts, device)

    def forward(self, x, batch_size):
        """Return (action distribution, state value) for observations *x*."""
        value = self.critic(x)
        mu, std = self.popsan(x, batch_size)
        dist = Normal(mu, std)
        return dist, value

    def normalize_state(self, state, update=True):
        """
        Use Welford's algorithm to normalize a state, and optionally update the statistics
        for normalizing states using the new state, online.

        Only single (1-D) state vectors may update the statistics; batched
        input with update=True raises RuntimeError.
        """
        if self.welford_state_n == 1:
            self.welford_state_mean = torch.zeros(state.size(-1)).to(self.device)
            self.welford_state_mean_diff = torch.ones(state.size(-1)).to(self.device)
        if update:
            if len(state.size()) == 1:  # If we get a single state vector
                state_old = self.welford_state_mean
                self.welford_state_mean += (state - state_old) / self.welford_state_n
                self.welford_state_mean_diff += (state - state_old) * (state - state_old)
                self.welford_state_n += 1
            else:
                raise RuntimeError
        return (state - self.welford_state_mean) / sqrt(self.welford_state_mean_diff / self.welford_state_n)

    def copy_normalizer_stats(self, net):
        """Copy Welford normalization statistics from another network.

        BUG FIX: the original read ``net.self_state_mean``, an attribute that
        does not exist on these models; the running mean is stored as
        ``welford_state_mean``.
        """
        self.welford_state_mean = net.welford_state_mean
        self.welford_state_mean_diff = net.welford_state_mean_diff
        self.welford_state_n = net.welford_state_n
import torch.nn as nn
from torch import sqrt
from torch.distributions.normal import Normal
import sys
sys.path.append("../../")
from popsan_drl.popsan_ppo.popsan import PopSpikeActor
class CriticNet(nn.Module):
"""Critic network: can use for Q Net and V Net"""
def __init__(self, network_shape, state_shape):
"""
:param network_shape: list of hidden layer sizes
:param state_shape: shape of state
:param action_shape: shape of action
"""
super(CriticNet, self).__init__()
layer_num = len(network_shape)
self.model = [nn.Linear(state_shape, network_shape[0]),
nn.ReLU()]
if layer_num > 1:
for layer in range(layer_num-1):
self.model.extend(
[nn.Linear(network_shape[layer], network_shape[layer+1]),
nn.ReLU()])
self.model.extend([nn.Linear(network_shape[-1], 1)])
self.model = nn.Sequential(*self.model)
def forward(self, state):
out = self.model(state)
return out
class SpikeActorDeepCritic(nn.Module):
def __init__(self, state_shape, action_shape, encoder_pop_dim,
decoder_pop_dim, mean_range, std, spike_ts, device,
hidden_size):
super(SpikeActorDeepCritic, self).__init__()
self.welford_state_mean = torch.zeros(1)
self.welford_state_mean_diff = torch.ones(1)
self.welford_state_n = 1
self.device = device
self.critic = CriticNet(hidden_size, state_shape)
self.popsan = PopSpikeActor(state_shape, action_shape, encoder_pop_dim, decoder_pop_dim,
hidden_size, mean_range, std, spike_ts, device)
def forward(self, x, batch_size):
value = self.critic(x)
mu, std = self.popsan(x, batch_size)
dist = Normal(mu, std)
return dist, value
def normalize_state(self, state, update=True):
"""
Use Welford's algorithm to normalize a state, and optionally update the statistics
for normalizing states using the new state, online.
"""
if self.welford_state_n == 1:
self.welford_state_mean = torch.zeros(state.size(-1)).to(self.device)
self.welford_state_mean_diff = torch.ones(state.size(-1)).to(self.device)
if update:
if len(state.size()) == 1: # If we get a single state vector
state_old = self.welford_state_mean
self.welford_state_mean += (state - state_old) / self.welford_state_n
self.welford_state_mean_diff += (state - state_old) * (state - state_old)
self.welford_state_n += 1
else:
raise RuntimeError
return (state - self.welford_state_mean) / sqrt(self.welford_state_mean_diff / self.welford_state_n)
def copy_normalizer_stats(self, net):
self.welford_state_mean = net.self_state_mean
self.welford_state_mean_diff = net.welford_state_mean_diff
self.welford_state_n = net.welford_state_n | 0.845465 | 0.54468 |
import os
import unittest
from lxml import etree
from corpora.europarl.extractor import EuroparlExtractor, EuroparlPerfectExtractor, EuroparlRecentPastExtractor, \
EuroparlPoSExtractor, EuroparlSinceDurationExtractor, EuroparlFrenchArticleExtractor
from apps.extractor.perfectextractor import PAST
EUROPARL_DATA = os.path.join(os.path.dirname(__file__), 'data/europarl')
DCEP_DATA = os.path.join(os.path.dirname(__file__), 'data/dcep')
SWITCHBOARD_DATA = os.path.join(os.path.dirname(__file__), 'data/switchboard')
class TestEuroparlPerfectExtractor(unittest.TestCase):
def setUp(self):
# Paths to the aligned Dutch/English/French Europarl session files used by
# every test in this class.
self.nl_filename = os.path.join(EUROPARL_DATA, 'nl/ep-00-12-15.xml')
self.en_filename = os.path.join(EUROPARL_DATA, 'en/ep-00-12-15.xml')
self.fr_filename = os.path.join(EUROPARL_DATA, 'fr/ep-00-12-15.xml')
# Dutch-source extractor translating to English, plus its parsed document
# and alignment trees.
self.nl_extractor = EuroparlPerfectExtractor('nl', ['en'], search_in_to=True)
self.nl_tree = etree.parse(self.nl_filename)
self.nl_alignmenttrees, self.nl_translationtrees = self.nl_extractor.parse_alignment_trees(self.nl_filename)
# Mirror setup for the English-source -> Dutch direction.
self.en_extractor = EuroparlPerfectExtractor('en', ['nl'], search_in_to=True)
self.en_tree = etree.parse(self.en_filename)
self.en_alignmenttrees, self.en_translationtrees = self.en_extractor.parse_alignment_trees(self.en_filename)
def test_init(self):
# The Dutch extractor config must tag perfects as 'verbpapa' and list
# 'dunken' among the Dutch auxiliaries that take 'to be'.
self.assertEqual(self.nl_extractor.config.get('nl', 'perfect_tags'), 'verbpapa')
self.assertIn('dunken', self.nl_extractor.aux_be_list['nl'])
def test_get_translated_lines(self):
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '17')
self.assertEqual(from_lines, ['17'])
self.assertEqual(to_lines, ['11'])
self.assertEqual(align, '1 => 1')
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '18')
self.assertEqual(from_lines, ['18', '19'])
self.assertEqual(to_lines, ['12'])
self.assertEqual(align, '2 => 1')
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '57')
self.assertEqual(from_lines, ['57'])
self.assertEqual(to_lines, ['46', '47'])
self.assertEqual(align, '1 => 2')
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '9')
self.assertEqual(from_lines, ['9'])
self.assertEqual(to_lines, [])
self.assertEqual(align, '')
from_lines, to_lines, align = self.en_extractor.get_translated_lines(self.en_alignmenttrees, 'en', 'nl', '19')
self.assertEqual(from_lines, ['19'])
self.assertEqual(to_lines, ['27'])
self.assertEqual(align, '1 => 1')
from_lines, to_lines, align = self.en_extractor.get_translated_lines(self.en_alignmenttrees, 'en', 'nl', '8')
self.assertEqual(from_lines, ['8'])
self.assertEqual(to_lines, ['13', '14'])
self.assertEqual(align, '1 => 2')
from_lines, to_lines, align = self.en_extractor.get_translated_lines(self.en_alignmenttrees, 'en', 'nl', '234')
self.assertEqual(from_lines, ['234', '235'])
self.assertEqual(to_lines, ['290'])
self.assertEqual(align, '2 => 1')
def test_get_line_by_number(self):
xml_sentence, _, pp = self.nl_extractor.get_line_and_pp(self.nl_tree, 'nl', '4')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '4')
self.assertEqual(pp.get_sentence_id(), '4')
self.assertEqual(pp.verbs(), ['is', 'aangebroken'])
self.assertEqual(pp.verb_ids(), 'w4.9 w4.19')
self.assertEqual(pp.words_between(), 9)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.nl_extractor.get_line_and_pp(self.nl_tree, 'nl', '15')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '15')
self.assertEqual(pp.verbs(), ['heeft', 'bemoeid'])
self.assertEqual(pp.words_between(), 0)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.nl_extractor.get_line_and_pp(self.nl_translationtrees['en'], 'en', '6')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '6')
self.assertEqual(pp.verbs(), ['has', 'said'])
self.assertEqual(pp.words_between(), 1)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.en_extractor.get_line_and_pp(self.en_tree, 'en', '89')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '89')
self.assertEqual(pp.verbs(), ['has', 'been', 'mentioned'])
self.assertEqual(pp.words_between(), 1)
self.assertEqual(pp.words_between_verbs(), [0, 1, 0])
self.assertTrue(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.en_extractor.get_line_and_pp(self.en_tree, 'en', '121')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '121')
self.assertEqual(pp.verbs(), ['has', 'been', 'carrying'])
self.assertEqual(pp.words_between(), 0)
self.assertEqual(pp.words_between_verbs(), [0, 0, 0])
self.assertFalse(pp.is_passive)
self.assertTrue(pp.is_continuous)
xml_sentence, _, pp = self.en_extractor.get_line_and_pp(self.en_tree, 'en', '180')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '180')
self.assertEqual(pp.verbs(), ['has', 'brought'])
self.assertEqual(pp.words_between(), 1)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
def test_list_filenames(self):
files = self.nl_extractor.list_filenames(os.path.join(EUROPARL_DATA, 'nl'))
self.assertEqual([os.path.basename(f) for f in files], ['ep-00-12-15.xml'])
def test_recent_past_extraction(self):
fr_extractor = EuroparlRecentPastExtractor('fr', ['en', 'nl'])
results = fr_extractor.process_file(self.fr_filename)
self.assertEqual(len(results), 4)
self.assertEqual(results[0][3], u'vient de dire')
self.assertEqual(results[1][3], u'viens d\' aborder')
self.assertEqual(results[2][3], u'viens d\' évoquer')
self.assertEqual(results[3][3], u'vient d\' être dit')
def test_append_extractor(self):
perfect_extractor = EuroparlPerfectExtractor('en', ['nl'], search_in_to=False)
for_extractor = EuroparlPoSExtractor('en', ['nl'], lemmata=['for'])
year_extractor = EuroparlPoSExtractor('en', ['nl'], lemmata=['year'])
results = for_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 177)
for_extractor.add_extractor(year_extractor)
results = for_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 14)
for_extractor.add_extractor(perfect_extractor)
results = for_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 7)
self.assertEqual(results[0][3], u'has been focused')
def test_position(self):
when_extractor = EuroparlPoSExtractor('en', ['nl'], lemmata=['when'], position=1)
results = when_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 3)
def test_average_alignment_certainty(self):
extractor = EuroparlExtractor('en', ['nl', 'de'])
file_names = extractor.sort_by_alignment_certainty(extractor.list_filenames(os.path.join(DCEP_DATA, 'en')))
file_names = [os.path.basename(f) for f in file_names]
self.assertEqual(file_names[0], '20764633__IM-PRESS__20081211-STO-44307__EN.xml')
self.assertEqual(file_names[1], '16609396__IM-PRESS__20060905-STO-10339__EN.xml')
self.assertEqual(file_names[2], '16451293__IM-PRESS__20060131-IPR-04891__EN.xml')
def test_file_limit(self):
extractor = EuroparlExtractor('en', ['nl', 'de'], file_limit=2)
results = extractor.generate_results(os.path.join(DCEP_DATA, 'en'))
self.assertEqual(len(results), 55)
extractor = EuroparlExtractor('en', ['nl', 'de'], file_limit=1)
results = extractor.generate_results(os.path.join(DCEP_DATA, 'en'))
self.assertEqual(len(results), 37)
def test_tokens(self):
tokens_extractor = EuroparlPoSExtractor('en', ['nl'], tokens=[('w1.13', 'w1.15'), ('w2.5', 'w2.8')])
results = tokens_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 2)
self.assertEqual(results[0][3], u'a historic sitting')
self.assertEqual(results[1][3], u'my very great pleasure')
tokens_extractor = EuroparlPoSExtractor('en', ['nl'], tokens=[('w1.19', 'w1.17')])
self.assertRaises(ValueError, tokens_extractor.generate_results, os.path.join(EUROPARL_DATA, 'en'))
def test_metadata(self):
metadata_extractor = EuroparlPoSExtractor('en', [], lemmata=['when'],
metadata=[('topic', 'text'), ('damsl_act_tag', 's')])
results = metadata_extractor.generate_results(os.path.join(SWITCHBOARD_DATA, 'en'))
self.assertEqual(len(results), 5)
self.assertEqual(results[0][6], u'CHILD CARE')
self.assertEqual(results[0][7], u'sd')
self.assertEqual(results[4][7], u'qy')
def test_since(self):
metadata_extractor = EuroparlSinceDurationExtractor('nl', [])
results = metadata_extractor.generate_results(os.path.join(EUROPARL_DATA, 'nl'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0][3], u'sinds tien jaar')
def test_articles(self):
article_extractor = EuroparlFrenchArticleExtractor('fr', [])
results = article_extractor.generate_results(os.path.join(EUROPARL_DATA, 'fr'))
self.assertEqual(len(results), 2041)
self.assertEqual(results[0][2], u'indefinite partitive')
def test_past_perfect(self):
past_perfect_extractor = EuroparlPerfectExtractor('en', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 5)
self.assertEqual(results[0][3], u'had preordained')
past_perfect_extractor = EuroparlPerfectExtractor('nl', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(EUROPARL_DATA, 'nl'))
self.assertEqual(len(results), 5)
self.assertEqual(results[0][3], u'had gelegd')
past_perfect_extractor = EuroparlPerfectExtractor('fr', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(EUROPARL_DATA, 'fr'))
self.assertEqual(len(results), 9)
self.assertEqual(results[0][3], u'avait rétabli')
past_perfect_extractor = EuroparlPerfectExtractor('de', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(DCEP_DATA, 'de'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0][3], u'hatte beschlossen') | tests/test_europarl_extractor.py |
import os
import unittest
from lxml import etree
from corpora.europarl.extractor import EuroparlExtractor, EuroparlPerfectExtractor, EuroparlRecentPastExtractor, \
EuroparlPoSExtractor, EuroparlSinceDurationExtractor, EuroparlFrenchArticleExtractor
from apps.extractor.perfectextractor import PAST
EUROPARL_DATA = os.path.join(os.path.dirname(__file__), 'data/europarl')
DCEP_DATA = os.path.join(os.path.dirname(__file__), 'data/dcep')
SWITCHBOARD_DATA = os.path.join(os.path.dirname(__file__), 'data/switchboard')
class TestEuroparlPerfectExtractor(unittest.TestCase):
def setUp(self):
self.nl_filename = os.path.join(EUROPARL_DATA, 'nl/ep-00-12-15.xml')
self.en_filename = os.path.join(EUROPARL_DATA, 'en/ep-00-12-15.xml')
self.fr_filename = os.path.join(EUROPARL_DATA, 'fr/ep-00-12-15.xml')
self.nl_extractor = EuroparlPerfectExtractor('nl', ['en'], search_in_to=True)
self.nl_tree = etree.parse(self.nl_filename)
self.nl_alignmenttrees, self.nl_translationtrees = self.nl_extractor.parse_alignment_trees(self.nl_filename)
self.en_extractor = EuroparlPerfectExtractor('en', ['nl'], search_in_to=True)
self.en_tree = etree.parse(self.en_filename)
self.en_alignmenttrees, self.en_translationtrees = self.en_extractor.parse_alignment_trees(self.en_filename)
def test_init(self):
self.assertEqual(self.nl_extractor.config.get('nl', 'perfect_tags'), 'verbpapa')
self.assertIn('dunken', self.nl_extractor.aux_be_list['nl'])
def test_get_translated_lines(self):
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '17')
self.assertEqual(from_lines, ['17'])
self.assertEqual(to_lines, ['11'])
self.assertEqual(align, '1 => 1')
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '18')
self.assertEqual(from_lines, ['18', '19'])
self.assertEqual(to_lines, ['12'])
self.assertEqual(align, '2 => 1')
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '57')
self.assertEqual(from_lines, ['57'])
self.assertEqual(to_lines, ['46', '47'])
self.assertEqual(align, '1 => 2')
from_lines, to_lines, align = self.nl_extractor.get_translated_lines(self.nl_alignmenttrees, 'nl', 'en', '9')
self.assertEqual(from_lines, ['9'])
self.assertEqual(to_lines, [])
self.assertEqual(align, '')
from_lines, to_lines, align = self.en_extractor.get_translated_lines(self.en_alignmenttrees, 'en', 'nl', '19')
self.assertEqual(from_lines, ['19'])
self.assertEqual(to_lines, ['27'])
self.assertEqual(align, '1 => 1')
from_lines, to_lines, align = self.en_extractor.get_translated_lines(self.en_alignmenttrees, 'en', 'nl', '8')
self.assertEqual(from_lines, ['8'])
self.assertEqual(to_lines, ['13', '14'])
self.assertEqual(align, '1 => 2')
from_lines, to_lines, align = self.en_extractor.get_translated_lines(self.en_alignmenttrees, 'en', 'nl', '234')
self.assertEqual(from_lines, ['234', '235'])
self.assertEqual(to_lines, ['290'])
self.assertEqual(align, '2 => 1')
def test_get_line_by_number(self):
xml_sentence, _, pp = self.nl_extractor.get_line_and_pp(self.nl_tree, 'nl', '4')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '4')
self.assertEqual(pp.get_sentence_id(), '4')
self.assertEqual(pp.verbs(), ['is', 'aangebroken'])
self.assertEqual(pp.verb_ids(), 'w4.9 w4.19')
self.assertEqual(pp.words_between(), 9)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.nl_extractor.get_line_and_pp(self.nl_tree, 'nl', '15')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '15')
self.assertEqual(pp.verbs(), ['heeft', 'bemoeid'])
self.assertEqual(pp.words_between(), 0)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.nl_extractor.get_line_and_pp(self.nl_translationtrees['en'], 'en', '6')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '6')
self.assertEqual(pp.verbs(), ['has', 'said'])
self.assertEqual(pp.words_between(), 1)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.en_extractor.get_line_and_pp(self.en_tree, 'en', '89')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '89')
self.assertEqual(pp.verbs(), ['has', 'been', 'mentioned'])
self.assertEqual(pp.words_between(), 1)
self.assertEqual(pp.words_between_verbs(), [0, 1, 0])
self.assertTrue(pp.is_passive)
self.assertFalse(pp.is_continuous)
xml_sentence, _, pp = self.en_extractor.get_line_and_pp(self.en_tree, 'en', '121')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '121')
self.assertEqual(pp.verbs(), ['has', 'been', 'carrying'])
self.assertEqual(pp.words_between(), 0)
self.assertEqual(pp.words_between_verbs(), [0, 0, 0])
self.assertFalse(pp.is_passive)
self.assertTrue(pp.is_continuous)
xml_sentence, _, pp = self.en_extractor.get_line_and_pp(self.en_tree, 'en', '180')
self.assertEqual(etree.fromstring(xml_sentence).get('id'), '180')
self.assertEqual(pp.verbs(), ['has', 'brought'])
self.assertEqual(pp.words_between(), 1)
self.assertFalse(pp.is_passive)
self.assertFalse(pp.is_continuous)
def test_list_filenames(self):
files = self.nl_extractor.list_filenames(os.path.join(EUROPARL_DATA, 'nl'))
self.assertEqual([os.path.basename(f) for f in files], ['ep-00-12-15.xml'])
def test_recent_past_extraction(self):
fr_extractor = EuroparlRecentPastExtractor('fr', ['en', 'nl'])
results = fr_extractor.process_file(self.fr_filename)
self.assertEqual(len(results), 4)
self.assertEqual(results[0][3], u'vient de dire')
self.assertEqual(results[1][3], u'viens d\' aborder')
self.assertEqual(results[2][3], u'viens d\' évoquer')
self.assertEqual(results[3][3], u'vient d\' être dit')
def test_append_extractor(self):
perfect_extractor = EuroparlPerfectExtractor('en', ['nl'], search_in_to=False)
for_extractor = EuroparlPoSExtractor('en', ['nl'], lemmata=['for'])
year_extractor = EuroparlPoSExtractor('en', ['nl'], lemmata=['year'])
results = for_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 177)
for_extractor.add_extractor(year_extractor)
results = for_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 14)
for_extractor.add_extractor(perfect_extractor)
results = for_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 7)
self.assertEqual(results[0][3], u'has been focused')
def test_position(self):
when_extractor = EuroparlPoSExtractor('en', ['nl'], lemmata=['when'], position=1)
results = when_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 3)
def test_average_alignment_certainty(self):
extractor = EuroparlExtractor('en', ['nl', 'de'])
file_names = extractor.sort_by_alignment_certainty(extractor.list_filenames(os.path.join(DCEP_DATA, 'en')))
file_names = [os.path.basename(f) for f in file_names]
self.assertEqual(file_names[0], '20764633__IM-PRESS__20081211-STO-44307__EN.xml')
self.assertEqual(file_names[1], '16609396__IM-PRESS__20060905-STO-10339__EN.xml')
self.assertEqual(file_names[2], '16451293__IM-PRESS__20060131-IPR-04891__EN.xml')
def test_file_limit(self):
extractor = EuroparlExtractor('en', ['nl', 'de'], file_limit=2)
results = extractor.generate_results(os.path.join(DCEP_DATA, 'en'))
self.assertEqual(len(results), 55)
extractor = EuroparlExtractor('en', ['nl', 'de'], file_limit=1)
results = extractor.generate_results(os.path.join(DCEP_DATA, 'en'))
self.assertEqual(len(results), 37)
def test_tokens(self):
tokens_extractor = EuroparlPoSExtractor('en', ['nl'], tokens=[('w1.13', 'w1.15'), ('w2.5', 'w2.8')])
results = tokens_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 2)
self.assertEqual(results[0][3], u'a historic sitting')
self.assertEqual(results[1][3], u'my very great pleasure')
tokens_extractor = EuroparlPoSExtractor('en', ['nl'], tokens=[('w1.19', 'w1.17')])
self.assertRaises(ValueError, tokens_extractor.generate_results, os.path.join(EUROPARL_DATA, 'en'))
def test_metadata(self):
metadata_extractor = EuroparlPoSExtractor('en', [], lemmata=['when'],
metadata=[('topic', 'text'), ('damsl_act_tag', 's')])
results = metadata_extractor.generate_results(os.path.join(SWITCHBOARD_DATA, 'en'))
self.assertEqual(len(results), 5)
self.assertEqual(results[0][6], u'CHILD CARE')
self.assertEqual(results[0][7], u'sd')
self.assertEqual(results[4][7], u'qy')
def test_since(self):
metadata_extractor = EuroparlSinceDurationExtractor('nl', [])
results = metadata_extractor.generate_results(os.path.join(EUROPARL_DATA, 'nl'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0][3], u'sinds tien jaar')
def test_articles(self):
article_extractor = EuroparlFrenchArticleExtractor('fr', [])
results = article_extractor.generate_results(os.path.join(EUROPARL_DATA, 'fr'))
self.assertEqual(len(results), 2041)
self.assertEqual(results[0][2], u'indefinite partitive')
def test_past_perfect(self):
past_perfect_extractor = EuroparlPerfectExtractor('en', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(EUROPARL_DATA, 'en'))
self.assertEqual(len(results), 5)
self.assertEqual(results[0][3], u'had preordained')
past_perfect_extractor = EuroparlPerfectExtractor('nl', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(EUROPARL_DATA, 'nl'))
self.assertEqual(len(results), 5)
self.assertEqual(results[0][3], u'had gelegd')
past_perfect_extractor = EuroparlPerfectExtractor('fr', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(EUROPARL_DATA, 'fr'))
self.assertEqual(len(results), 9)
self.assertEqual(results[0][3], u'avait rétabli')
past_perfect_extractor = EuroparlPerfectExtractor('de', [], tense=PAST)
results = past_perfect_extractor.generate_results(os.path.join(DCEP_DATA, 'de'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0][3], u'hatte beschlossen') | 0.482429 | 0.249617 |
import inspect
class BaseType:
    """Abstract base for all type validators.

    Subclasses implement ``valid`` to decide whether a value matches.
    The ``|`` operator combines two validators into a ``OneOf`` union.
    """

    def valid(self, value):
        """Return True when *value* matches this type; must be overridden."""
        raise NotImplementedError()

    def __or__(self, other):
        # (A | B).valid(x) holds when either member accepts x.
        return OneOf([self, other])
class SimpleType(BaseType):
    """Validator wrapping a plain Python type (str, int, ...).

    ``valid`` is simply an ``isinstance`` check against the wrapped type.
    """

    def __init__(self, type_, label=None):
        self.type_ = type_
        # Fall back to the type's own string form when no label is given.
        self.label = label or str(type_)

    def valid(self, value):
        return isinstance(value, self.type_)

    def __repr__(self):
        return self.label
class AnyType(BaseType):
    """Validator that accepts every value unconditionally."""

    def valid(self, value):
        return True

    def __repr__(self):
        return "Any"
class Literal(BaseType):
    """Validator matching one exact value.

        Plus = Literal("+")
        print(Plus.valid("+"))  # True
    """

    def __init__(self, value, label=None):
        self.value = value
        self.label = label

    def valid(self, value):
        return self.value == value

    def __repr__(self):
        if self.label:
            return self.label
        return "<{}>".format(self.value)
class List(BaseType):
    """Validator for a homogeneous list.

    Every element of the list must match the given element type:

        Numbers = List(Integer)
        print(Numbers.valid([1, 2, 3]))  # True
    """

    def __init__(self, type_):
        self.type_ = type_

    def valid(self, value):
        if not isinstance(value, list):
            return False
        return all(self.type_.valid(item) for item in value)

    def __repr__(self):
        return "List({})".format(self.type_)
class Tuple(BaseType):
    """Validator for a fixed-length record of positionally typed values.

    Note: element types are passed as separate arguments, not as a list:

        Point = Tuple(Integer, Integer)
        Point.valid([1, 2])  # True
    """
    def __init__(self, *types):
        self.types = types
    def valid(self, value):
        # Lists and tuples both qualify, as long as the length matches and
        # every position matches its declared type.
        return isinstance(value, (list, tuple)) \
            and len(value) == len(self.types) \
            and all(t.valid(v) for t, v in zip(self.types, value))
    def __repr__(self):
        return "Tuple({})".format(", ".join(str(t) for t in self.types))
class Record(BaseType):
    """Validator for a dict with a fixed set of typed keys.

        Point = Record({"x": Integer, "y": Integer})
        print(Point.valid({"x": 1, "y": 2}))  # True

    Extra keys in the value are ignored; only schema keys are checked.
    """

    def __init__(self, schema):
        self.schema = schema

    def valid(self, value):
        if not isinstance(value, dict):
            return False
        for key, type_ in self.schema.items():
            if key not in value or not type_.valid(value[key]):
                return False
        return True

    def __repr__(self):
        return "Record({})".format(self.schema)
class Dict(BaseType):
    """Validator for homogeneous key-value mappings.

    Every key must match *key_type* and every value must match *value_type*:

        PriceList = Dict(String, Float)
        PriceList.valid({"apple": 10.0, "mango": 10.0})  # True

    Bug fix: the previous body was a copy-paste of ``Record`` — it took a
    single ``schema`` argument and its repr said "Record" — so the documented
    ``Dict(String, Float)`` call raised a TypeError. This implements the
    contract its own docstring always described.
    """

    def __init__(self, key_type, value_type):
        self.key_type = key_type
        self.value_type = value_type

    def valid(self, value):
        return isinstance(value, dict) \
            and all(self.key_type.valid(k) and self.value_type.valid(v)
                    for k, v in value.items())

    def __repr__(self):
        return "Dict({}, {})".format(self.key_type, self.value_type)
class OneOf(BaseType):
    """Union validator: matches when any member type matches.

        Value = Integer | List(Integer)
        print(Value.valid(1))          # True
        print(Value.valid([1, 2, 3]))  # True
    """

    def __init__(self, types):
        self.types = types

    def valid(self, value):
        for type_ in self.types:
            if type_.valid(value):
                return True
        return False

    def __or__(self, other):
        # Flatten so that (A | B) | C stays a single OneOf with three members.
        return OneOf(self.types + [other])

    def __repr__(self):
        return " | ".join(str(t) for t in self.types)
class Reference(BaseType):
    """A forward reference to a type, for recursive data structures.

    When a recursive type must mention itself in its own definition, create
    an empty Reference first and fill it in with ``set``:

        BinOp = Literal("+") | Literal("*")
        Expr = Reference()
        Expr.set(
            Integer
            | Record({"left": Expr, "op": BinOp, "right": Expr})
        )

        print(Expr.valid(1))  # True
        print(Expr.valid({"left": 1, "op": "+", "right": 2}))  # True
    """

    def __init__(self, label=None):
        self.type_ = None
        self.label = label

    def set(self, type_):
        """Bind the referenced type."""
        self.type_ = type_

    def __irshift__(self, type_):
        # Allow ``ref >>= type_`` as sugar for ``ref.set(type_)``.
        self.type_ = type_
        return self

    def valid(self, value):
        if not self.type_:
            # Bug fix: the original expression bound as
            # ``("Undefined Reference: " + self.label) or "<Unnamed>"``, which
            # raised a TypeError (str + None) for unlabelled references
            # instead of producing the intended message.
            raise Exception("Undefined Reference: " + (self.label or "<Unnamed>"))
        return self.type_.valid(value)

    def __repr__(self):
        if self.label:
            return self.label
        if self.type_:
            return repr(self.type_)
        else:
            return "Reference()"
# Ready-made validators for the primitive Python types.
String = SimpleType(str, label="String")
Integer = SimpleType(int, label="Integer")
Float = SimpleType(float, label="Float")
Boolean = SimpleType(bool, label="Boolean")
Nothing = SimpleType(type(None), label="Nothing")
Any = AnyType()
# It is more natural to call it a Type when declaring it.
Type = Reference
# Global registry mapping a function name to its MultiMethod dispatcher,
# populated by the ``method`` decorator below.
_methods = {}
class MultiMethod:
    """Dispatches a call to one of several registered methods by argument type.

    Registered methods declare their argument types via annotations; at call
    time, the first registered method whose type tuple matches the actual
    arguments wins. See the ``method`` decorator for usage.
    """

    def __init__(self, name):
        self.name = name
        self._methods = []
        # Arity shared by every registered method; -1 until the first one.
        self.nargs = -1

    def add_method(self, method):
        """Register *method*; its annotations define the matching types."""
        specs = inspect.getfullargspec(method)
        # Bug fix: the original condition tested ``specs.varargs`` twice and
        # never ``specs.varkw``, so methods taking **kwargs slipped through
        # despite the error message promising otherwise.
        if specs.varargs or specs.varkw or specs.kwonlyargs:
            raise Exception("hypertype methods supports only simple arguments. varargs, kwargs etc. are not supported.")
        if self.nargs >= 0 and self.nargs != len(specs.args):
            raise Exception(
                "Method {} is expected to have {} args. Found {}.".format(
                    self.name, self.nargs, len(specs.args)))
        # Unannotated parameters match anything.
        argtypes = [specs.annotations.get(a, Any) for a in specs.args]
        t = Tuple(*argtypes)
        self._methods.append((t, method))
        self.nargs = len(specs.args)

    def __call__(self, *args):
        if len(args) != self.nargs:
            raise TypeError(
                "method {} expects {} args, given {}".format(
                    self.name,
                    self.nargs,
                    len(args)))
        # First registered match wins.
        for t, method in self._methods:
            valid = t.valid(args)
            if valid:
                return method(*args)
        raise ValueError("Unable to find a matching method for {}".format(self.name))

    def __repr__(self):
        return "Method:{}".format(self.name)
def method(f):
    """Decorator to mark a function as a hypertype method.

    Hypertype method implements multiple-dispatch or function polymorphism
    based on the type of the arguments. The types of the arguments are
    specified using the function annotations.

    This is somewhat like the pattern-matching in Haskell, as the types
    are nothing but the shape of the data.

        @method
        def display(n: Integer):
            print(n, "is an integer")

        @method
        def display(s: String):
            print(s, "is a string")

        display(42)       # 42 is an integer
        display("Magic")  # Magic is a string
    """
    # Reuse the dispatcher registered under this name, or create one.
    m = _methods.setdefault(f.__name__, MultiMethod(f.__name__))
    m.add_method(f)
    return m
def nested_apply(value, method):
    """Apply *method* to a value, mapping over list items or dict values.

    Lists yield a new list of transformed items, dicts a new dict with the
    same keys and transformed values; any other value is transformed directly.
    """
    if isinstance(value, dict):
        return {key: method(item) for key, item in value.items()}
    if isinstance(value, list):
        return [method(item) for item in value]
    return method(value)
class BaseType:
"""Base class for all types.
"""
def valid(self, value):
raise NotImplementedError()
def __or__(self, other):
return OneOf([self, other])
class SimpleType(BaseType):
"""Type class to the simple types like string, integer etc.
"""
def __init__(self, type_, label=None):
self.type_ = type_
self.label = label or str(type_)
def valid(self, value):
return isinstance(value, self.type_)
def __repr__(self):
return self.label
class AnyType(BaseType):
"""Type class to match any value.
"""
def valid(self, value):
return True
def __repr__(self):
return "Any"
class Literal(BaseType):
"""Type class to match a literal value.
Plus = Literal("+")
print(Plus.valid("+")) # True
"""
def __init__(self, value, label=None):
self.value = value
self.label = label
def valid(self, value):
return self.value == value
def __repr__(self):
return self.label or "<{}>".format(self.value)
class List(BaseType):
"""Type class to represent a list of values.
List is a homogeneous collection and each element of
the collection must be of the specified type.
Numbers = List(Integer)
print(Numbers.valid([1, 2, 3]) # True
"""
def __init__(self, type_):
self.type_ = type_
def valid(self, value):
return isinstance(value, list) and all(self.type_.valid(v) for v in value)
def __repr__(self):
return "List({})".format(self.type_)
class Tuple(BaseType):
"""Tuple represents is a fixed length record.
Point = Tuple([Integer, Integer])
Point.valid([1, 2]) # True
"""
def __init__(self, *types):
self.types = types
def valid(self, value):
return isinstance(value, (list, tuple)) \
and len(value) == len(self.types) \
and all(t.valid(v) for t, v in zip(self.types, value))
def __repr__(self):
return "Tuple({})".format(", ".join(str(t) for t in self.types))
class Record(BaseType):
"""Type class to represent a record with fixed keys.
Point = Record({"x": Integer, "y": Integer})
print(Point.valid({"x": 1, "y": 2})) # True
"""
def __init__(self, schema):
self.schema = schema
def valid(self, value):
return isinstance(value, dict) \
and all(k in value and type_.valid(value[k]) for k, type_ in self.schema.items())
def __repr__(self):
return "Record({})".format(self.schema)
class Dict(BaseType):
"""Type class to represent homogeneous key-value pairs.
PriceList = Dict(String, Float)
PriceList.valid({
"apple": 10.0,
"mango": 10.0,
}) // True
"""
def __init__(self, schema):
self.schema = schema
def valid(self, value):
return isinstance(value, dict) \
and all(k in value and type_.valid(value[k]) for k, type_ in self.schema.items())
def __repr__(self):
return "Record({})".format(self.schema)
class OneOf(BaseType):
"""Type class to match one of the given types.
Value = Integer | List[Integer]
print(Value.valid(1)) # True
print(Value.valid([1, 2, 3])) # True
"""
def __init__(self, types):
self.types = types
def valid(self, value):
return any(t.valid(value) for t in self.types)
def __or__(self, other):
return OneOf(self.types + [other])
def __repr__(self):
return " | ".join(str(t) for t in self.types)
class Reference(BaseType):
"""The Reference represents a Forward Reference to a type.
When defining types for recursive data structures, it is
required to use the type in defining itself. In Python, it
wouldn't be possible to it that and way and Reference solves
that issues.
BinOp = Literal("+") | Literal("*")
Expr = Reference()
Expr.set(
Integer
| Record({"left": Expr, "op": BinOp, "right": Expr})
)
print(Expr.valid(1)) # True
print(Expr.valid({"left": 1, "op": "+", "right": 2})) # True
print(Expr.valid({
"left": 1,
"op": "+",
"right": {
"left": 2,
"op": "*",
"right": 3
}})) # True
"""
def __init__(self, label=None):
self.type_ = None
self.label = label
def set(self, type_):
self.type_ = type_
def __irshift__(self, type_):
self.type_ = type_
return self
def valid(self, value):
if not self.type_:
raise Exception("Undefined Reference: " + self.label or "<Unnamed>")
return self.type_.valid(value)
def __repr__(self):
if self.label:
return self.label
if self.type_:
return repr(self.type_)
else:
return "Reference()"
String = SimpleType(str, label="String")
Integer = SimpleType(int, label="Integer")
Float = SimpleType(float, label="Float")
Boolean = SimpleType(bool, label="Boolean")
Nothing = SimpleType(type(None), label="Nothing")
Any = AnyType()
# It is more natural to call it a Type when declaring it.
Type = Reference
_methods = {}
class MultiMethod:
"""MultiMethod implements function polymorphism based on the
type of the data.
See the method decorator for more details.
"""
def __init__(self, name):
self.name = name
self._methods = []
self.nargs = -1
def add_method(self, method):
specs = inspect.getfullargspec(method)
if specs.varargs or specs.varargs or specs.kwonlyargs:
raise Exception("hyptertype methods supports only simple arguments. varargs, kwargs etc. are not supported.")
if self.nargs >= 0 and self.nargs != len(specs.args):
raise Exception(
"Method {} is expected to have {} args. Found {}.".format(
self.name, self.nargs, len(specs.args)))
argtypes = [specs.annotations.get(a, Any) for a in specs.args]
t = Tuple(*argtypes)
self._methods.append((t, method))
self.nargs = len(specs.args)
def __call__(self, *args):
if len(args) != self.nargs:
raise TypeError(
"method {} expects {} args, given {}".format(
self.name,
self.nargs,
len(args)))
for t, method in self._methods:
valid = t.valid(args)
if valid:
return method(*args)
raise ValueError("Unable to find a matching method for {}".format(self.name))
def __repr__(self):
return "Method:{}".format(self.name)
def method(f):
"""Decorator to mark a function as a hypertype method.
Hypertype method implements multiple-dispatch or function polymorphism
based on the type of the arguments. The types of the arguments are
specified using the function annotations.
This is some what like the pattern-matching in Haskell as we the types
are nothing but the shape of the data.
@method
def display(n Integer):
print(n, "is an integer")
@method
def display(s String):
print(s, "is a string")
display(42) # 42 is an integer
display("Magic") # Magic is a string
"""
m = _methods.setdefault(f.__name__, MultiMethod(f.__name__))
m.add_method(f)
return m
def nested_apply(value, method):
if isinstance(value, list):
return [method(v) for v in value]
elif isinstance(value, dict):
return {k: method(v) for k, v in value.items()}
else:
return method(value) | 0.744378 | 0.452536 |
from datetime import date, timedelta
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.core import SUN, MON, THU, SAT
class UnitedStatesCalendar(WesternCalendar, ChristianMixin):
    """USA federal holiday calendar."""
    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (7, 4, 'Independence Day'),
        (11, 11, 'Veterans Day'),
    )

    @staticmethod
    def is_presidential_year(year):
        # Presidential elections are held every fourth year (2020, 2024, ...).
        return (year % 4) == 0

    def get_variable_days(self, year):
        """Return the floating (weekday-anchored) holidays for *year*.

        Fixes: the MLK entry's label was a garbled '<NAME>' placeholder —
        the 3rd Monday of January is Martin Luther King, Jr. Day — and
        'Colombus Day' was misspelled.
        """
        # usual variable days
        days = super(UnitedStatesCalendar, self).get_variable_days(year)
        days += [
            (WesternCalendar.get_nth_weekday_in_month(year, 1, MON, 3),
                'Martin Luther King, Jr. Day'),
            (WesternCalendar.get_nth_weekday_in_month(year, 2, MON, 3),
                "Washington's Birthday"),
            (WesternCalendar.get_last_weekday_in_month(year, 5, MON),
                "Memorial Day"),
            (WesternCalendar.get_nth_weekday_in_month(year, 9, MON),
                "Labor Day"),
            (WesternCalendar.get_nth_weekday_in_month(year, 10, MON, 2),
                "Columbus Day"),
            (WesternCalendar.get_nth_weekday_in_month(year, 11, THU, 4),
                "Thanksgiving Day"),
        ]
        # Inauguration Day: January 20 of the year following a presidential
        # election year, moved to January 21 when the 20th is a Sunday.
        if UnitedStatesCalendar.is_presidential_year(year - 1):
            inauguration_day = date(year, 1, 20)
            if inauguration_day.weekday() == SUN:
                inauguration_day = date(year, 1, 21)
            days.append((inauguration_day, "Inauguration Day"))
        return days
class BrazilCalendar(WesternCalendar, ChristianMixin):
    """Brazil national holiday calendar."""

    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (4, 21, "Tiradentes' Day"),
        (5, 1, "Labour Day"),
        # was "Independance Day" — spelling fixed
        (9, 7, "Independence Day"),
        (10, 12, "Our Lady of Aparecida"),
        (11, 2, "All Souls' Day"),
        (11, 15, "Republic Day"),
    )
class BrazilSaoPaoloStateCalendar(BrazilCalendar):
    """Sao Paulo state calendar: Brazil's national holidays plus the
    state's own fixed holiday."""

    FIXED_HOLIDAYS = BrazilCalendar.FIXED_HOLIDAYS + (
        (7, 9, "Constitutional Revolution of 1932"),
    )
class BrazilSaoPaoloCityCalendar(BrazilSaoPaoloStateCalendar):
    """Sao Paulo city calendar: state holidays plus municipal fixed
    holidays and the Easter-relative movable feasts."""

    FIXED_HOLIDAYS = BrazilSaoPaoloStateCalendar.FIXED_HOLIDAYS + (
        # was "Aniversary" — spelling fixed
        (1, 25, "Anniversary of the city of São Paulo"),
        (11, 20, "Dia da Consciência Negra"),
    )
    include_easter_sunday = True

    def get_corpus_christi(self, year):
        """Corpus Christi falls 60 days after Easter Sunday."""
        return self.get_easter_sunday(year) + timedelta(days=60)

    def get_carnaval(self, year):
        """Carnaval falls 47 days before Easter Sunday."""
        return self.get_easter_sunday(year) - timedelta(days=47)

    def get_variable_days(self, year):
        """Return variable holidays, adding the Easter-relative ones."""
        days = super(BrazilSaoPaoloCityCalendar, self).get_variable_days(year)
        days.append((self.get_carnaval(year), "Carnaval"))
        days.append((self.get_good_friday(year), "Sexta-feira da Paixão"))
        days.append((self.get_corpus_christi(year), "Corpus Christi"))
        return days
class MexicoCalendar(WesternCalendar, ChristianMixin):
    """Mexico holiday calendar, with weekend substitution rules."""

    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (5, 1, "Labour Day"),
        # was "Independance Day" — spelling fixed
        (9, 16, "Independence Day"),
    )

    def get_variable_days(self, year):
        """Return the Monday-anchored variable holidays for *year*."""
        days = super(MexicoCalendar, self).get_variable_days(year)
        days.append(
            (MexicoCalendar.get_nth_weekday_in_month(year, 2, MON),
             "Constitution Day"))
        # label restored from anonymized placeholder: 3rd Monday of
        # March is Benito Juárez's birthday
        days.append(
            (MexicoCalendar.get_nth_weekday_in_month(year, 3, MON, 3),
             "Benito Juárez's birthday"))
        days.append(
            (MexicoCalendar.get_nth_weekday_in_month(year, 11, MON, 3),
             "Revolution Day"))
        return days

    def get_calendar_holidays(self, year):
        """Return holidays plus weekend substitution days.

        A statutory day on Sunday shifts to the following Monday; one
        on Saturday shifts to the preceding Friday.
        """
        days = super(MexicoCalendar, self).get_calendar_holidays(year)
        # If any statutory day is on Sunday, the monday is off
        # If it's on a Saturday, the Friday is off
        # Iterate over a snapshot: appending to `days` while iterating
        # it also fed the substitute days back through the loop (the
        # result was the same only because substitutes always land on
        # Friday or Monday).
        for day, label in list(days):
            if day.weekday() == SAT:
                days.append((day - timedelta(days=1), "%s substitute" % label))
            elif day.weekday() == SUN:
                days.append((day + timedelta(days=1), "%s substitute" % label))
        # Extra: if new year's day is a saturday, the friday before is off.
        # BUGFIX: was `if next_new_year.weekday():`, which is true for
        # every weekday except Monday — the comment shows Saturday was
        # the intended condition.
        next_new_year = date(year + 1, 1, 1)
        if next_new_year.weekday() == SAT:
            days.append((date(year, 12, 31), "New Year Day substitute"))
        return days
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.core import SUN, MON, THU, SAT
class UnitedStatesCalendar(WesternCalendar, ChristianMixin):
    """USA federal holiday calendar."""

    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (7, 4, 'Independence Day'),
        (11, 11, 'Veterans Day'),
    )

    @staticmethod
    def is_presidential_year(year):
        """Return True for presidential-election years (every 4th year)."""
        return (year % 4) == 0

    def get_variable_days(self, year):
        """Return the list of (date, label) variable holidays for *year*."""
        # usual variable days
        days = super(UnitedStatesCalendar, self).get_variable_days(year)
        days += [
            # label restored from anonymized placeholder: 3rd Monday of
            # January is Martin Luther King, Jr. Day
            (WesternCalendar.get_nth_weekday_in_month(year, 1, MON, 3),
                'Martin Luther King, Jr. Day'),
            (WesternCalendar.get_nth_weekday_in_month(year, 2, MON, 3),
                "Washington's Birthday"),
            (WesternCalendar.get_last_weekday_in_month(year, 5, MON),
                "Memorial Day"),
            # default n — presumably the first Monday of September (TODO
            # confirm against WesternCalendar.get_nth_weekday_in_month)
            (WesternCalendar.get_nth_weekday_in_month(year, 9, MON),
                "Labor Day"),
            # was "Colombus Day" — spelling fixed
            (WesternCalendar.get_nth_weekday_in_month(year, 10, MON, 2),
                "Columbus Day"),
            (WesternCalendar.get_nth_weekday_in_month(year, 11, THU, 4),
                "Thanksgiving Day"),
        ]
        # Inauguration day: January 20 following a presidential-election
        # year, pushed to the 21st when the 20th falls on a Sunday.
        if UnitedStatesCalendar.is_presidential_year(year - 1):
            inauguration_day = date(year, 1, 20)
            if inauguration_day.weekday() == SUN:
                inauguration_day = date(year, 1, 21)
            days.append((inauguration_day, "Inauguration Day"))
        return days
class BrazilCalendar(WesternCalendar, ChristianMixin):
    """Brazil national holiday calendar."""

    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (4, 21, "Tiradentes' Day"),
        (5, 1, "Labour Day"),
        # was "Independance Day" — spelling fixed
        (9, 7, "Independence Day"),
        (10, 12, "Our Lady of Aparecida"),
        (11, 2, "All Souls' Day"),
        (11, 15, "Republic Day"),
    )
class BrazilSaoPaoloStateCalendar(BrazilCalendar):
    """Sao Paulo state calendar: Brazil's national holidays plus the
    state's own fixed holiday."""

    FIXED_HOLIDAYS = BrazilCalendar.FIXED_HOLIDAYS + (
        (7, 9, "Constitutional Revolution of 1932"),
    )
class BrazilSaoPaoloCityCalendar(BrazilSaoPaoloStateCalendar):
    """Sao Paulo city calendar: state holidays plus municipal fixed
    holidays and the Easter-relative movable feasts."""

    FIXED_HOLIDAYS = BrazilSaoPaoloStateCalendar.FIXED_HOLIDAYS + (
        # was "Aniversary" — spelling fixed
        (1, 25, "Anniversary of the city of São Paulo"),
        (11, 20, "Dia da Consciência Negra"),
    )
    include_easter_sunday = True

    def get_corpus_christi(self, year):
        """Corpus Christi falls 60 days after Easter Sunday."""
        return self.get_easter_sunday(year) + timedelta(days=60)

    def get_carnaval(self, year):
        """Carnaval falls 47 days before Easter Sunday."""
        return self.get_easter_sunday(year) - timedelta(days=47)

    def get_variable_days(self, year):
        """Return variable holidays, adding the Easter-relative ones."""
        days = super(BrazilSaoPaoloCityCalendar, self).get_variable_days(year)
        days.append((self.get_carnaval(year), "Carnaval"))
        days.append((self.get_good_friday(year), "Sexta-feira da Paixão"))
        days.append((self.get_corpus_christi(year), "Corpus Christi"))
        return days
class MexicoCalendar(WesternCalendar, ChristianMixin):
    """Mexico holiday calendar, with weekend substitution rules."""

    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (5, 1, "Labour Day"),
        # was "Independance Day" — spelling fixed
        (9, 16, "Independence Day"),
    )

    def get_variable_days(self, year):
        """Return the Monday-anchored variable holidays for *year*."""
        days = super(MexicoCalendar, self).get_variable_days(year)
        days.append(
            (MexicoCalendar.get_nth_weekday_in_month(year, 2, MON),
             "Constitution Day"))
        # label restored from anonymized placeholder: 3rd Monday of
        # March is Benito Juárez's birthday
        days.append(
            (MexicoCalendar.get_nth_weekday_in_month(year, 3, MON, 3),
             "Benito Juárez's birthday"))
        days.append(
            (MexicoCalendar.get_nth_weekday_in_month(year, 11, MON, 3),
             "Revolution Day"))
        return days

    def get_calendar_holidays(self, year):
        """Return holidays plus weekend substitution days.

        A statutory day on Sunday shifts to the following Monday; one
        on Saturday shifts to the preceding Friday.
        """
        days = super(MexicoCalendar, self).get_calendar_holidays(year)
        # If any statutory day is on Sunday, the monday is off
        # If it's on a Saturday, the Friday is off
        # Iterate over a snapshot: appending to `days` while iterating
        # it also fed the substitute days back through the loop (the
        # result was the same only because substitutes always land on
        # Friday or Monday).
        for day, label in list(days):
            if day.weekday() == SAT:
                days.append((day - timedelta(days=1), "%s substitute" % label))
            elif day.weekday() == SUN:
                days.append((day + timedelta(days=1), "%s substitute" % label))
        # Extra: if new year's day is a saturday, the friday before is off.
        # BUGFIX: was `if next_new_year.weekday():`, which is true for
        # every weekday except Monday — the comment shows Saturday was
        # the intended condition.
        next_new_year = date(year + 1, 1, 1)
        if next_new_year.weekday() == SAT:
            days.append((date(year, 12, 31), "New Year Day substitute"))
        return days
import math
import traceback
from pathlib import Path
from typing import Any, Callable, Iterator, List, NoReturn, Optional, Union
from seutil import IOUtils, LoggingUtils
from tqdm import tqdm
# Module-wide logger, named after this module.
logger = LoggingUtils.get_logger(__name__)
class FilesManager:
    """
    Handles the loading/dumping of files in a dataset.

    All files live under ``data_dir``.  Data can be stored as a single
    file, or -- in "batched" mode -- as a directory of
    ``batch-<i>.<ext>`` files holding ``per_batch`` items each.
    """

    # Well-known relative paths within a dataset directory.
    ALL_LEMMAS_BACKEND_SEXP_TRANSFORMATIONS = "all-lemmas-bsexp-transformations"
    ALL_LEMMAS_FOREEND_SEXP_TRANSFORMATIONS = "all-lemmas-fsexp-transformations"
    COQ_DOCUMENTS = "coq-documents"
    LEMMAS = "lemmas"
    LEMMAS_BACKEND_SEXP_TRANSFORMATIONS = "lemmas-bsexp-transformations"
    LEMMAS_FILTERED = "lemmas-filtered"
    LEMMAS_FOREEND_SEXP_TRANSFORMATIONS = "lemmas-fsexp-transformations"
    DATA_INDEXES = "data-indexes"
    RAW_FILES = "raw-files"
    ORIGINAL_FILES = "original-files"
    DEFINITIONS = "definitions"

    def __init__(self, data_dir: Path):
        """Create a manager rooted at *data_dir* (created if missing)."""
        self.data_dir = data_dir
        self.data_dir.mkdir(parents=True, exist_ok=True)
        return

    def clean_path(self, rel_path: Union[str, List[str]]):
        """Delete whatever currently exists at *rel_path*."""
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if abs_path.exists():
            logger.info(f"Removing existing things at {abs_path}")
            IOUtils.rm(abs_path)
        # end if
        return

    @classmethod
    def is_json_format(cls, fmt: IOUtils.Format) -> bool:
        """Return True if *fmt* is one of the json-based formats."""
        return fmt in [IOUtils.Format.json, IOUtils.Format.jsonPretty, IOUtils.Format.jsonNoSort]

    def dump_data(
            self,
            rel_path: Union[str, List[str]],
            data: Any,
            fmt: IOUtils.Format,
            is_batched: bool = False,
            per_batch: int = 100,
            exist_ok: bool = False,
    ):
        """Serialize *data* to *rel_path* under the data dir.

        :param rel_path: path relative to the data dir (str or parts).
        :param data: object to dump; in batched mode it must support
            ``len()`` and slicing.
        :param fmt: serialization format; json formats are jsonfy-ed
            before dumping.
        :param is_batched: when True, write a directory of
            ``batch-<i>.<ext>`` files of *per_batch* items each.
        :param exist_ok: when False, refuse to overwrite existing data.
        :raises IOError: if the target exists and *exist_ok* is False.
        """
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if abs_path.exists() and not exist_ok:
            raise IOError(f"Cannot rewrite existing data at {abs_path}")
        abs_path.parent.mkdir(parents=True, exist_ok=True)
        if not is_batched:
            if self.is_json_format(fmt):
                data = IOUtils.jsonfy(data)
            IOUtils.dump(abs_path, data, fmt)
        else:
            # In batched mode, the data need to be slice-able and sizable
            IOUtils.rm(abs_path)  # drop any previous batch directory
            abs_path.mkdir(parents=True)
            for batch_i in tqdm(range(math.ceil(len(data) / per_batch))):
                data_batch = data[per_batch * batch_i: per_batch * (batch_i + 1)]
                if self.is_json_format(fmt):
                    data_batch = IOUtils.jsonfy(data_batch)
                IOUtils.dump(abs_path / f"batch-{batch_i}.{fmt.get_extension()}", data_batch, fmt)
        return

    def load_data(
            self,
            rel_path: Union[str, List[str]],
            fmt: IOUtils.Format,
            is_batched: bool = False,
            clz=None,
    ) -> Any:
        """Deserialize and return the data stored at *rel_path*.

        :param clz: target class for dejsonfy-ing json formats; a
            warning is logged if omitted for a json format.
        :raises IOError: if nothing exists at *rel_path*.
        """
        if self.is_json_format(fmt) and clz is None:
            logger.warning(f"Load data from {rel_path} with json format, but did not specify clz (at {traceback.format_stack()})")
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if not abs_path.exists():
            raise IOError(f"Cannot find data at {abs_path}")
        if not is_batched:
            data = IOUtils.load(abs_path, fmt)
            if self.is_json_format(fmt) and clz is not None:
                data = IOUtils.dejsonfy(data, clz)
            return data
        else:
            data = list()
            # Load batch files in numeric order ("batch-<i>.<ext>").
            batch_numbers = sorted([int(str(f.stem).split("-")[1]) for f in abs_path.iterdir()])
            for batch_number in tqdm(batch_numbers):
                batch_file = abs_path / f"batch-{batch_number}.{fmt.get_extension()}"
                data_batch = IOUtils.load(batch_file, fmt)
                if self.is_json_format(fmt) and clz is not None:
                    data_batch = IOUtils.dejsonfy(data_batch, clz)
                data.extend(data_batch)
            return data

    def iter_batched_data(
            self,
            rel_path: Union[str, List[str]],
            fmt: IOUtils.Format,
            clz=None,
    ) -> Iterator:
        """Lazily yield items from batched data at *rel_path*.

        Unlike :meth:`load_data`, only one batch is held in memory at
        a time.

        :raises IOError: if nothing exists at *rel_path*.
        """
        if self.is_json_format(fmt) and clz is None:
            logger.warning(f"Load data from {rel_path} with json format, but did not specify clz")
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if not abs_path.exists():
            raise IOError(f"Cannot find data at {abs_path}")
        batch_numbers = sorted([int(str(f.stem).split("-")[1]) for f in abs_path.iterdir()])
        for batch_number in batch_numbers:
            batch_file = abs_path / f"batch-{batch_number}.{fmt.get_extension()}"
            for data_entry in IOUtils.load(batch_file, fmt):
                if self.is_json_format(fmt) and clz is not None:
                    data_entry = IOUtils.dejsonfy(data_entry, clz)
                # end if
                yield data_entry

    def dump_ckpt(
            self,
            rel_path: Union[str, List[str]],
            obj: Any,
            ckpt_id: int,
            dump_func: Callable[[Any, str], None],
            ckpt_keep_max: int = 5,
    ) -> None:
        """Save *obj* as checkpoint *ckpt_id*, pruning old checkpoints.

        (Annotations fixed: these callables/returns return normally, so
        ``None`` — not ``NoReturn`` — is the correct annotation.)

        :param dump_func: callable ``(obj, file_name)`` performing the
            actual serialization.
        :param ckpt_keep_max: number of most recent checkpoints to
            keep; -1 keeps everything.
        """
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        abs_path.mkdir(parents=True, exist_ok=True)
        ckpt_file_name = str(abs_path / str(ckpt_id))
        dump_func(obj, ckpt_file_name)
        # Remove older checkpoints
        if ckpt_keep_max != -1:
            # NOTE(review): the loop variable shadows the `ckpt_id`
            # parameter; harmless here since it is not used afterwards.
            ckpt_ids = [int(str(f.name)) for f in abs_path.iterdir()]
            for ckpt_id in sorted(ckpt_ids)[:-ckpt_keep_max]:
                IOUtils.rm(abs_path / str(ckpt_id))
        return

    def load_ckpt(
            self,
            rel_path: Union[str, List[str]],
            load_func: Callable[[str], Any],
            ckpt_id: Optional[int] = None,
    ) -> Any:
        """Load a checkpoint via *load_func*; defaults to the latest.

        :raises IOError: if the checkpoint directory does not exist.
        """
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if not abs_path.exists():
            raise IOError(f"Cannot find data at {abs_path}")
        if ckpt_id is None:
            # Find the latest ckpt
            ckpt_ids = [int(str(f.name)) for f in abs_path.iterdir()]
            ckpt_id = max(ckpt_ids)
            logger.info(f"Loading the latest checkpoint {ckpt_id} at {abs_path}")
        return load_func(str(abs_path / str(ckpt_id)))

    def resolve(self, rel_path: Union[str, List[str]]) -> Path:
        """Return the absolute path for *rel_path* under the data dir."""
        return self.data_dir / self.assemble_rel_path(rel_path)

    @classmethod
    def assemble_rel_path(cls, rel_path: Union[str, List[str]]) -> str:
        """Join a list of path parts with '/'; pass strings through."""
        if not isinstance(rel_path, str):
            rel_path = "/".join(rel_path)
        return rel_path
import traceback
from pathlib import Path
from typing import Any, Callable, Iterator, List, NoReturn, Optional, Union
from seutil import IOUtils, LoggingUtils
from tqdm import tqdm
# Module-wide logger, named after this module.
logger = LoggingUtils.get_logger(__name__)
class FilesManager:
    """
    Handles the loading/dumping of files in a dataset.

    All files live under ``data_dir``.  Data can be stored as a single
    file, or -- in "batched" mode -- as a directory of
    ``batch-<i>.<ext>`` files holding ``per_batch`` items each.
    """

    # Well-known relative paths within a dataset directory.
    ALL_LEMMAS_BACKEND_SEXP_TRANSFORMATIONS = "all-lemmas-bsexp-transformations"
    ALL_LEMMAS_FOREEND_SEXP_TRANSFORMATIONS = "all-lemmas-fsexp-transformations"
    COQ_DOCUMENTS = "coq-documents"
    LEMMAS = "lemmas"
    LEMMAS_BACKEND_SEXP_TRANSFORMATIONS = "lemmas-bsexp-transformations"
    LEMMAS_FILTERED = "lemmas-filtered"
    LEMMAS_FOREEND_SEXP_TRANSFORMATIONS = "lemmas-fsexp-transformations"
    DATA_INDEXES = "data-indexes"
    RAW_FILES = "raw-files"
    ORIGINAL_FILES = "original-files"
    DEFINITIONS = "definitions"

    def __init__(self, data_dir: Path):
        """Create a manager rooted at *data_dir* (created if missing)."""
        self.data_dir = data_dir
        self.data_dir.mkdir(parents=True, exist_ok=True)
        return

    def clean_path(self, rel_path: Union[str, List[str]]):
        """Delete whatever currently exists at *rel_path*."""
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if abs_path.exists():
            logger.info(f"Removing existing things at {abs_path}")
            IOUtils.rm(abs_path)
        # end if
        return

    @classmethod
    def is_json_format(cls, fmt: IOUtils.Format) -> bool:
        """Return True if *fmt* is one of the json-based formats."""
        return fmt in [IOUtils.Format.json, IOUtils.Format.jsonPretty, IOUtils.Format.jsonNoSort]

    def dump_data(
            self,
            rel_path: Union[str, List[str]],
            data: Any,
            fmt: IOUtils.Format,
            is_batched: bool = False,
            per_batch: int = 100,
            exist_ok: bool = False,
    ):
        """Serialize *data* to *rel_path* under the data dir.

        :param rel_path: path relative to the data dir (str or parts).
        :param data: object to dump; in batched mode it must support
            ``len()`` and slicing.
        :param fmt: serialization format; json formats are jsonfy-ed
            before dumping.
        :param is_batched: when True, write a directory of
            ``batch-<i>.<ext>`` files of *per_batch* items each.
        :param exist_ok: when False, refuse to overwrite existing data.
        :raises IOError: if the target exists and *exist_ok* is False.
        """
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if abs_path.exists() and not exist_ok:
            raise IOError(f"Cannot rewrite existing data at {abs_path}")
        abs_path.parent.mkdir(parents=True, exist_ok=True)
        if not is_batched:
            if self.is_json_format(fmt):
                data = IOUtils.jsonfy(data)
            IOUtils.dump(abs_path, data, fmt)
        else:
            # In batched mode, the data need to be slice-able and sizable
            IOUtils.rm(abs_path)  # drop any previous batch directory
            abs_path.mkdir(parents=True)
            for batch_i in tqdm(range(math.ceil(len(data) / per_batch))):
                data_batch = data[per_batch * batch_i: per_batch * (batch_i + 1)]
                if self.is_json_format(fmt):
                    data_batch = IOUtils.jsonfy(data_batch)
                IOUtils.dump(abs_path / f"batch-{batch_i}.{fmt.get_extension()}", data_batch, fmt)
        return

    def load_data(
            self,
            rel_path: Union[str, List[str]],
            fmt: IOUtils.Format,
            is_batched: bool = False,
            clz=None,
    ) -> Any:
        """Deserialize and return the data stored at *rel_path*.

        :param clz: target class for dejsonfy-ing json formats; a
            warning is logged if omitted for a json format.
        :raises IOError: if nothing exists at *rel_path*.
        """
        if self.is_json_format(fmt) and clz is None:
            logger.warning(f"Load data from {rel_path} with json format, but did not specify clz (at {traceback.format_stack()})")
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if not abs_path.exists():
            raise IOError(f"Cannot find data at {abs_path}")
        if not is_batched:
            data = IOUtils.load(abs_path, fmt)
            if self.is_json_format(fmt) and clz is not None:
                data = IOUtils.dejsonfy(data, clz)
            return data
        else:
            data = list()
            # Load batch files in numeric order ("batch-<i>.<ext>").
            batch_numbers = sorted([int(str(f.stem).split("-")[1]) for f in abs_path.iterdir()])
            for batch_number in tqdm(batch_numbers):
                batch_file = abs_path / f"batch-{batch_number}.{fmt.get_extension()}"
                data_batch = IOUtils.load(batch_file, fmt)
                if self.is_json_format(fmt) and clz is not None:
                    data_batch = IOUtils.dejsonfy(data_batch, clz)
                data.extend(data_batch)
            return data

    def iter_batched_data(
            self,
            rel_path: Union[str, List[str]],
            fmt: IOUtils.Format,
            clz=None,
    ) -> Iterator:
        """Lazily yield items from batched data at *rel_path*.

        Unlike :meth:`load_data`, only one batch is held in memory at
        a time.

        :raises IOError: if nothing exists at *rel_path*.
        """
        if self.is_json_format(fmt) and clz is None:
            logger.warning(f"Load data from {rel_path} with json format, but did not specify clz")
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if not abs_path.exists():
            raise IOError(f"Cannot find data at {abs_path}")
        batch_numbers = sorted([int(str(f.stem).split("-")[1]) for f in abs_path.iterdir()])
        for batch_number in batch_numbers:
            batch_file = abs_path / f"batch-{batch_number}.{fmt.get_extension()}"
            for data_entry in IOUtils.load(batch_file, fmt):
                if self.is_json_format(fmt) and clz is not None:
                    data_entry = IOUtils.dejsonfy(data_entry, clz)
                # end if
                yield data_entry

    def dump_ckpt(
            self,
            rel_path: Union[str, List[str]],
            obj: Any,
            ckpt_id: int,
            dump_func: Callable[[Any, str], None],
            ckpt_keep_max: int = 5,
    ) -> None:
        """Save *obj* as checkpoint *ckpt_id*, pruning old checkpoints.

        (Annotations fixed: these callables/returns return normally, so
        ``None`` — not ``NoReturn`` — is the correct annotation.)

        :param dump_func: callable ``(obj, file_name)`` performing the
            actual serialization.
        :param ckpt_keep_max: number of most recent checkpoints to
            keep; -1 keeps everything.
        """
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        abs_path.mkdir(parents=True, exist_ok=True)
        ckpt_file_name = str(abs_path / str(ckpt_id))
        dump_func(obj, ckpt_file_name)
        # Remove older checkpoints
        if ckpt_keep_max != -1:
            # NOTE(review): the loop variable shadows the `ckpt_id`
            # parameter; harmless here since it is not used afterwards.
            ckpt_ids = [int(str(f.name)) for f in abs_path.iterdir()]
            for ckpt_id in sorted(ckpt_ids)[:-ckpt_keep_max]:
                IOUtils.rm(abs_path / str(ckpt_id))
        return

    def load_ckpt(
            self,
            rel_path: Union[str, List[str]],
            load_func: Callable[[str], Any],
            ckpt_id: Optional[int] = None,
    ) -> Any:
        """Load a checkpoint via *load_func*; defaults to the latest.

        :raises IOError: if the checkpoint directory does not exist.
        """
        abs_path = self.data_dir / self.assemble_rel_path(rel_path)
        if not abs_path.exists():
            raise IOError(f"Cannot find data at {abs_path}")
        if ckpt_id is None:
            # Find the latest ckpt
            ckpt_ids = [int(str(f.name)) for f in abs_path.iterdir()]
            ckpt_id = max(ckpt_ids)
            logger.info(f"Loading the latest checkpoint {ckpt_id} at {abs_path}")
        return load_func(str(abs_path / str(ckpt_id)))

    def resolve(self, rel_path: Union[str, List[str]]) -> Path:
        """Return the absolute path for *rel_path* under the data dir."""
        return self.data_dir / self.assemble_rel_path(rel_path)

    @classmethod
    def assemble_rel_path(cls, rel_path: Union[str, List[str]]) -> str:
        """Join a list of path parts with '/'; pass strings through."""
        if not isinstance(rel_path, str):
            rel_path = "/".join(rel_path)
        return rel_path
import numpy as np
import itertools as it
import random, sys
import scipy.stats as stats
# Result locations for the two pipelines being compared:
# data_dir1 = CompCor-denoised outputs, data_dir2 = non-denoised outputs.
data_dir1 = '/Users/chloe/Documents/Yichen/output_global_compcorr_pc3_v3/'
data_dir2 = '/Users/chloe/Documents/Yichen/output_nondenoise_pc3_v3/'
all_subjects = ['sub-01', 'sub-02', 'sub-04', 'sub-05', 'sub-09', 'sub-15', 'sub-16', 'sub-17', 'sub-18', 'sub-19', 'sub-20']
all_masks = ['rOFA', 'rFFA', 'rATL', 'rSTS', 'rTOS', 'rPPA', 'rPC']
num_sets = 100  # number of iteration for bootstrapping
print(data_dir1)
# get means of within varexp
# (average, over subjects, of each subject's within-subject ratio chart)
data_within1 = 0
data_within2 = 0
for sub_index in range(0, len(all_subjects)):
    subject = all_subjects[sub_index]
    sub_dir1 = data_dir1 + subject + '_to_' + subject + '/'
    data_within_dir1 = sub_dir1 + subject + '_to_' + subject + '_raw_ratio_chart.npy'
    data_within1 += np.mean(np.load(data_within_dir1))
    sub_dir2 = data_dir2 + subject + '_to_' + subject + '/'
    data_within_dir2 = sub_dir2 + subject + '_to_' + subject + '_raw_ratio_chart.npy'
    data_within2 += np.mean(np.load(data_within_dir2))
data_within1 = data_within1 / len(all_subjects)
data_within2 = data_within2 / len(all_subjects)
'''
Matched t-test
delta_varexp compcorr - delta_varexp nodenoise
'''
print('----------------- Matched t-test ------------------')
# bootstrapping for std and mean
meanD = []  # mean of difference (per bootstrap iteration)
stdD = []  # std of difference (per bootstrap iteration)
std1 = []  # separately calculate std for data 1
def pop_random(lst):
    """Remove and return a uniformly random element of *lst* (in place)."""
    chosen = random.randrange(len(lst))
    return lst.pop(chosen)
# Pick several sets/iterations of 5 non-overlapping pairs of participants
# (with 11 subjects the while-loop below draws exactly 5 disjoint pairs,
# leaving one unused subject per iteration)
for i in range(0, num_sets):
    values1 = []
    values2 = []
    # pick 5 pairs from data1
    sublist = all_subjects.copy()
    while len(sublist)>=3:
        rand1 = pop_random(sublist)
        rand2 = pop_random(sublist)
        # within-subject minus cross-subject variance explained (pipeline 1)
        value1 = np.mean(np.load(data_dir1+rand1+'_to_'+rand1+'/'+\
                rand1+'_to_'+rand1+'_raw_ratio_chart.npy') - \
                np.load(data_dir1+rand1+'_to_'+rand2+'/'\
                +rand1+'_to_'+rand2+'_raw_ratio_chart.npy'))  # within-cross
        values1.append(value1)
        # apply the same 5 pairs to the second data folder
        value2 = np.mean(np.load(data_dir2+rand1+'_to_'+rand1+'/'+\
                rand1+'_to_'+rand1+'_raw_ratio_chart.npy') - \
                np.load(data_dir2+rand1+'_to_'+rand2+'/'\
                +rand1+'_to_'+rand2+'_raw_ratio_chart.npy'))  # within-cross
        values2.append(value2)
    # Calculate mean and std deviation for this 5-pairs
    meanD.append(np.mean(np.array(values1) - np.array(values2)))
    stdD.append(np.std(np.array(values1) - np.array(values2), ddof=1))
    std1.append(np.std(values1, ddof=1))
# Average the means and std deviations for all iterations of bootstrapping
meanD = np.mean(meanD)
stdD = np.mean(stdD)
std1 = np.mean(std1)
print('std1 = %f' % std1)
print('meanD = %f, stdD = %f' % (meanD, stdD))
# direct difference method: paired t statistic on n=5 pair differences
print('----- DF=4 -----')
n = 5
t = meanD / (stdD / np.sqrt(n))
df = 4
pval = stats.t.sf(np.abs(t), df)*2  # two-sided pvalue = Prob(abs(t)>t)
print('t = %f' % t)
print('Df=4, pval =', pval)
print('----- DF=10 -----')
# direct difference method (same statistic, evaluated at a larger df)
n = 5
t = meanD / (stdD / np.sqrt(n))
df = 10
pval = stats.t.sf(np.abs(t), df)*2  # two-sided pvalue = Prob(abs(t)>t)
print('t = %f' % t)
print('Df=10, pval =', pval)
import itertools as it
import random, sys
import scipy.stats as stats
# Result locations for the two pipelines being compared:
# data_dir1 = CompCor-denoised outputs, data_dir2 = non-denoised outputs.
data_dir1 = '/Users/chloe/Documents/Yichen/output_global_compcorr_pc3_v3/'
data_dir2 = '/Users/chloe/Documents/Yichen/output_nondenoise_pc3_v3/'
all_subjects = ['sub-01', 'sub-02', 'sub-04', 'sub-05', 'sub-09', 'sub-15', 'sub-16', 'sub-17', 'sub-18', 'sub-19', 'sub-20']
all_masks = ['rOFA', 'rFFA', 'rATL', 'rSTS', 'rTOS', 'rPPA', 'rPC']
num_sets = 100  # number of iteration for bootstrapping
print(data_dir1)
# get means of within varexp
# (average, over subjects, of each subject's within-subject ratio chart)
data_within1 = 0
data_within2 = 0
for sub_index in range(0, len(all_subjects)):
    subject = all_subjects[sub_index]
    sub_dir1 = data_dir1 + subject + '_to_' + subject + '/'
    data_within_dir1 = sub_dir1 + subject + '_to_' + subject + '_raw_ratio_chart.npy'
    data_within1 += np.mean(np.load(data_within_dir1))
    sub_dir2 = data_dir2 + subject + '_to_' + subject + '/'
    data_within_dir2 = sub_dir2 + subject + '_to_' + subject + '_raw_ratio_chart.npy'
    data_within2 += np.mean(np.load(data_within_dir2))
data_within1 = data_within1 / len(all_subjects)
data_within2 = data_within2 / len(all_subjects)
'''
Matched t-test
delta_varexp compcorr - delta_varexp nodenoise
'''
print('----------------- Matched t-test ------------------')
# bootstrapping for std and mean
meanD = []  # mean of difference (per bootstrap iteration)
stdD = []  # std of difference (per bootstrap iteration)
std1 = []  # separately calculate std for data 1
def pop_random(lst):
    """Remove and return a uniformly random element of *lst* (in place)."""
    chosen = random.randrange(len(lst))
    return lst.pop(chosen)
# Pick several sets/iterations of 5 non-overlapping pairs of participants
# (with 11 subjects the while-loop below draws exactly 5 disjoint pairs,
# leaving one unused subject per iteration)
for i in range(0, num_sets):
    values1 = []
    values2 = []
    # pick 5 pairs from data1
    sublist = all_subjects.copy()
    while len(sublist)>=3:
        rand1 = pop_random(sublist)
        rand2 = pop_random(sublist)
        # within-subject minus cross-subject variance explained (pipeline 1)
        value1 = np.mean(np.load(data_dir1+rand1+'_to_'+rand1+'/'+\
                rand1+'_to_'+rand1+'_raw_ratio_chart.npy') - \
                np.load(data_dir1+rand1+'_to_'+rand2+'/'\
                +rand1+'_to_'+rand2+'_raw_ratio_chart.npy'))  # within-cross
        values1.append(value1)
        # apply the same 5 pairs to the second data folder
        value2 = np.mean(np.load(data_dir2+rand1+'_to_'+rand1+'/'+\
                rand1+'_to_'+rand1+'_raw_ratio_chart.npy') - \
                np.load(data_dir2+rand1+'_to_'+rand2+'/'\
                +rand1+'_to_'+rand2+'_raw_ratio_chart.npy'))  # within-cross
        values2.append(value2)
    # Calculate mean and std deviation for this 5-pairs
    meanD.append(np.mean(np.array(values1) - np.array(values2)))
    stdD.append(np.std(np.array(values1) - np.array(values2), ddof=1))
    std1.append(np.std(values1, ddof=1))
# Average the means and std deviations for all iterations of bootstrapping
meanD = np.mean(meanD)
stdD = np.mean(stdD)
std1 = np.mean(std1)
print('std1 = %f' % std1)
print('meanD = %f, stdD = %f' % (meanD, stdD))
# direct difference method: paired t statistic on n=5 pair differences
print('----- DF=4 -----')
n = 5
t = meanD / (stdD / np.sqrt(n))
df = 4
pval = stats.t.sf(np.abs(t), df)*2  # two-sided pvalue = Prob(abs(t)>t)
print('t = %f' % t)
print('Df=4, pval =', pval)
print('----- DF=10 -----')
# direct difference method (same statistic, evaluated at a larger df)
n = 5
t = meanD / (stdD / np.sqrt(n))
df = 10
pval = stats.t.sf(np.abs(t), df)*2  # two-sided pvalue = Prob(abs(t)>t)
print('t = %f' % t)
print('Df=10, pval =', pval)
import argparse
import pandas as pd
import shutil, os
import time
def die():
    """Print a usage message and terminate with a non-zero exit status."""
    print """Usage: python main.py --gmbrc /home/USER/.config/gmusicbrowser/gmbrc --lastdb /PATH/TO/scrobbles.tsv"""
    exit(1)
def backup_gmbrc(gmbrc):
    """Copy *gmbrc* into the working directory with a timestamped
    ``backup<epoch>`` suffix, preserving metadata."""
    stamp = str(int(time.time()))
    destination = os.path.join('.', gmbrc + 'backup' + stamp)
    shutil.copy2(gmbrc, destination)
def find_data_start(gmbrc):
    """Copy the gmbrc header into ``gmbrc_header`` and locate the song data.

    Every line of *gmbrc* up to and including the ``[Songs]`` marker is
    written to a ``gmbrc_header`` file in the working directory.

    Returns the 0-based line number at which the song data starts (the
    line right after ``[Songs]``), or None when the marker is missing.
    """
    # `with` guarantees both handles are closed even on early return;
    # the original leaked the header handle whenever the marker was
    # never found, and redundantly closed `gmb` inside its own `with`.
    with open('gmbrc_header', 'w') as h:
        with open(gmbrc) as gmb:
            for i, line in enumerate(gmb):
                h.write(line)
                if line.rstrip() == '[Songs]':
                    return i + 1
    return None
def main():
    """Merge last.fm scrobble counts into a gmusicbrowser library file.

    Reads the gmbrc library and a scrobbles.tsv export, raises each
    matched track's playcount to its scrobble count, then rewrites the
    gmbrc in place (a timestamped backup is made first).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gmbrc', help='gmusicbrowser database file where your music libary is stored')
    parser.add_argument('--lastdb', help='scrobbles.tsv file exported from your last.fm account')
    args = parser.parse_args()
    gmbrc = args.gmbrc
    lastdb = args.lastdb
    if not gmbrc or not lastdb:
        die()
    backup_gmbrc(gmbrc)
    # Split the gmbrc into header (kept verbatim) and the [Songs] table.
    gmb_header_end = find_data_start(gmbrc)
    gmb_library = pd.read_csv(gmbrc, sep='\t', header=1, skiprows=gmb_header_end, index_col=0)
    last_data = pd.read_csv(lastdb, sep='\t')
    # Count scrobbles per (artist, album, track) triple.
    last_data['playcount'] = 0
    last_grouped = last_data['playcount'].\
        groupby([last_data['uncorrected artist name'],
                 last_data['album name'],
                 last_data['uncorrected track name']]).count()
    hits = 0
    for id in last_grouped.index:
        # Case-insensitive match of the triple against the gmb library.
        mask = (gmb_library['artist'].str.lower() == id[0].lower()) & (gmb_library['album'].str.lower() == id[1].lower()) & (gmb_library['title'].str.lower() == id[2].lower())
        if not gmb_library[mask].empty:
            value = gmb_library.loc[mask, 'playcount']
            # Only ever raise the stored playcount, never lower it.
            # NOTE(review): `.ix` was removed in pandas 1.0 — this
            # script requires an old pandas (or a port to `.loc`).
            if value.iloc[0] < last_grouped.ix[id]:
                gmb_library.loc[mask, 'playcount'] = last_grouped.ix[id]
            hits += last_grouped.ix[id]
        else:
            print "fail:", id
    print "matched ", hits, " scrobbles in your gmb library with last.fm export data (consisting of ", len(last_data.index), " scrobbles)"
    # Rebuild the gmbrc: saved header followed by the updated song table.
    gmb_library.to_csv('gmbrc_dump', sep='\t', float_format='%.0f', index_label=False)
    os.remove(gmbrc)
    destination = open(gmbrc, 'wb')
    shutil.copyfileobj(open('gmbrc_header', 'rb'), destination)
    shutil.copyfileobj(open('gmbrc_dump', 'rb'), destination)
    os.remove('gmbrc_header')
    os.remove('gmbrc_dump')
# Script entry point.
if __name__ == '__main__':
    main()
import pandas as pd
import shutil, os
import time
def die():
    """Print a usage message and terminate with a non-zero exit status."""
    print """Usage: python main.py --gmbrc /home/USER/.config/gmusicbrowser/gmbrc --lastdb /PATH/TO/scrobbles.tsv"""
    exit(1)
def backup_gmbrc(gmbrc):
    """Copy *gmbrc* into the working directory with a timestamped
    ``backup<epoch>`` suffix, preserving metadata."""
    stamp = str(int(time.time()))
    destination = os.path.join('.', gmbrc + 'backup' + stamp)
    shutil.copy2(gmbrc, destination)
def find_data_start(gmbrc):
    """Copy the gmbrc header into ``gmbrc_header`` and locate the song data.

    Every line of *gmbrc* up to and including the ``[Songs]`` marker is
    written to a ``gmbrc_header`` file in the working directory.

    Returns the 0-based line number at which the song data starts (the
    line right after ``[Songs]``), or None when the marker is missing.
    """
    # `with` guarantees both handles are closed even on early return;
    # the original leaked the header handle whenever the marker was
    # never found, and redundantly closed `gmb` inside its own `with`.
    with open('gmbrc_header', 'w') as h:
        with open(gmbrc) as gmb:
            for i, line in enumerate(gmb):
                h.write(line)
                if line.rstrip() == '[Songs]':
                    return i + 1
    return None
def main():
    """Merge last.fm scrobble counts into a gmusicbrowser library file.

    Reads the gmbrc library and a scrobbles.tsv export, raises each
    matched track's playcount to its scrobble count, then rewrites the
    gmbrc in place (a timestamped backup is made first).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gmbrc', help='gmusicbrowser database file where your music libary is stored')
    parser.add_argument('--lastdb', help='scrobbles.tsv file exported from your last.fm account')
    args = parser.parse_args()
    gmbrc = args.gmbrc
    lastdb = args.lastdb
    if not gmbrc or not lastdb:
        die()
    backup_gmbrc(gmbrc)
    # Split the gmbrc into header (kept verbatim) and the [Songs] table.
    gmb_header_end = find_data_start(gmbrc)
    gmb_library = pd.read_csv(gmbrc, sep='\t', header=1, skiprows=gmb_header_end, index_col=0)
    last_data = pd.read_csv(lastdb, sep='\t')
    # Count scrobbles per (artist, album, track) triple.
    last_data['playcount'] = 0
    last_grouped = last_data['playcount'].\
        groupby([last_data['uncorrected artist name'],
                 last_data['album name'],
                 last_data['uncorrected track name']]).count()
    hits = 0
    for id in last_grouped.index:
        # Case-insensitive match of the triple against the gmb library.
        mask = (gmb_library['artist'].str.lower() == id[0].lower()) & (gmb_library['album'].str.lower() == id[1].lower()) & (gmb_library['title'].str.lower() == id[2].lower())
        if not gmb_library[mask].empty:
            value = gmb_library.loc[mask, 'playcount']
            # Only ever raise the stored playcount, never lower it.
            # NOTE(review): `.ix` was removed in pandas 1.0 — this
            # script requires an old pandas (or a port to `.loc`).
            if value.iloc[0] < last_grouped.ix[id]:
                gmb_library.loc[mask, 'playcount'] = last_grouped.ix[id]
            hits += last_grouped.ix[id]
        else:
            print "fail:", id
    print "matched ", hits, " scrobbles in your gmb library with last.fm export data (consisting of ", len(last_data.index), " scrobbles)"
    # Rebuild the gmbrc: saved header followed by the updated song table.
    gmb_library.to_csv('gmbrc_dump', sep='\t', float_format='%.0f', index_label=False)
    os.remove(gmbrc)
    destination = open(gmbrc, 'wb')
    shutil.copyfileobj(open('gmbrc_header', 'rb'), destination)
    shutil.copyfileobj(open('gmbrc_dump', 'rb'), destination)
    os.remove('gmbrc_header')
    os.remove('gmbrc_dump')
# Script entry point.
if __name__ == '__main__':
    main()
import unittest
import numpy as np
import sympy as sym
import matplotlib
import matplotlib.pyplot as plt
from fractpy.models import NewtonFractal
# sympy building blocks used to construct the expected expressions:
# `x` is the polynomial variable, `i` the imaginary unit.
x = sym.Symbol("x")
i = sym.I
class TestNewtonFractal(unittest.TestCase):
    """Tests for the class Newton Fractal"""

    def test_function_init(self):
        """Constructor parses the expression and computes its roots."""
        func = "(x - 2)(x - 3*I)(x + 3*I)"
        model = NewtonFractal(func)
        # Test for function
        model_func = model.function.function
        test_func = (x - 2) * (x - 3 * i) * (x + 3 * i)
        self.assertEqual(model_func, test_func)
        # Test for roots_list (compared as sets: root order is not fixed)
        roots_list = set([2, 3j, -3j])
        self.assertEqual(set(model.roots_list), roots_list)

    def test_function_repr(self):
        """repr() shows the model type and the normalized function."""
        func = "x**3 - 2*x**2 -4"
        model = NewtonFractal(func)
        output = "### FractPy Model ###\nType: Newton Fractal\nFunction: \
x**3 - 2*x**2 - 4"
        self.assertEqual(model.__repr__(), output)

    def test_make_list(self):
        """_make_list builds the complex grid from _xvals x _yvals."""
        func = "x**2 + 1"
        model = NewtonFractal(func)
        model._xvals = [i for i in range(3)]
        model._yvals = [i for i in range(3)]
        model._make_list()
        # All x + y*1j combinations of the 3x3 grid.
        zlist = set(
            [
                0 + 0j,
                0 + 1j,
                0 + 2j,
                1 + 0j,
                1 + 1j,
                1 + 2j,
                2 + 0j,
                2 + 1j,
                2 + 2j,
            ]
        )
        self.assertEqual(set(model._z_list), zlist)

    def test_match_root(self):
        """_match_root labels each grid point by its converged root."""
        func = "x**2 - 1"
        model = NewtonFractal(func)
        model._xvals = [i for i in range(3)]
        model._yvals = [i for i in range(3)]
        model._make_list()
        rootid = model._match_root()
        # presumably -1.0 marks points not matched to any root — verify
        # against fractpy's _match_root implementation
        test_rootid = np.array([-1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0])
        self.assertTrue((rootid == test_rootid).all())

    def test_prepare_plot(self):
        """_prepare_plot returns the per-pixel root-index raster."""
        func = "x**3 - 1"
        model = NewtonFractal(func)
        model._width = 10
        model._height = 10
        data = model._prepare_plot(-2, 2, -2, 2)
        test_data = np.array(
            [
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 2, 0, 0],
                [1, 1, 1, 1, 1, 1, 2, 0, 0, 0],
                [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                [2, 1, 1, 0, 1, 2, 0, 0, 0, 0],
                [1, 2, 2, 0, 2, 1, 0, 0, 0, 0],
                [2, 2, 2, 2, 2, 0, 0, 0, 0, 0],
                [2, 2, 2, 2, 2, 2, 1, 0, 0, 0],
                [2, 2, 2, 2, 2, 2, 2, 1, 0, 0],
                [2, 2, 2, 2, 2, 2, 2, 2, 0, 0],
            ]
        )
        self.assertTrue((data == test_data).all())

    def test_plot(self):
        """plot() applies the resolution and titles the figure from f(x)."""
        func = "2x**2 - 8"
        model = NewtonFractal(func)
        p = model.plot(-2, 2, -2, 2, (20, 10))
        # Check dimensions
        self.assertEqual(model._width, 20)
        self.assertEqual(model._height, 10)
        self.assertIsInstance(p, matplotlib.figure.Figure)
        self.assertEqual(
            p.axes[0].title.get_text(),
            "Newton Fractal for $f(x) = 2 x^{2} - 8$",
        )
        plt.close(p)

    def test_zoom_plot(self):
        """zoom_plot() builds a two-axes figure with a zoom panel."""
        func = "x**3 - 1"
        model = NewtonFractal(func)
        p = model.zoom_plot(-2, 2, -2, 2, (10, 20))
        self.assertEqual(model._width, 10)
        self.assertEqual(model._height, 20)
        self.assertIsInstance(p, matplotlib.figure.Figure)
        self.assertEqual(
            p.get_children()[-1].get_text(),
            "Newton Fractal for $f(x) = x^{3} - 1$",
        )
        self.assertEqual(len(p.axes), 2)
        self.assertEqual(
            p.axes[1].title.get_text(),
            "Zoom here",
        )
        plt.close(p)
import unittest
import numpy as np
import sympy as sym
import matplotlib
import matplotlib.pyplot as plt
from fractpy.models import NewtonFractal
# sympy building blocks used to construct the expected expressions:
# `x` is the polynomial variable, `i` the imaginary unit.
x = sym.Symbol("x")
i = sym.I
class TestNewtonFractal(unittest.TestCase):
    """Tests for the class Newton Fractal"""

    def test_function_init(self):
        """Constructor parses the expression and computes its roots."""
        func = "(x - 2)(x - 3*I)(x + 3*I)"
        model = NewtonFractal(func)
        # Test for function
        model_func = model.function.function
        test_func = (x - 2) * (x - 3 * i) * (x + 3 * i)
        self.assertEqual(model_func, test_func)
        # Test for roots_list (compared as sets: root order is not fixed)
        roots_list = set([2, 3j, -3j])
        self.assertEqual(set(model.roots_list), roots_list)

    def test_function_repr(self):
        """repr() shows the model type and the normalized function."""
        func = "x**3 - 2*x**2 -4"
        model = NewtonFractal(func)
        output = "### FractPy Model ###\nType: Newton Fractal\nFunction: \
x**3 - 2*x**2 - 4"
        self.assertEqual(model.__repr__(), output)

    def test_make_list(self):
        """_make_list builds the complex grid from _xvals x _yvals."""
        func = "x**2 + 1"
        model = NewtonFractal(func)
        model._xvals = [i for i in range(3)]
        model._yvals = [i for i in range(3)]
        model._make_list()
        # All x + y*1j combinations of the 3x3 grid.
        zlist = set(
            [
                0 + 0j,
                0 + 1j,
                0 + 2j,
                1 + 0j,
                1 + 1j,
                1 + 2j,
                2 + 0j,
                2 + 1j,
                2 + 2j,
            ]
        )
        self.assertEqual(set(model._z_list), zlist)

    def test_match_root(self):
        """_match_root labels each grid point by its converged root."""
        func = "x**2 - 1"
        model = NewtonFractal(func)
        model._xvals = [i for i in range(3)]
        model._yvals = [i for i in range(3)]
        model._make_list()
        rootid = model._match_root()
        # presumably -1.0 marks points not matched to any root — verify
        # against fractpy's _match_root implementation
        test_rootid = np.array([-1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0])
        self.assertTrue((rootid == test_rootid).all())

    def test_prepare_plot(self):
        """_prepare_plot returns the per-pixel root-index raster."""
        func = "x**3 - 1"
        model = NewtonFractal(func)
        model._width = 10
        model._height = 10
        data = model._prepare_plot(-2, 2, -2, 2)
        test_data = np.array(
            [
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 2, 0, 0],
                [1, 1, 1, 1, 1, 1, 2, 0, 0, 0],
                [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                [2, 1, 1, 0, 1, 2, 0, 0, 0, 0],
                [1, 2, 2, 0, 2, 1, 0, 0, 0, 0],
                [2, 2, 2, 2, 2, 0, 0, 0, 0, 0],
                [2, 2, 2, 2, 2, 2, 1, 0, 0, 0],
                [2, 2, 2, 2, 2, 2, 2, 1, 0, 0],
                [2, 2, 2, 2, 2, 2, 2, 2, 0, 0],
            ]
        )
        self.assertTrue((data == test_data).all())

    def test_plot(self):
        """plot() applies the resolution and titles the figure from f(x)."""
        func = "2x**2 - 8"
        model = NewtonFractal(func)
        p = model.plot(-2, 2, -2, 2, (20, 10))
        # Check dimensions
        self.assertEqual(model._width, 20)
        self.assertEqual(model._height, 10)
        self.assertIsInstance(p, matplotlib.figure.Figure)
        self.assertEqual(
            p.axes[0].title.get_text(),
            "Newton Fractal for $f(x) = 2 x^{2} - 8$",
        )
        plt.close(p)

    def test_zoom_plot(self):
        """zoom_plot() builds a two-axes figure with a zoom panel."""
        func = "x**3 - 1"
        model = NewtonFractal(func)
        p = model.zoom_plot(-2, 2, -2, 2, (10, 20))
        self.assertEqual(model._width, 10)
        self.assertEqual(model._height, 20)
        self.assertIsInstance(p, matplotlib.figure.Figure)
        self.assertEqual(
            p.get_children()[-1].get_text(),
            "Newton Fractal for $f(x) = x^{3} - 1$",
        )
        self.assertEqual(len(p.axes), 2)
        self.assertEqual(
            p.axes[1].title.get_text(),
            "Zoom here",
        )
        plt.close(p)
import os
from pxr import Usd, Sdf, UsdGeom
from avalon import io
class ParentPointcacheExporter(object):
def __init__(self, shot_name, parent_subset_name, frame_range=[]):
from reveries.common import get_frame_range
self.output_path = ""
self.children_data = []
self.shot_name = shot_name
self.parent_subset_name = parent_subset_name
# Check frame range
if frame_range:
self.frame_in, self.frame_out = frame_range
else:
self.frame_in, self.frame_out = get_frame_range.get(self.shot_name)
def _get_shot_id(self):
_filter = {"type": "asset", "name": self.shot_name}
self.shot_data = io.find_one(_filter)
def get_children_data(self):
self._get_shot_id()
_filter = {
"type": "subset",
"data.families": "reveries.pointcache.child.usd",
"parent": self.shot_data["_id"],
"data.parent_pointcache_name": self.parent_subset_name
}
# self.children_data = [s for s in io.find(_filter)]
self.children_data = [
s for s in io.find(_filter)
if s.get("data", {}).get("subsetGroup", "") not in ["Trash Bin"]]
return self.children_data
def export(self, output_dir):
from reveries.common import get_publish_files, get_fps
from reveries.common.usd.utils import get_UpAxis
if not self.children_data:
self.get_children_data()
stage = Usd.Stage.CreateInMemory()
UsdGeom.Xform.Define(stage, "/ROOT")
root_prim = stage.GetPrimAtPath('/ROOT')
# Set parent prim
parent_prim_name = "/ROOT/main"
UsdGeom.Xform.Define(stage, parent_prim_name)
main_prim = stage.GetPrimAtPath(parent_prim_name)
main_prim.GetReferences().SetReferences(
[Sdf.Reference("parent_pointcache_prim.usda")]
)
for child_data in self.children_data:
prim_name = "/ROOT/{}".format(child_data["name"].split(".")[-1])
UsdGeom.Xform.Define(stage, prim_name)
_prim = stage.GetPrimAtPath(prim_name)
_file = get_publish_files.get_files(
child_data["_id"], key="entryFileName").get('USD', "")
if _file:
_prim.GetReferences().SetReferences([Sdf.Reference(_file)])
# Set metadata
stage.SetDefaultPrim(root_prim)
stage.SetStartTimeCode(self.frame_in)
stage.SetEndTimeCode(self.frame_out)
stage.SetFramesPerSecond(get_fps())
stage.SetTimeCodesPerSecond(get_fps())
UsdGeom.SetStageUpAxis(stage, get_UpAxis(host="Maya"))
self.output_path = os.path.join(
output_dir, "pointcache_prim_tmp.usda").replace('\\', '/')
stage.GetRootLayer().Export(self.output_path)
# print stage.GetRootLayer().ExportToString()
print("Parent usd done: {}".format(self.output_path)) | reveries/maya/usd/parent_pointcache_export.py | import os
from pxr import Usd, Sdf, UsdGeom
from avalon import io
class ParentPointcacheExporter(object):
def __init__(self, shot_name, parent_subset_name, frame_range=[]):
from reveries.common import get_frame_range
self.output_path = ""
self.children_data = []
self.shot_name = shot_name
self.parent_subset_name = parent_subset_name
# Check frame range
if frame_range:
self.frame_in, self.frame_out = frame_range
else:
self.frame_in, self.frame_out = get_frame_range.get(self.shot_name)
def _get_shot_id(self):
_filter = {"type": "asset", "name": self.shot_name}
self.shot_data = io.find_one(_filter)
def get_children_data(self):
self._get_shot_id()
_filter = {
"type": "subset",
"data.families": "reveries.pointcache.child.usd",
"parent": self.shot_data["_id"],
"data.parent_pointcache_name": self.parent_subset_name
}
# self.children_data = [s for s in io.find(_filter)]
self.children_data = [
s for s in io.find(_filter)
if s.get("data", {}).get("subsetGroup", "") not in ["Trash Bin"]]
return self.children_data
def export(self, output_dir):
from reveries.common import get_publish_files, get_fps
from reveries.common.usd.utils import get_UpAxis
if not self.children_data:
self.get_children_data()
stage = Usd.Stage.CreateInMemory()
UsdGeom.Xform.Define(stage, "/ROOT")
root_prim = stage.GetPrimAtPath('/ROOT')
# Set parent prim
parent_prim_name = "/ROOT/main"
UsdGeom.Xform.Define(stage, parent_prim_name)
main_prim = stage.GetPrimAtPath(parent_prim_name)
main_prim.GetReferences().SetReferences(
[Sdf.Reference("parent_pointcache_prim.usda")]
)
for child_data in self.children_data:
prim_name = "/ROOT/{}".format(child_data["name"].split(".")[-1])
UsdGeom.Xform.Define(stage, prim_name)
_prim = stage.GetPrimAtPath(prim_name)
_file = get_publish_files.get_files(
child_data["_id"], key="entryFileName").get('USD', "")
if _file:
_prim.GetReferences().SetReferences([Sdf.Reference(_file)])
# Set metadata
stage.SetDefaultPrim(root_prim)
stage.SetStartTimeCode(self.frame_in)
stage.SetEndTimeCode(self.frame_out)
stage.SetFramesPerSecond(get_fps())
stage.SetTimeCodesPerSecond(get_fps())
UsdGeom.SetStageUpAxis(stage, get_UpAxis(host="Maya"))
self.output_path = os.path.join(
output_dir, "pointcache_prim_tmp.usda").replace('\\', '/')
stage.GetRootLayer().Export(self.output_path)
# print stage.GetRootLayer().ExportToString()
print("Parent usd done: {}".format(self.output_path)) | 0.389082 | 0.153169 |
import json
from OpenstackManager import OpenstackManager
from OpenstackContext import OpenstackContext
from AAIManager import AAIManager
class HeatBridge:
def __init__(self):
pass;
def init_bridge(self, openstack_identity_url, username, password, tenant, region, owner):
self.om = OpenstackManager(openstack_identity_url, OpenstackContext(username, password, tenant, region, owner));
self.am = AAIManager(OpenstackContext(username, password, tenant, region, owner));
def filterbyvalue(self, seq, key, value):
for el in seq:
if el[key]==value: yield el
def build_request(self, heat_stack_id):
resources = self.om.get_stack_resources(heat_stack_id)
servers = list(self.filterbyvalue(resources, "resource_type", "OS::Nova::Server"));
#networks = list(self.filterbyvalue(resources, "resource_type", "OS::Neutron::Net"));
#subnets = list(self.filterbyvalue(resources, "resource_type", "OS::Neutron::Subnet"));
ports = list(self.filterbyvalue(resources, "resource_type", "OS::Neutron::Port"));
#keys = list(self.filterbyvalue(resources, "resource_type", "OS::Nova::KeyPair"));
put_blocks = []
#build the servers and attach them to vnf
server_put_blocks = []
image_put_blocks = []
flavor_put_blocks = []
for item in servers:
server_info = self.om.get_server_info(item['physical_resource_id']);
server_volumes = self.om.get_server_volumes(item['physical_resource_id']);
volumes = [];
for vols in server_volumes:
volumes.append(self.om.get_volume_info(vols['id']));
aai_vserver = self.am.create_vserver_put(server_info, volumes);
flavor_info = self.om.get_flavor_info(server_info['flavor']['id']);
aai_flavor = self.am.create_flavor_put(flavor_info);
image_info = self.om.get_image_info(server_info['image']['id']);
aai_image = self.am.create_image_put(image_info, server_info);
server_put_blocks.append(self.am.create_put(aai_vserver));
image_put_blocks.append(self.am.create_put(aai_image));
flavor_put_blocks.append(self.am.create_put(aai_flavor));
put_blocks.extend(image_put_blocks);
put_blocks.extend(flavor_put_blocks);
put_blocks.extend(server_put_blocks);
#build the ports and attach them to servers
linterface_put_blocks = []
#all servers have same vnf id
random_server_info = self.om.get_server_info(servers[0]['physical_resource_id']);
for item in ports:
#todo: pass in the networks from above
port_info = self.om.get_port_info(item['physical_resource_id'])
aai_linterface = self.am.create_l_interface_put(port_info, random_server_info);
linterface_put_blocks.append(self.am.create_put(aai_linterface));
put_blocks.extend(linterface_put_blocks);
return json.dumps(self.am.create_transactions(put_blocks));
def bridge_data(self, heat_stack_id):
request = self.build_request(heat_stack_id);
print request;
return request; | simple-grpc-client/target/test-classes/OpenECOMP_ETE/testsuite/heatbridge/heatbridge/heatbridge/HeatBridge.py | import json
from OpenstackManager import OpenstackManager
from OpenstackContext import OpenstackContext
from AAIManager import AAIManager
class HeatBridge:
def __init__(self):
pass;
def init_bridge(self, openstack_identity_url, username, password, tenant, region, owner):
self.om = OpenstackManager(openstack_identity_url, OpenstackContext(username, password, tenant, region, owner));
self.am = AAIManager(OpenstackContext(username, password, tenant, region, owner));
def filterbyvalue(self, seq, key, value):
for el in seq:
if el[key]==value: yield el
def build_request(self, heat_stack_id):
resources = self.om.get_stack_resources(heat_stack_id)
servers = list(self.filterbyvalue(resources, "resource_type", "OS::Nova::Server"));
#networks = list(self.filterbyvalue(resources, "resource_type", "OS::Neutron::Net"));
#subnets = list(self.filterbyvalue(resources, "resource_type", "OS::Neutron::Subnet"));
ports = list(self.filterbyvalue(resources, "resource_type", "OS::Neutron::Port"));
#keys = list(self.filterbyvalue(resources, "resource_type", "OS::Nova::KeyPair"));
put_blocks = []
#build the servers and attach them to vnf
server_put_blocks = []
image_put_blocks = []
flavor_put_blocks = []
for item in servers:
server_info = self.om.get_server_info(item['physical_resource_id']);
server_volumes = self.om.get_server_volumes(item['physical_resource_id']);
volumes = [];
for vols in server_volumes:
volumes.append(self.om.get_volume_info(vols['id']));
aai_vserver = self.am.create_vserver_put(server_info, volumes);
flavor_info = self.om.get_flavor_info(server_info['flavor']['id']);
aai_flavor = self.am.create_flavor_put(flavor_info);
image_info = self.om.get_image_info(server_info['image']['id']);
aai_image = self.am.create_image_put(image_info, server_info);
server_put_blocks.append(self.am.create_put(aai_vserver));
image_put_blocks.append(self.am.create_put(aai_image));
flavor_put_blocks.append(self.am.create_put(aai_flavor));
put_blocks.extend(image_put_blocks);
put_blocks.extend(flavor_put_blocks);
put_blocks.extend(server_put_blocks);
#build the ports and attach them to servers
linterface_put_blocks = []
#all servers have same vnf id
random_server_info = self.om.get_server_info(servers[0]['physical_resource_id']);
for item in ports:
#todo: pass in the networks from above
port_info = self.om.get_port_info(item['physical_resource_id'])
aai_linterface = self.am.create_l_interface_put(port_info, random_server_info);
linterface_put_blocks.append(self.am.create_put(aai_linterface));
put_blocks.extend(linterface_put_blocks);
return json.dumps(self.am.create_transactions(put_blocks));
def bridge_data(self, heat_stack_id):
request = self.build_request(heat_stack_id);
print request;
return request; | 0.169784 | 0.09314 |
import nacl.utils
from nacl.public import PrivateKey, SealedBox
import pytest
import responses
import pandas as pd
import numerbay
from numerbay import API_ENDPOINT_URL
@pytest.fixture(scope="function", name="api")
def api_fixture():
api = numerbay.NumerBay(verbosity="DEBUG")
return api
def test_NumerBay():
# invalid log level should raise
with pytest.raises(AttributeError):
numerbay.NumerBay(verbosity="FOO")
@responses.activate
def test_get_account(api):
data = {"detail": "Could not validate credentials"}
responses.add(responses.GET, f"{API_ENDPOINT_URL}/users/me", json=data, status=403)
with pytest.raises(ValueError) as excinfo:
api.get_account()
assert str(excinfo.value) == "Could not validate credentials"
responses.reset()
api.user_id = 2
api.token = "Token"
data = {
"email": "<EMAIL>",
"is_active": True,
"is_superuser": False,
"username": "myusername",
"public_address": "0xmymetamaskaddressdde80ca30248e7a8890cacb",
"id": 2,
"numerai_api_key_public_id": "MYNUMERAIAPIKEYRCXBVB66ACTSLDR53",
"numerai_api_key_can_upload_submission": True,
"numerai_api_key_can_stake": True,
"numerai_api_key_can_read_submission_info": True,
"numerai_api_key_can_read_user_info": True,
"numerai_wallet_address": "0x000000000000000000000000mynumeraiaddress",
"models": [],
}
responses.add(responses.GET, f"{API_ENDPOINT_URL}/users/me", json=data)
account = api.get_account()
assert isinstance(account, dict)
assert account.get("username") == "myusername"
@responses.activate
def test_get_my_listings(api):
api.user_id = 2
api.token = "Token"
data = {
"total": 1,
"data": [
{
"avatar": "https://example.com/example.jpg",
"description": "Product description",
"is_active": True,
"is_ready": False,
"expiration_round": None,
"total_num_sales": 0,
"last_sale_price": None,
"last_sale_price_delta": None,
"featured_products": None,
"id": 108,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"category": {
"name": "Predictions",
"slug": "numerai-predictions",
"tournament": 8,
"is_per_round": True,
"is_submission": True,
"id": 3,
"items": [],
"parent": {},
},
"owner": {"id": 2, "username": "myusername"},
"options": [
{
"id": 6,
"is_on_platform": True,
"third_party_url": None,
"description": None,
"quantity": 1,
"price": 1,
"currency": "NMR",
"wallet": None,
"chain": None,
"stake_limit": None,
"mode": "file",
"is_active": True,
"coupon": None,
"coupon_specs": None,
"special_price": None,
"applied_coupon": None,
"product_id": 108,
}
],
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
listings = api.get_my_listings()
assert listings[0]["name"] == data["data"][0]["name"]
@responses.activate
def test_get_my_orders(api):
api.user_id = 2
api.token = "Token"
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {},
"buyer": {"id": 2, "username": "myusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
orders = api.get_my_orders()
assert orders[0]["buyer"]["username"] == data["data"][0]["buyer"]["username"]
@responses.activate
def test_get_my_sales(api):
api.user_id = 2
api.token = "Token"
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {},
"buyer": {"id": 2, "username": "someusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
orders = api.get_my_sales()
assert orders[0]["buyer"]["username"] == data["data"][0]["buyer"]["username"]
@responses.activate
def test_upload_artifact(api, tmpdir):
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact_id = 3
data = {"url": "https://uploadurl", "id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/generate-upload-url",
json=data,
)
responses.add(responses.PUT, "https://uploadurl")
data = {"id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/{artifact_id}/validate-upload",
json=data,
)
path = tmpdir.join("somefilepath")
path.write("content")
# upload file with product_id
artifact = api.upload_artifact(str(path), product_id=product_id)
assert artifact["id"] == artifact_id
# upload file with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact = api.upload_artifact(
str(path), product_full_name="numerai-predictions-mymodel"
)
assert artifact["id"] == artifact_id
@responses.activate
def test_upload_artifact_df(api):
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact_id = 3
data = {"url": "https://uploadurl", "id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/generate-upload-url",
json=data,
)
responses.add(responses.PUT, "https://uploadurl")
data = {"id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/{artifact_id}/validate-upload",
json=data,
)
df = pd.DataFrame.from_dict({"id": [], "prediction": []})
# upload df with product_id
artifact = api.upload_artifact(df=df, product_id=product_id)
assert artifact["id"] == artifact_id
# upload df with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact = api.upload_artifact(
df=df, product_full_name="numerai-predictions-mymodel"
)
assert artifact["id"] == artifact_id
@responses.activate
def test_upload_encrypted_artifact(api, tmpdir):
key_pair = nacl.public.PrivateKey.generate()
public_key = key_pair.public_key.encode(encoder=nacl.encoding.Base64Encoder)
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": product_id},
"buyer": {"id": 2, "username": "myusername"},
"buyer_public_key": public_key.decode("ascii"),
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
artifact_id = "abc"
data = {"url": "https://uploadurl", "id": artifact_id}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/artifacts/generate-upload-url", json=data
)
responses.add(responses.PUT, "https://uploadurl")
data = {"id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/artifacts/{artifact_id}/validate-upload",
json=data,
)
path = tmpdir.join("somefilepath")
path.write("content")
# upload file with product_id
artifacts = api.upload_artifact(str(path), product_id=product_id)
assert artifacts[0]["id"] == artifact_id
# upload df with product_id
df = pd.DataFrame.from_dict({"id": [], "prediction": []})
artifacts = api.upload_artifact(df=df, product_id=product_id)
assert artifacts[0]["id"] == artifact_id
# upload file with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifacts = api.upload_artifact(
str(path), product_full_name="numerai-predictions-mymodel"
)
assert artifacts[0]["id"] == artifact_id
@responses.activate
def test_upload_numerai_submission(api, tmpdir):
key_pair = nacl.public.PrivateKey.generate()
public_key = key_pair.public_key.encode(encoder=nacl.encoding.Base64Encoder)
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "stake",
"stake_limit": None,
"submit_model_id": "some_model_id",
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": product_id},
"buyer": {"id": 2, "username": "myusername"},
"buyer_public_key": public_key.decode("ascii"),
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
artifact_id = "abc"
data = {"url": "https://numerai_uploadurl", "id": artifact_id}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/artifacts/generate-upload-url", json=data
)
responses.add(responses.PUT, "https://numerai_uploadurl")
data = {"id": artifact_id, "is_numerai_direct": True}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/artifacts/{artifact_id}/validate-upload",
json=data,
)
path = tmpdir.join("somefilepath")
path.write("content")
# upload file with product_id
artifacts = api.upload_artifact(str(path), product_id=product_id)
assert artifacts[0]["id"] == artifact_id
assert artifacts[0]["is_numerai_direct"]
# upload df with product_id
df = pd.DataFrame.from_dict({"id": [], "prediction": []})
artifacts = api.upload_artifact(df=df, product_id=product_id)
assert artifacts[0]["id"] == artifact_id
assert artifacts[0]["is_numerai_direct"]
# upload file with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifacts = api.upload_artifact(
str(path), product_full_name="numerai-predictions-mymodel"
)
assert artifacts[0]["id"] == artifact_id
@responses.activate
def test_download_artifact(api, tmpdir):
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": 4},
"buyer": {"id": 2, "username": "myusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
# mock product search
product_id = 4
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "somemodel",
"sku": "numerai-predictions-somemodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
api.user_id = 2
api.token = "Token"
api.show_progress_bars = False
artifact_id = 3
data = "https://downloadurl"
responses.add(
responses.GET,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/{artifact_id}/generate-download-url",
json=data,
)
responses.add(responses.GET, "https://downloadurl")
# download file with product_id and artifact_id
api.download_artifact(
dest_path=tmpdir.join("artifact.csv"),
product_id=product_id,
artifact_id=artifact_id,
)
# download file with product_full_name only
data = {
"total": 1,
"data": [
{
"id": artifact_id,
}
],
}
responses.add(
responses.GET, f"{API_ENDPOINT_URL}/products/{product_id}/artifacts", json=data
)
api.download_artifact(
dest_path=tmpdir.join("artifact.csv"),
product_full_name="numerai-predictions-somemodel",
)
@responses.activate
def test_download_artifact_no_active_artifact(api):
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": 4},
"buyer": {"id": 2, "username": "myusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
api.user_id = 2
api.token = "Token"
api.show_progress_bars = False
product_id = 4
# download file with product_full_name only
data = {
"total": 0,
"data": [
{
"id": product_id,
"name": "somemodel",
"sku": "numerai-predictions-somemodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
data = {"total": 0, "data": []}
responses.add(
responses.GET, f"{API_ENDPOINT_URL}/products/{product_id}/artifacts", json=data
)
with pytest.raises(ValueError) as err:
api.download_artifact(
dest_path="artifact.csv", product_full_name="numerai-predictions-somemodel"
)
assert str(err.value).startswith("Failed to resolve artifact")
@responses.activate
def test_download_encrypted_artifact(api, tmpdir):
key_pair = nacl.public.PrivateKey.generate()
public_key = key_pair.public_key.encode(encoder=nacl.encoding.Base64Encoder)
private_key = key_pair.encode(encoder=nacl.encoding.Base64Encoder)
# mock order search
artifact_id = "abc"
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": 4},
"buyer": {"id": 2, "username": "myusername"},
"buyer_public_key": public_key.decode("ascii"),
"artifacts": [{"id": artifact_id, "state": "active"}],
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
# mock product search
product_id = 4
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "somemodel",
"sku": "numerai-predictions-somemodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
api.user_id = 2
api.token = "<PASSWORD>"
api.show_progress_bars = False
data = "https://downloadurl"
content = b"hello"
responses.add(
responses.GET,
f"{API_ENDPOINT_URL}/artifacts/{artifact_id}/generate-download-url",
json=data,
)
responses.add(
responses.GET,
"https://downloadurl",
body=SealedBox(key_pair.public_key).encrypt(content),
)
# download file with product_id and artifact_id
api.download_artifact(
dest_path=tmpdir.join("encrypted_artifact_1.csv"),
product_id=product_id,
artifact_id=artifact_id,
key_base64=private_key.decode("ascii"),
)
with open(tmpdir.join("encrypted_artifact_1.csv"), "rb") as file:
assert file.read() == content
# download file with product_full_name only
data = {
"total": 1,
"data": [
{
"id": artifact_id,
}
],
}
responses.add(
responses.GET, f"{API_ENDPOINT_URL}/products/{product_id}/artifacts", json=data
)
api.download_artifact(
dest_path=tmpdir.join("encrypted_artifact_2.csv"),
product_full_name="numerai-predictions-somemodel",
key_base64=private_key.decode("ascii"),
)
with open(tmpdir.join("encrypted_artifact_2.csv"), "rb") as file:
assert file.read() == content | tests/test_numerbay.py | import nacl.utils
from nacl.public import PrivateKey, SealedBox
import pytest
import responses
import pandas as pd
import numerbay
from numerbay import API_ENDPOINT_URL
@pytest.fixture(scope="function", name="api")
def api_fixture():
api = numerbay.NumerBay(verbosity="DEBUG")
return api
def test_NumerBay():
# invalid log level should raise
with pytest.raises(AttributeError):
numerbay.NumerBay(verbosity="FOO")
@responses.activate
def test_get_account(api):
data = {"detail": "Could not validate credentials"}
responses.add(responses.GET, f"{API_ENDPOINT_URL}/users/me", json=data, status=403)
with pytest.raises(ValueError) as excinfo:
api.get_account()
assert str(excinfo.value) == "Could not validate credentials"
responses.reset()
api.user_id = 2
api.token = "Token"
data = {
"email": "<EMAIL>",
"is_active": True,
"is_superuser": False,
"username": "myusername",
"public_address": "0xmymetamaskaddressdde80ca30248e7a8890cacb",
"id": 2,
"numerai_api_key_public_id": "MYNUMERAIAPIKEYRCXBVB66ACTSLDR53",
"numerai_api_key_can_upload_submission": True,
"numerai_api_key_can_stake": True,
"numerai_api_key_can_read_submission_info": True,
"numerai_api_key_can_read_user_info": True,
"numerai_wallet_address": "0x000000000000000000000000mynumeraiaddress",
"models": [],
}
responses.add(responses.GET, f"{API_ENDPOINT_URL}/users/me", json=data)
account = api.get_account()
assert isinstance(account, dict)
assert account.get("username") == "myusername"
@responses.activate
def test_get_my_listings(api):
api.user_id = 2
api.token = "Token"
data = {
"total": 1,
"data": [
{
"avatar": "https://example.com/example.jpg",
"description": "Product description",
"is_active": True,
"is_ready": False,
"expiration_round": None,
"total_num_sales": 0,
"last_sale_price": None,
"last_sale_price_delta": None,
"featured_products": None,
"id": 108,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"category": {
"name": "Predictions",
"slug": "numerai-predictions",
"tournament": 8,
"is_per_round": True,
"is_submission": True,
"id": 3,
"items": [],
"parent": {},
},
"owner": {"id": 2, "username": "myusername"},
"options": [
{
"id": 6,
"is_on_platform": True,
"third_party_url": None,
"description": None,
"quantity": 1,
"price": 1,
"currency": "NMR",
"wallet": None,
"chain": None,
"stake_limit": None,
"mode": "file",
"is_active": True,
"coupon": None,
"coupon_specs": None,
"special_price": None,
"applied_coupon": None,
"product_id": 108,
}
],
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
listings = api.get_my_listings()
assert listings[0]["name"] == data["data"][0]["name"]
@responses.activate
def test_get_my_orders(api):
api.user_id = 2
api.token = "Token"
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {},
"buyer": {"id": 2, "username": "myusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
orders = api.get_my_orders()
assert orders[0]["buyer"]["username"] == data["data"][0]["buyer"]["username"]
@responses.activate
def test_get_my_sales(api):
api.user_id = 2
api.token = "Token"
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {},
"buyer": {"id": 2, "username": "someusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
orders = api.get_my_sales()
assert orders[0]["buyer"]["username"] == data["data"][0]["buyer"]["username"]
@responses.activate
def test_upload_artifact(api, tmpdir):
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact_id = 3
data = {"url": "https://uploadurl", "id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/generate-upload-url",
json=data,
)
responses.add(responses.PUT, "https://uploadurl")
data = {"id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/{artifact_id}/validate-upload",
json=data,
)
path = tmpdir.join("somefilepath")
path.write("content")
# upload file with product_id
artifact = api.upload_artifact(str(path), product_id=product_id)
assert artifact["id"] == artifact_id
# upload file with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact = api.upload_artifact(
str(path), product_full_name="numerai-predictions-mymodel"
)
assert artifact["id"] == artifact_id
@responses.activate
def test_upload_artifact_df(api):
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact_id = 3
data = {"url": "https://uploadurl", "id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/generate-upload-url",
json=data,
)
responses.add(responses.PUT, "https://uploadurl")
data = {"id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/{artifact_id}/validate-upload",
json=data,
)
df = pd.DataFrame.from_dict({"id": [], "prediction": []})
# upload df with product_id
artifact = api.upload_artifact(df=df, product_id=product_id)
assert artifact["id"] == artifact_id
# upload df with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifact = api.upload_artifact(
df=df, product_full_name="numerai-predictions-mymodel"
)
assert artifact["id"] == artifact_id
@responses.activate
def test_upload_encrypted_artifact(api, tmpdir):
key_pair = nacl.public.PrivateKey.generate()
public_key = key_pair.public_key.encode(encoder=nacl.encoding.Base64Encoder)
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": product_id},
"buyer": {"id": 2, "username": "myusername"},
"buyer_public_key": public_key.decode("ascii"),
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
artifact_id = "abc"
data = {"url": "https://uploadurl", "id": artifact_id}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/artifacts/generate-upload-url", json=data
)
responses.add(responses.PUT, "https://uploadurl")
data = {"id": artifact_id}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/artifacts/{artifact_id}/validate-upload",
json=data,
)
path = tmpdir.join("somefilepath")
path.write("content")
# upload file with product_id
artifacts = api.upload_artifact(str(path), product_id=product_id)
assert artifacts[0]["id"] == artifact_id
# upload df with product_id
df = pd.DataFrame.from_dict({"id": [], "prediction": []})
artifacts = api.upload_artifact(df=df, product_id=product_id)
assert artifacts[0]["id"] == artifact_id
# upload file with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifacts = api.upload_artifact(
str(path), product_full_name="numerai-predictions-mymodel"
)
assert artifacts[0]["id"] == artifact_id
@responses.activate
def test_upload_numerai_submission(api, tmpdir):
key_pair = nacl.public.PrivateKey.generate()
public_key = key_pair.public_key.encode(encoder=nacl.encoding.Base64Encoder)
api.user_id = 2
api.token = "Token"
# mock product search
product_id = 2
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "stake",
"stake_limit": None,
"submit_model_id": "some_model_id",
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": product_id},
"buyer": {"id": 2, "username": "myusername"},
"buyer_public_key": public_key.decode("ascii"),
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
artifact_id = "abc"
data = {"url": "https://numerai_uploadurl", "id": artifact_id}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/artifacts/generate-upload-url", json=data
)
responses.add(responses.PUT, "https://numerai_uploadurl")
data = {"id": artifact_id, "is_numerai_direct": True}
responses.add(
responses.POST,
f"{API_ENDPOINT_URL}/artifacts/{artifact_id}/validate-upload",
json=data,
)
path = tmpdir.join("somefilepath")
path.write("content")
# upload file with product_id
artifacts = api.upload_artifact(str(path), product_id=product_id)
assert artifacts[0]["id"] == artifact_id
assert artifacts[0]["is_numerai_direct"]
# upload df with product_id
df = pd.DataFrame.from_dict({"id": [], "prediction": []})
artifacts = api.upload_artifact(df=df, product_id=product_id)
assert artifacts[0]["id"] == artifact_id
assert artifacts[0]["is_numerai_direct"]
# upload file with product_full_name
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "mymodel",
"sku": "numerai-predictions-mymodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
artifacts = api.upload_artifact(
str(path), product_full_name="numerai-predictions-mymodel"
)
assert artifacts[0]["id"] == artifact_id
@responses.activate
def test_download_artifact(api, tmpdir):
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": 4},
"buyer": {"id": 2, "username": "myusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
# mock product search
product_id = 4
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "somemodel",
"sku": "numerai-predictions-somemodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
api.user_id = 2
api.token = "Token"
api.show_progress_bars = False
artifact_id = 3
data = "https://downloadurl"
responses.add(
responses.GET,
f"{API_ENDPOINT_URL}/products/{product_id}/artifacts/{artifact_id}/generate-download-url",
json=data,
)
responses.add(responses.GET, "https://downloadurl")
# download file with product_id and artifact_id
api.download_artifact(
dest_path=tmpdir.join("artifact.csv"),
product_id=product_id,
artifact_id=artifact_id,
)
# download file with product_full_name only
data = {
"total": 1,
"data": [
{
"id": artifact_id,
}
],
}
responses.add(
responses.GET, f"{API_ENDPOINT_URL}/products/{product_id}/artifacts", json=data
)
api.download_artifact(
dest_path=tmpdir.join("artifact.csv"),
product_full_name="numerai-predictions-somemodel",
)
@responses.activate
def test_download_artifact_no_active_artifact(api):
# mock order search
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": 4},
"buyer": {"id": 2, "username": "myusername"},
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
api.user_id = 2
api.token = "Token"
api.show_progress_bars = False
product_id = 4
# download file with product_full_name only
data = {
"total": 0,
"data": [
{
"id": product_id,
"name": "somemodel",
"sku": "numerai-predictions-somemodel",
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
data = {"total": 0, "data": []}
responses.add(
responses.GET, f"{API_ENDPOINT_URL}/products/{product_id}/artifacts", json=data
)
with pytest.raises(ValueError) as err:
api.download_artifact(
dest_path="artifact.csv", product_full_name="numerai-predictions-somemodel"
)
assert str(err.value).startswith("Failed to resolve artifact")
@responses.activate
def test_download_encrypted_artifact(api, tmpdir):
key_pair = nacl.public.PrivateKey.generate()
public_key = key_pair.public_key.encode(encoder=nacl.encoding.Base64Encoder)
private_key = key_pair.encode(encoder=nacl.encoding.Base64Encoder)
# mock order search
artifact_id = "abc"
data = {
"total": 1,
"data": [
{
"date_order": "2021-12-25T06:34:58.047278",
"round_order": 296,
"quantity": 1,
"price": 9,
"currency": "NMR",
"mode": "file",
"stake_limit": None,
"submit_model_id": None,
"submit_model_name": None,
"submit_state": None,
"chain": "ethereum",
"from_address": "0x00000000000000000000000000000fromaddress",
"to_address": "0x0000000000000000000000000000000toaddress",
"transaction_hash": "0x09bd2a0f814a745f62cb35f1a41dd18208fb653210ff677e946747a20e5abcdef",
"state": "confirmed",
"applied_coupon_id": 1,
"coupon": None,
"coupon_specs": None,
"id": 126,
"product": {"id": 4},
"buyer": {"id": 2, "username": "myusername"},
"buyer_public_key": public_key.decode("ascii"),
"artifacts": [{"id": artifact_id, "state": "active"}],
}
],
}
responses.add(responses.POST, f"{API_ENDPOINT_URL}/orders/search", json=data)
# mock product search
product_id = 4
data = {
"total": 1,
"data": [
{
"id": product_id,
"name": "somemodel",
"sku": "numerai-predictions-somemodel",
"use_encryption": True,
}
],
}
responses.add(
responses.POST, f"{API_ENDPOINT_URL}/products/search-authenticated", json=data
)
api.user_id = 2
api.token = "<PASSWORD>"
api.show_progress_bars = False
data = "https://downloadurl"
content = b"hello"
responses.add(
responses.GET,
f"{API_ENDPOINT_URL}/artifacts/{artifact_id}/generate-download-url",
json=data,
)
responses.add(
responses.GET,
"https://downloadurl",
body=SealedBox(key_pair.public_key).encrypt(content),
)
# download file with product_id and artifact_id
api.download_artifact(
dest_path=tmpdir.join("encrypted_artifact_1.csv"),
product_id=product_id,
artifact_id=artifact_id,
key_base64=private_key.decode("ascii"),
)
with open(tmpdir.join("encrypted_artifact_1.csv"), "rb") as file:
assert file.read() == content
# download file with product_full_name only
data = {
"total": 1,
"data": [
{
"id": artifact_id,
}
],
}
responses.add(
responses.GET, f"{API_ENDPOINT_URL}/products/{product_id}/artifacts", json=data
)
api.download_artifact(
dest_path=tmpdir.join("encrypted_artifact_2.csv"),
product_full_name="numerai-predictions-somemodel",
key_base64=private_key.decode("ascii"),
)
with open(tmpdir.join("encrypted_artifact_2.csv"), "rb") as file:
assert file.read() == content | 0.509032 | 0.322126 |
from django.conf.urls import url
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.index, name='index'),
path('user/', views.UserView.user, name='user'),
path('user/<int:user_id>', views.UserView.user_profile),
path('user/update/', views.UpdateUserView.update),
path('user/change-password/', views.ChangeUserPasswordView.change_password, name='change_password'),
path('register/', views.RegisterView.register, name="register"),
path('register/user/', views.RegisterView.register_user, name="form_user"),
path('register/company/', views.RegisterView.register_company, name="form_company"),
path('login/', views.LoginView.login, name="login"),
path('logout/', views.LogoutView.logout, name="logout"),
path('events/', views.EventsView.events, name='events'),
path('events/create_event/', views.CreateEventView.create_event, name='events/create_event'),
path('events/all_events/', views.AllEventsView.all_events, name='events/all_events'),
path('events/event_info/<int:event_id>', views.EventInfoView.event_info, name='events/event_info'),
path('events/search_events/', views.SearchEventsView.search_events, name='events/search_events'),
path('events/subscribe_events/<int:event_id>/<int:user_id>/', views.SubscribeEventsView.subscribe_events, name='events/subscribe_events'),
path('events/unsubscribe_events/<int:event_id>/<int:user_id>/', views.SubscribeEventsView.unsubscribe_events, name='events/unsubscribe_events'),
path('events/get_participants/<int:event_id>/', views.SubscribeEventsView.get_participants, name='events/get_participants'),
path('error', views.error, name='error'),
path('programs/', views.ProgramsView.programs, name='programs'),
path('programs/create_program/', views.CreateProgramView.create_program, name='programs/create_program'),
path('programs/all_programs/', views.AllProgramsView.all_programs, name='programs/all_programs'),
path('programs/search_programs/', views.SearchProgramView.search_program, name='programs/search_program'),
path('programs/subscribe_programs/', views.SubscribeProgramsView.subscribe_programs,
name='programs/subscribe_programs'),
path('yearbook/', views.YearbookView.yearbook, name='yearbook'),
path('yearbook/search_user/', views.YearbookView.search_user, name='yearbook/search_user'),
path('moderate_user/', views.ModerationView.moderate_user, name='moderate_user'),
path('moderate_user/<int:user_id>', views.ModerationView.moderate_user, name='moderate_user'),
path('moderate_event/', views.ModerationView.moderate_event, name='moderate_event'),
path('moderate_event/<int:event_id>', views.ModerationView.moderate_event, name='moderate_event'),
] | wao/urls.py | from django.conf.urls import url
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.index, name='index'),
path('user/', views.UserView.user, name='user'),
path('user/<int:user_id>', views.UserView.user_profile),
path('user/update/', views.UpdateUserView.update),
path('user/change-password/', views.ChangeUserPasswordView.change_password, name='change_password'),
path('register/', views.RegisterView.register, name="register"),
path('register/user/', views.RegisterView.register_user, name="form_user"),
path('register/company/', views.RegisterView.register_company, name="form_company"),
path('login/', views.LoginView.login, name="login"),
path('logout/', views.LogoutView.logout, name="logout"),
path('events/', views.EventsView.events, name='events'),
path('events/create_event/', views.CreateEventView.create_event, name='events/create_event'),
path('events/all_events/', views.AllEventsView.all_events, name='events/all_events'),
path('events/event_info/<int:event_id>', views.EventInfoView.event_info, name='events/event_info'),
path('events/search_events/', views.SearchEventsView.search_events, name='events/search_events'),
path('events/subscribe_events/<int:event_id>/<int:user_id>/', views.SubscribeEventsView.subscribe_events, name='events/subscribe_events'),
path('events/unsubscribe_events/<int:event_id>/<int:user_id>/', views.SubscribeEventsView.unsubscribe_events, name='events/unsubscribe_events'),
path('events/get_participants/<int:event_id>/', views.SubscribeEventsView.get_participants, name='events/get_participants'),
path('error', views.error, name='error'),
path('programs/', views.ProgramsView.programs, name='programs'),
path('programs/create_program/', views.CreateProgramView.create_program, name='programs/create_program'),
path('programs/all_programs/', views.AllProgramsView.all_programs, name='programs/all_programs'),
path('programs/search_programs/', views.SearchProgramView.search_program, name='programs/search_program'),
path('programs/subscribe_programs/', views.SubscribeProgramsView.subscribe_programs,
name='programs/subscribe_programs'),
path('yearbook/', views.YearbookView.yearbook, name='yearbook'),
path('yearbook/search_user/', views.YearbookView.search_user, name='yearbook/search_user'),
path('moderate_user/', views.ModerationView.moderate_user, name='moderate_user'),
path('moderate_user/<int:user_id>', views.ModerationView.moderate_user, name='moderate_user'),
path('moderate_event/', views.ModerationView.moderate_event, name='moderate_event'),
path('moderate_event/<int:event_id>', views.ModerationView.moderate_event, name='moderate_event'),
] | 0.341473 | 0.0545 |