| content (string, 0–1.05M chars) | origin (2 classes) | type (2 classes) |
|---|---|---|
import re
# noinspection PyShadowingBuiltins
def all(_path):
return True
def path_contains(*subs):
def func(path):
return any(map(path.__contains__, subs))
return func
def contains_regex(pattern):
def func(path):
with open(path) as f:
code = f.read()
return bool(re.search(pattern, code))
return func
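# A minimal usage sketch (hypothetical paths): each factory above returns a
# predicate over file paths, so filters compose naturally.
if __name__ == "__main__":
    in_tests = path_contains("tests", "docs")
    print(all("src/app.py"))              # True: `all` accepts every path
    print(in_tests("tests/test_app.py"))  # True: path contains "tests"
    print(in_tests("src/app.py"))         # False: no substring matches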
|
nilq/baby-python
|
python
|
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# get color transfer function/color map for 'DICOMImage'
dICOMImageLUT = GetColorTransferFunction('DICOMImage')
# Rescale transfer function
dICOMImageLUT.RescaleTransferFunction(200.0, 7622.0)
# get opacity transfer function/opacity map for 'DICOMImage'
dICOMImagePWF = GetOpacityTransferFunction('DICOMImage')
# Rescale transfer function
dICOMImagePWF.RescaleTransferFunction(200.0, 7622.0)
#### render all views
RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
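# a minimal sketch of the SaveScreenshot alternative mentioned above (output
# path and resolution are assumptions):
# SaveScreenshot('/tmp/dicom_view.png', GetActiveViewOrCreate('RenderView'),
#     ImageResolution=[1920, 1080])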
|
nilq/baby-python
|
python
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.cache import cache
from .models import *
@receiver(post_save, sender=Email)
def email__delete_caches_on_create(sender, instance, created, **kwargs):
    # NOTE: the cache key is hard-coded to user id 1 in the original;
    # a per-user variant is sketched below.
    cache_name = "get_sent_emails_user_id_" + str(1)
    if cache_name in cache:
        cache.delete(cache_name)
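# A sketch of a per-user cache key, assuming Email carries a `user` foreign
# key (the model is not shown here, so the attribute name is an assumption):
#
# @receiver(post_save, sender=Email)
# def email__delete_sender_cache(sender, instance, created, **kwargs):
#     cache.delete(f"get_sent_emails_user_id_{instance.user.id}")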
|
nilq/baby-python
|
python
|
from flask import Blueprint, session, redirect, render_template, request, flash, url_for, abort
from models import PageDetails, Database, Authentication, General
from functools import wraps
from validator_collection import *
admin_remove = Blueprint("admin_remove", __name__)
@admin_remove.route("/Admin/Remove", methods=["POST", "GET"])
@admin_remove.route("/Admin/remove", methods=["POST", "GET"])
@admin_remove.route("/Admin/Delete", methods=["POST", "GET"])
@admin_remove.route("/Admin/delete", methods=["POST", "GET"])
def admin_remove_index():
return render_template("admin/admin_remove_options.html")
@admin_remove.route("/Admin/Remove/Day-Part", methods=["POST", "GET"])
def remove_day_options_admin():
""" The Remove day options Page as an admin. """
if request.method == "POST":
def form_handler(request):
            if request.form.get("slug") == "":
                # Message: "Enter the course's English name (slug)."
                return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
            if request.form.get("day") == "":
                # Message: "Enter the day of the course."
                return {"Result": False, "Message": "روز دوره را وارد کنید."}
            try:
                int(request.form.get("day"))
            except ValueError:
                # Message: "The course day must be a number."
                return {"Result": False, "Message": "روز دوره باید عدد باشد."}
            if Database().get_courses_data_from_db(request.form.get("slug")) is False:
                # Message: "No such course exists."
                return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
remove_ones = []
day_parts = [
"Name",
"Cover",
"Description",
"Text",
"To_Do",
"Quotes",
"Musics",
"Musics_Description",
"Ted_Video",
"Ted_Video_Description",
"Animation_Link",
"Animation_Description",
"Movie_Links",
"Movie_Text",
"Movie_Cover",
"Podcast",
"Podcast_Description",
]
for part in day_parts:
                if part == request.form.get(part):
remove_ones.append(part)
message = Database().delete_parts_of_day_of_course_data_in_db(
slug=request.form.get("slug"),
day_of_course=request.form.get("day"),
remove_parts_names=remove_ones,
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_options_admin"))
Parts = PageDetails().all_day_parts_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_day_options.html",
Parts=Parts,
Courses=PageDetails(session).all_courses_page_info_html(),
)
@admin_remove.route("/Admin/Remove/Course", methods=["POST", "GET"])
def remove_course_admin():
""" The Remove day options Page as an admin. """
if request.method == "POST":
def form_handler(request):
remove_ones = []
course_names = []
for course in PageDetails().all_courses_page_info_html():
course_names.append(course["Slug"])
for part in course_names:
                if part == request.form.get(part):
remove_ones.append(part)
for course in remove_ones:
Database().delete_courses_data_from_db(course)
General().remove_file_to_trash("static/assets/images/blog/{slug}/".format(slug=course))
return True
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_course_admin"))
Parts = PageDetails().all_course_data_slug_name_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_courses.html",
Parts=Parts,
)
@admin_remove.route("/Admin/Remove/Day", methods=["POST", "GET"])
def remove_full_day_admin():
""" The Remove Full Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
try:
int(request.form.get("day"))
except ValueError:
return {"Result": False, "Message": "روز دوره باید عدد باشد."}
if Database().get_courses_data_from_db(request.form.get("slug")) is False:
return {"Result": False, "Message": "همچین دوره ای وجود ندارد."}
message = Database().delete_day_of_course_data_to_db(
slug=request.form.get("slug"),
day=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_full_day_admin"))
Parts = PageDetails().all_day_parts_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_day.html",
Parts=Parts,
Courses=PageDetails(session).all_courses_page_info_html(),
)
@admin_remove.route("/Admin/Remove/Day/Essential", methods=["POST", "GET"])
def remove_day_essential_main_data_admin():
""" The Remove Essential Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_essential_main_data_admin"))
return render_template(
"admin/admin_remove_day_essential.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Text", methods=["POST", "GET"])
def remove_day_text_data_admin():
""" The Remove Main Text Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_text_data_admin"))
return render_template(
"admin/admin_remove_day_text.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Todo", methods=["POST", "GET"])
def remove_day_todo_data_admin():
""" The Remove Todo-Excersices Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_todo_data_admin"))
return render_template(
"admin/admin_remove_day_todo.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Quotes", methods=["POST", "GET"])
def remove_day_quotes_data_admin():
""" The Remove Quotes Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_quotes_data_admin"))
return render_template(
"admin/admin_remove_day_quotes.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Music", methods=["POST", "GET"])
def remove_day_music_data_admin():
""" The Remove Music Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_music_data_admin"))
return render_template(
"admin/admin_remove_day_music.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Ted", methods=["POST", "GET"])
def remove_day_ted_data_admin():
""" The Remove TED video Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_ted_data_admin"))
return render_template(
"admin/admin_remove_day_ted.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Animation", methods=["POST", "GET"])
def remove_day_animation_data_admin():
""" The Remove short Animation film Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_animation_data_admin"))
return render_template(
"admin/admin_remove_day_animation.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Podcast", methods=["POST", "GET"])
def remove_day_podcast_data_admin():
""" The Remove podcast Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_podcast_data_admin"))
return render_template(
"admin/admin_remove_day_podcast.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Day/Movie", methods=["POST", "GET"])
def remove_day_movie_data_admin():
""" The Remove Movie Day Page as an admin. """
if request.method == "POST":
def form_handler(request):
if request.form.get("slug") == "":
return {"Result": False, "Message": "نام انگلیسی دوره را وارد کنید."}
if request.form.get("day") == "":
return {"Result": False, "Message": "روز دوره را وارد کنید."}
message = Database().add_delete_day_data_to_db(
course_name_slug=request.form.get("slug"),
day_num=request.form.get("day"),
)
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت اضافه شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_day_movie_data_admin"))
return render_template(
"admin/admin_remove_day_movie.html",
Courses=Database().get_all_slug_and_names_of_courses_from_db(),
)
@admin_remove.route("/Admin/Remove/Post/<slug_post>", methods=["POST", "GET"])
def remove_post_blog_admin(slug_post):
""" The remove a Post for blog Page as an admin. """
post = Database().get_blog_post_data_from_db(slug_post)
if post is False:
abort(404)
if request.method == "POST":
def form_handler(request):
if request.form.get("confirmation") == "True":
message = Database().delete_post_blog_data_from_db(slug_post)
else:
message = {"Result": False, "Message": "حذف تایید نشده است."}
General().remove_file_to_trash("static/assets/images/blog/{slug}/".format(slug=slug_post))
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_edit.post_blog_options_admin"))
return render_template(
"admin/admin_remove_post.html",
post=post
)
@admin_remove.route("/Admin/Remove/Music", methods=["POST", "GET"])
def remove_music_admin():
""" The Remove Music as an admin. """
if request.method == "POST":
def form_handler(request):
remove_ones = []
music_names = []
for music_name in Database().get_all_musics_data_from_db():
music_names.append(music_name["Music_Name"])
for part in music_names:
                if part == request.form.get(part):
remove_ones.append(part)
for music_name in remove_ones:
Database().delete_music_data_from_db(music_name)
message = True
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_music_admin"))
Parts = PageDetails().all_music_data_name_creator_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_musics.html",
Parts=Parts,
)
@admin_remove.route("/Admin/Remove/Tool", methods=["POST", "GET"])
@admin_remove.route("/Admin/Remove/tool", methods=["POST", "GET"])
def remove_tool_admin():
""" The Remove tool as an admin. """
if request.method == "POST":
def form_handler(request):
remove_ones = []
tool_names = []
for tool_name in Database().get_all_tools_data_db():
tool_names.append(tool_name["Slug"])
for part in tool_names:
if part == request.form.get(part):
remove_ones.append(part)
for tool_slug in remove_ones:
Database().delete_tool_data_from_db(tool_slug)
message = True
return message
message = form_handler(request)
if message is True:
message = {"Color": "green", "Result": "با موفقیت حذف شد."}
else:
if message["Result"] is False:
message["Color"] = "red"
else:
message["Color"] = "green"
message["Result"] = message["Message"]
flash(message)
return redirect(url_for("admin_remove.remove_tool_admin"))
Parts = PageDetails().all_tools_data_name_slug_to_remove_in_admin_page()
return render_template(
"admin/admin_remove_tool.html",
Parts=Parts,
)
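# The Result/Message/Color normalization above is repeated in every handler;
# a helper like this sketch (name and default text are assumptions) would
# collapse each branch to a single call:
def _flash_result(message, success_text="با موفقیت حذف شد."):  # "Successfully removed."
    if message is True:
        message = {"Color": "green", "Result": success_text}
    else:
        message["Color"] = "red" if message["Result"] is False else "green"
        message["Result"] = message["Message"]
    flash(message)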
|
nilq/baby-python
|
python
|
#
# Test Netatmo class
#
import logging
import Netatmo
def main():
logging.basicConfig(level=logging.DEBUG)
netatmo = Netatmo.Netatmo("PyAtmo.conf")
    home = netatmo.getHomesData()
netatmo.getHomeStatus()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
import warnings
from typing import Any, Dict, Generator, List, Optional, Tuple, Union
from kubernetes import client, config, watch
from kubernetes.config import ConfigException
from airflow.compat.functools import cached_property
from airflow.kubernetes.kube_client import _disable_verify_ssl, _enable_tcp_keepalive
try:
import airflow.utils.yaml as yaml
except ImportError:
import yaml # type: ignore[no-redef]
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
def _load_body_to_dict(body):
try:
body_dict = yaml.safe_load(body)
except yaml.YAMLError as e:
raise AirflowException(f"Exception when loading resource definition: {e}\n")
return body_dict
class KubernetesHook(BaseHook):
"""
Creates Kubernetes API connection.
- use in cluster configuration by using ``extra__kubernetes__in_cluster`` in connection
- use custom config by providing path to the file using ``extra__kubernetes__kube_config_path``
- use custom configuration by providing content of kubeconfig file via
``extra__kubernetes__kube_config`` in connection
- use default config by providing no extras
    This hook checks for configuration options in the order listed above. Once an
    option is present, it is used.
.. seealso::
For more information about Kubernetes connection:
:doc:`/connections/kubernetes`
:param conn_id: The :ref:`kubernetes connection <howto/connection:kubernetes>`
to Kubernetes cluster.
:param client_configuration: Optional dictionary of client configuration params.
Passed on to kubernetes client.
    :param cluster_context: Optionally specify a context to use (e.g. if you have multiple
        contexts in your kubeconfig).
:param config_file: Path to kubeconfig file.
:param in_cluster: Set to ``True`` if running from within a kubernetes cluster.
:param disable_verify_ssl: Set to ``True`` if SSL verification should be disabled.
:param disable_tcp_keepalive: Set to ``True`` if you want to disable keepalive logic.
"""
conn_name_attr = 'kubernetes_conn_id'
default_conn_name = 'kubernetes_default'
conn_type = 'kubernetes'
hook_name = 'Kubernetes Cluster Connection'
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, StringField
return {
"extra__kubernetes__in_cluster": BooleanField(lazy_gettext('In cluster configuration')),
"extra__kubernetes__kube_config_path": StringField(
lazy_gettext('Kube config path'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__kube_config": StringField(
lazy_gettext('Kube config (JSON format)'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__namespace": StringField(
lazy_gettext('Namespace'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__cluster_context": StringField(
lazy_gettext('Cluster context'), widget=BS3TextFieldWidget()
),
"extra__kubernetes__disable_verify_ssl": BooleanField(lazy_gettext('Disable SSL')),
"extra__kubernetes__disable_tcp_keepalive": BooleanField(lazy_gettext('Disable TCP keepalive')),
}
@staticmethod
def get_ui_field_behaviour() -> Dict[str, Any]:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['host', 'schema', 'login', 'password', 'port', 'extra'],
"relabeling": {},
}
def __init__(
self,
conn_id: Optional[str] = default_conn_name,
client_configuration: Optional[client.Configuration] = None,
cluster_context: Optional[str] = None,
config_file: Optional[str] = None,
in_cluster: Optional[bool] = None,
disable_verify_ssl: Optional[bool] = None,
disable_tcp_keepalive: Optional[bool] = None,
) -> None:
super().__init__()
self.conn_id = conn_id
self.client_configuration = client_configuration
self.cluster_context = cluster_context
self.config_file = config_file
self.in_cluster = in_cluster
self.disable_verify_ssl = disable_verify_ssl
self.disable_tcp_keepalive = disable_tcp_keepalive
# these params used for transition in KPO to K8s hook
# for a deprecation period we will continue to consider k8s settings from airflow.cfg
self._deprecated_core_disable_tcp_keepalive: Optional[bool] = None
self._deprecated_core_disable_verify_ssl: Optional[bool] = None
self._deprecated_core_in_cluster: Optional[bool] = None
self._deprecated_core_cluster_context: Optional[str] = None
self._deprecated_core_config_file: Optional[str] = None
@staticmethod
def _coalesce_param(*params):
for param in params:
if param is not None:
return param
@cached_property
def conn_extras(self):
if self.conn_id:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
else:
extras = {}
return extras
def _get_field(self, field_name):
if field_name.startswith('extra_'):
raise ValueError(
f"Got prefixed name {field_name}; please remove the 'extra__kubernetes__' prefix "
f"when using this method."
)
if field_name in self.conn_extras:
return self.conn_extras[field_name] or None
prefixed_name = f"extra__kubernetes__{field_name}"
return self.conn_extras.get(prefixed_name) or None
@staticmethod
def _deprecation_warning_core_param(deprecation_warnings):
settings_list_str = ''.join([f"\n\t{k}={v!r}" for k, v in deprecation_warnings])
warnings.warn(
f"\nApplying core Airflow settings from section [kubernetes] with the following keys:"
f"{settings_list_str}\n"
"In a future release, KubernetesPodOperator will no longer consider core\n"
"Airflow settings; define an Airflow connection instead.",
DeprecationWarning,
)
def get_conn(self) -> Any:
"""Returns kubernetes api session for use with requests"""
in_cluster = self._coalesce_param(
self.in_cluster, self.conn_extras.get("extra__kubernetes__in_cluster") or None
)
cluster_context = self._coalesce_param(
self.cluster_context, self.conn_extras.get("extra__kubernetes__cluster_context") or None
)
kubeconfig_path = self._coalesce_param(
self.config_file, self.conn_extras.get("extra__kubernetes__kube_config_path") or None
)
kubeconfig = self.conn_extras.get("extra__kubernetes__kube_config") or None
num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
if num_selected_configuration > 1:
raise AirflowException(
"Invalid connection configuration. Options kube_config_path, "
"kube_config, in_cluster are mutually exclusive. "
"You can only use one option at a time."
)
disable_verify_ssl = self._coalesce_param(
self.disable_verify_ssl, _get_bool(self._get_field("disable_verify_ssl"))
)
disable_tcp_keepalive = self._coalesce_param(
self.disable_tcp_keepalive, _get_bool(self._get_field("disable_tcp_keepalive"))
)
# BEGIN apply settings from core kubernetes configuration
# this section should be removed in next major release
deprecation_warnings: List[Tuple[str, Any]] = []
if disable_verify_ssl is None and self._deprecated_core_disable_verify_ssl is True:
deprecation_warnings.append(('verify_ssl', False))
disable_verify_ssl = self._deprecated_core_disable_verify_ssl
# by default, hook will try in_cluster first. so we only need to
# apply core airflow config and alert when False and in_cluster not otherwise set.
if in_cluster is None and self._deprecated_core_in_cluster is False:
deprecation_warnings.append(('in_cluster', self._deprecated_core_in_cluster))
in_cluster = self._deprecated_core_in_cluster
if not cluster_context and self._deprecated_core_cluster_context:
deprecation_warnings.append(('cluster_context', self._deprecated_core_cluster_context))
cluster_context = self._deprecated_core_cluster_context
if not kubeconfig_path and self._deprecated_core_config_file:
deprecation_warnings.append(('config_file', self._deprecated_core_config_file))
kubeconfig_path = self._deprecated_core_config_file
if disable_tcp_keepalive is None and self._deprecated_core_disable_tcp_keepalive is True:
deprecation_warnings.append(('enable_tcp_keepalive', False))
disable_tcp_keepalive = True
if deprecation_warnings:
self._deprecation_warning_core_param(deprecation_warnings)
# END apply settings from core kubernetes configuration
if disable_verify_ssl is True:
_disable_verify_ssl()
if disable_tcp_keepalive is not True:
_enable_tcp_keepalive()
if in_cluster:
self.log.debug("loading kube_config from: in_cluster configuration")
config.load_incluster_config()
return client.ApiClient()
if kubeconfig_path is not None:
self.log.debug("loading kube_config from: %s", kubeconfig_path)
config.load_kube_config(
config_file=kubeconfig_path,
client_configuration=self.client_configuration,
context=cluster_context,
)
return client.ApiClient()
if kubeconfig is not None:
with tempfile.NamedTemporaryFile() as temp_config:
self.log.debug("loading kube_config from: connection kube_config")
temp_config.write(kubeconfig.encode())
temp_config.flush()
config.load_kube_config(
config_file=temp_config.name,
client_configuration=self.client_configuration,
context=cluster_context,
)
return client.ApiClient()
return self._get_default_client(cluster_context=cluster_context)
def _get_default_client(self, *, cluster_context=None):
# if we get here, then no configuration has been supplied
# we should try in_cluster since that's most likely
# but failing that just load assuming a kubeconfig file
# in the default location
try:
config.load_incluster_config(client_configuration=self.client_configuration)
except ConfigException:
self.log.debug("loading kube_config from: default file")
config.load_kube_config(
client_configuration=self.client_configuration,
context=cluster_context,
)
return client.ApiClient()
@cached_property
def api_client(self) -> Any:
"""Cached Kubernetes API client"""
return self.get_conn()
@cached_property
def core_v1_client(self):
return client.CoreV1Api(api_client=self.api_client)
def create_custom_object(
self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
):
"""
Creates custom resource definition object in Kubernetes
:param group: api group
:param version: api version
:param plural: api plural
:param body: crd object definition
:param namespace: kubernetes namespace
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
if isinstance(body, str):
body_dict = _load_body_to_dict(body)
else:
body_dict = body
try:
api.delete_namespaced_custom_object(
group=group,
version=version,
namespace=namespace,
plural=plural,
name=body_dict["metadata"]["name"],
)
self.log.warning("Deleted SparkApplication with the same name.")
except client.rest.ApiException:
self.log.info("SparkApp %s not found.", body_dict['metadata']['name'])
try:
response = api.create_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, body=body_dict
)
self.log.debug("Response: %s", response)
return response
except client.rest.ApiException as e:
raise AirflowException(f"Exception when calling -> create_custom_object: {e}\n")
def get_custom_object(
self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
):
"""
Get custom resource definition object from Kubernetes
:param group: api group
:param version: api version
:param plural: api plural
:param name: crd object name
:param namespace: kubernetes namespace
"""
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
try:
response = api.get_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, name=name
)
return response
except client.rest.ApiException as e:
raise AirflowException(f"Exception when calling -> get_custom_object: {e}\n")
def get_namespace(self) -> Optional[str]:
"""Returns the namespace that defined in the connection"""
if self.conn_id:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
namespace = extras.get("extra__kubernetes__namespace", "default")
return namespace
return None
def get_pod_log_stream(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
) -> Tuple[watch.Watch, Generator[str, None, None]]:
"""
Retrieves a log stream for a container in a kubernetes pod.
:param pod_name: pod name
:param container: container name
:param namespace: kubernetes namespace
"""
api = client.CoreV1Api(self.api_client)
watcher = watch.Watch()
return (
watcher,
watcher.stream(
api.read_namespaced_pod_log,
name=pod_name,
container=container,
namespace=namespace if namespace else self.get_namespace(),
),
)
def get_pod_logs(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
):
"""
Retrieves a container's log from the specified pod.
:param pod_name: pod name
:param container: container name
:param namespace: kubernetes namespace
"""
api = client.CoreV1Api(self.api_client)
return api.read_namespaced_pod_log(
name=pod_name,
container=container,
_preload_content=False,
namespace=namespace if namespace else self.get_namespace(),
)
def _get_bool(val) -> Optional[bool]:
"""
    Converts val to bool if it can be done with certainty.
If we cannot infer intention we return None.
"""
if isinstance(val, bool):
return val
elif isinstance(val, str):
if val.strip().lower() == 'true':
return True
elif val.strip().lower() == 'false':
return False
return None
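# A minimal usage sketch (connection id is an assumption; requires a
# configured Airflow connection or a reachable kubeconfig):
#
# hook = KubernetesHook(conn_id="kubernetes_default")
# pods = hook.core_v1_client.list_namespaced_pod(namespace=hook.get_namespace() or "default")
# for pod in pods.items:
#     print(pod.metadata.name)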
|
nilq/baby-python
|
python
|
import json
import random
from flask_restplus import Resource, Namespace
from faker import Faker
import security
fake = Faker('en_AU')
# load BSB data into memory
with open('./resources/bsbs.json') as json_file:
bsbs = json.load(json_file)
api = Namespace('account', description='Account Namespace')
# build a fake Australian bank account from a random BSB record
def Generate_Account():
account = random.choice(bsbs)
bankAccount = {}
bankAccount['institutionName'] = account['bsb']['content']['bsbDetails'][
'institutionName'].title()
bankAccount['institutionCode'] = account['bsb']['content']['bsbDetails'][
'financialInstitutionCode']
bankAccount['bankStateBranchCode'] = account['bsb']['content'][
'bsbDetails']['bankStateBranchCode']
bankAccount['branchName'] = account['bsb']['content']['activity'][0][
'role'][0]['party'][0]['name'][0]['fullName']
bankAccount['streetAddress'] = account['bsb']['content']['activity'][0][
'role'][0]['party'][0]['address'][0]['streetAddress'].title()
    try:
        bankAccount['postcode'] = account['bsb']['content']['activity'][0][
            'role'][0]['party'][0]['address'][0]['postcode']
    except (KeyError, IndexError):
        # fall back when the BSB record has no postcode
        bankAccount['postcode'] = '2000'
    try:
        bankAccount['suburb'] = account['bsb']['content']['activity'][0][
            'role'][0]['party'][0]['address'][0]['suburb'].title()
    except (KeyError, IndexError, AttributeError):
        # fall back when the BSB record has no suburb
        bankAccount['suburb'] = 'Sydney'
bankAccount['state'] = account['bsb']['content']['activity'][0]['role'][0][
'party'][0]['address'][0]['state']
bankAccount['accountNumber'] = fake.numerify(text="##-###-####")
return bankAccount
@api.route('/random')
class GenerateAccount(Resource):
@security.token_required
def get(self):
account = Generate_Account()
return account
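# A sketch of mounting this namespace on an application (app wiring is an
# assumption; flask_restplus attaches namespaces to an Api instance):
#
# from flask import Flask
# from flask_restplus import Api
# app = Flask(__name__)
# rest_api = Api(app)
# rest_api.add_namespace(api, path='/account')
# app.run()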
|
nilq/baby-python
|
python
|
import os
import pathlib
import re
from collections import defaultdict
from functools import lru_cache
import stanza
import torch
from loguru import logger
from nlgeval import NLGEval
from transformers import AutoModelForCausalLM, AutoTokenizer
current_dir = pathlib.Path(__file__).parent.absolute()
def step_len(func):
def wrapper(*args, **kwargs):
self = args[0]
self.len += 1
return func(*args, **kwargs)
return wrapper
class Scorer:
def __init__(self, preprocess=True, metrics_to_omit=["CIDEr", "METEOR"]):
self.preprocess = preprocess
self.nlgeval = NLGEval(
no_glove=True, no_skipthoughts=True, metrics_to_omit=metrics_to_omit
)
self.score = defaultdict(lambda: 0.0)
self.len = 0
if self.preprocess:
self.nlp = stanza.Pipeline(
lang="en", processors="tokenize", tokenize_no_ssplit=True, verbose=False
)
        # punctuation/sign tokens to strip from tokenized sentences
self.stop_words_sign = (
open(os.path.join(current_dir, "stopwords-sign.txt"), "r", encoding="utf-8")
.read()
.split()
)
self.stop_words_sign_rule = "|".join(
[re.escape(sign) for sign in self.stop_words_sign]
)
@lru_cache(maxsize=200)
def _preprocess(self, raw_sentence):
result = self.nlp(raw_sentence.replace("\n\n", ""))
tokens = []
try:
for token in result.sentences[0].tokens:
tokens.append(token.text.lower())
tokenize_sentence = " ".join(tokens)
tokenize_sentence = re.sub(self.stop_words_sign_rule, "", tokenize_sentence)
except Exception as e:
logger.warning(e)
logger.warning(
f'preprocess fail, return "" raw_sentence:{raw_sentence} result:{result}'
)
return ""
return tokenize_sentence
def clean(self):
self.score = defaultdict(lambda: 0.0)
self.len = 0
    @step_len
    def add(self, *args, **kwargs):
        raise NotImplementedError
def compute(
self, save_report_dir=None, save_file_name="score.txt", return_score=True
):
        # average each accumulated metric over the number of added samples
out_score = {}
if save_report_dir is not None:
os.makedirs(save_report_dir, exist_ok=True)
save_score_report_path = os.path.join(save_report_dir, save_file_name)
score_f = open(save_score_report_path, "w", encoding="utf-8")
for score_key in self.score.keys():
_score = self.score[score_key] / self.len
out_score[score_key] = _score
if save_report_dir is not None:
score_f.write("%s\t%3.5f\n" % (score_key, _score))
if return_score:
return out_score
class SimilarityScorer(Scorer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@step_len
def add(self, hyp, refs):
refs = refs[:]
if self.preprocess:
hyp = self._preprocess(hyp)
refs = [self._preprocess(ref) for ref in refs]
_score = self.nlgeval.compute_individual_metrics(hyp=hyp, ref=refs)
for score_key in _score.keys():
self.score[score_key] += _score[score_key]
class CoverageScorer(Scorer):
def __init__(self, preprocess=True):
super().__init__(preprocess=preprocess)
        with open(
            os.path.join(current_dir, "stopwords-en.txt"), "r", encoding="utf-8"
        ) as f_en, open(
            os.path.join(current_dir, "stopwords-sign.txt"), "r", encoding="utf-8"
        ) as f_sign:
            self.stop_words = f_en.read().split() + f_sign.read().split()
# some sign used to split context to sentence, remove them from `stopwords-sign`
self.stop_words_sign = (
open(os.path.join(current_dir, "stopwords-sign.txt"), "r", encoding="utf-8")
.read()
.split()
)
self.stop_words_sign.remove(",")
self.stop_words_sign.remove(".")
self.stop_words_sign.remove("!")
self.stop_words_sign.remove("?")
self.stop_words_sign_rule = "|".join(
[re.escape(sign) for sign in self.stop_words_sign]
)
def _compute_coverage_score(self, sents: list, article: str):
sent = " ".join(sents)
sent_list = re.split(r",|\.|\!|\?", sent)
for sent in sent_list[:]:
if sent == "":
sent_list.remove(sent)
# get sents keywords
keyword_list = []
for sent in sent_list[:]:
sent = sent.lower()
word_list = sent.split()
for word in word_list:
if word not in self.stop_words:
keyword_list.append(word)
        # process the article into words and compute coverage
article_sent_list = re.split(r",|\.|\!|\?", article)
count_article_sent = len(article_sent_list)
if count_article_sent == 0:
return 0.0
count_coverage = 0
for article_sent in article_sent_list:
article_sent = article_sent.lower().split()
for keyword in keyword_list:
if keyword in article_sent:
count_coverage += 1
break
return count_coverage / count_article_sent
@step_len
def add(self, sents: list, article: str):
sents = sents[:]
if self.preprocess:
sents = [self._preprocess(sent) for sent in sents]
article = self._preprocess(article)
coverage_score = self._compute_coverage_score(sents, article)
self.score["keyword_coverage"] += coverage_score
class PPLScorer(Scorer):
def __init__(self, model, tokenizer, stride=512, max_length=512, use_sppl=False):
self.model = model
self.tokenizer = tokenizer
self.stride = stride
self.max_length = max_length
self.use_sppl = use_sppl
        # running score totals (note: Scorer.__init__ is not called here)
self.score = defaultdict(lambda: 0.0)
self.len = 0
@step_len
def add(self, sentence):
if self.use_sppl:
self.score["ppl"] += self._compute_scaled_ppl(sentence)
else:
self.score["ppl"] += self._compute_ppl(sentence)
def _compute_scaled_ppl(self, sentence, alpha=0.2):
# https://www.desmos.com/calculator/scqyyq0ody
avg_ll = self._compute_avg_log_likelihood(sentence)
return torch.exp(-avg_ll * alpha).item()
def _compute_ppl(self, sentence):
# https://huggingface.co/transformers/perplexity.html
avg_ll = self._compute_avg_log_likelihood(sentence)
return torch.exp(avg_ll).item()
@lru_cache(maxsize=200)
def _compute_avg_log_likelihood(self, sentence):
stride = self.stride
max_length = self.max_length
encodings = self.tokenizer(sentence, return_tensors="pt")
model = self.model
lls = []
for i in range(0, encodings.input_ids.size(1), stride):
begin_loc = max(i + stride - max_length, 0)
end_loc = min(i + stride, encodings.input_ids.size(1))
trg_len = end_loc - i # may be different from stride on last loop
input_ids = encodings.input_ids[:, begin_loc:end_loc].to(self.model.device)
target_ids = input_ids.clone()
target_ids[:, :-trg_len] = -100
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
log_likelihood = outputs[0] * trg_len
lls.append(log_likelihood)
return torch.stack(lls).sum() / end_loc
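# A minimal usage sketch for PPLScorer (the model choice is an assumption and
# is downloaded on first run):
#
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# scorer = PPLScorer(model, tokenizer)
# scorer.add("The quick brown fox jumps over the lazy dog.")
# print(scorer.compute())  # {'ppl': ...}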
|
nilq/baby-python
|
python
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# VulnerableCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/vulnerablecode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
from datetime import datetime
from datetime import timezone
from typing import List
from typing import Tuple
from django.db import transaction
from vulnerabilities import models
from vulnerabilities.importer import PackageURL
from vulnerabilities.improver import Inference
from vulnerabilities.models import Advisory
logger = logging.getLogger(__name__)
class ImproveRunner:
"""
ImproveRunner is responsible for populating the database with any
consumable data. It does so in its ``run`` method by invoking the given
improver and parsing the returned Inferences into proper database fields
"""
def __init__(self, improver_class):
self.improver_class = improver_class
def run(self) -> None:
improver = self.improver_class()
logger.info(f"Running improver: {improver.qualified_name}")
for advisory in improver.interesting_advisories:
inferences = improver.get_inferences(advisory_data=advisory.to_advisory_data())
process_inferences(
inferences=inferences, advisory=advisory, improver_name=improver.qualified_name
)
logger.info("Finished improving using %s.", self.improver_class.qualified_name)
@transaction.atomic
def process_inferences(inferences: List[Inference], advisory: Advisory, improver_name: str):
"""
An atomic transaction that updates both the Advisory (e.g. date_improved)
and processes the given inferences to create or update corresponding
database fields.
This avoids failing the entire improver when only a single inference is
erroneous. Also, the atomic transaction for every advisory and its
inferences makes sure that date_improved of advisory is consistent.
"""
if not inferences:
logger.warn(f"Nothing to improve. Source: {improver_name} Advisory id: {advisory.id}")
return
logger.info(f"Improving advisory id: {advisory.id}")
for inference in inferences:
vuln = get_or_create_vulnerability_and_aliases(
inference.vulnerability_id, inference.aliases, inference.summary
)
if not vuln:
logger.warn(f"Unable to get vulnerability for inference: {inference!r}")
continue
for ref in inference.references:
reference, _ = models.VulnerabilityReference.objects.get_or_create(
reference_id=ref.reference_id, url=ref.url
)
models.VulnerabilityRelatedReference.objects.update_or_create(
reference=reference, vulnerability=vuln
)
for severity in ref.severities:
_vs, updated = models.VulnerabilitySeverity.objects.update_or_create(
scoring_system=severity.system.identifier,
reference=reference,
defaults={"value": str(severity.value)},
)
if updated:
logger.info(f"Severity updated for reference {ref!r} to {severity.value!r}")
if inference.affected_purls:
for pkg in inference.affected_purls:
vulnerable_package, _ = _get_or_create_package(pkg)
models.PackageRelatedVulnerability(
vulnerability=vuln,
package=vulnerable_package,
created_by=improver_name,
confidence=inference.confidence,
fix=False,
).update_or_create()
if inference.fixed_purl:
fixed_package, _ = _get_or_create_package(inference.fixed_purl)
models.PackageRelatedVulnerability(
vulnerability=vuln,
package=fixed_package,
created_by=improver_name,
confidence=inference.confidence,
fix=True,
).update_or_create()
advisory.date_improved = datetime.now(timezone.utc)
advisory.save()
def _get_or_create_package(p: PackageURL) -> Tuple[models.Package, bool]:
    # TODO: this should be revisited as this should best be a model or manager
    # method... and possibly streamlined
    query_kwargs = dict(
type=p.type or "",
namespace=p.namespace or "",
name=p.name or "",
version=p.version or "",
qualifiers=p.qualifiers or {},
subpath=p.subpath or "",
)
return models.Package.objects.get_or_create(**query_kwargs)
def _package_url_to_package(purl: PackageURL) -> models.Package:
    # FIXME: this is likely creating a package from a purl?
p = models.Package()
p.set_package_url(purl)
return p
def get_or_create_vulnerability_and_aliases(vulnerability_id, alias_names, summary):
"""
    Get or create a vulnerability and aliases such that all existing and new
    aliases point to the same vulnerability
"""
existing_vulns = set()
alias_names = set(alias_names)
new_alias_names = set()
for alias_name in alias_names:
try:
alias = models.Alias.objects.get(alias=alias_name)
existing_vulns.add(alias.vulnerability)
except models.Alias.DoesNotExist:
new_alias_names.add(alias_name)
# If given set of aliases point to different vulnerabilities in the
# database, request is malformed
# TODO: It is possible that all those vulnerabilities are actually
# the same at data level, figure out a way to merge them
if len(existing_vulns) > 1:
        logger.warning(
            f"Given aliases {alias_names} already exist and do not point "
            f"to a single vulnerability. Cannot improve. Skipped."
        )
return
existing_alias_vuln = existing_vulns.pop() if existing_vulns else None
if (
existing_alias_vuln
and vulnerability_id
and existing_alias_vuln.vulnerability_id != vulnerability_id
):
        logger.warning(
            f"Given aliases {alias_names!r} already exist and point to existing "
            f"vulnerability {existing_alias_vuln}. Unable to create Vulnerability "
            f"with vulnerability_id {vulnerability_id}. Skipped."
        )
return
if existing_alias_vuln:
vulnerability = existing_alias_vuln
elif vulnerability_id:
try:
vulnerability = models.Vulnerability.objects.get(vulnerability_id=vulnerability_id)
except models.Vulnerability.DoesNotExist:
            logger.warning(
                f"Given vulnerability_id: {vulnerability_id} does not exist in the database"
            )
return
else:
vulnerability = models.Vulnerability(summary=summary)
vulnerability.save()
if summary and summary != vulnerability.summary:
        logger.warning(
            f"Inconsistent summary for {vulnerability!r}. "
            f"Existing: {vulnerability.summary}, provided: {summary}"
        )
for alias_name in new_alias_names:
alias = models.Alias(alias=alias_name, vulnerability=vulnerability)
alias.save()
logger.info(f"New alias for {vulnerability!r}: {alias_name}")
return vulnerability
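# A usage sketch (the improver import path is an assumption; requires Django
# setup and a populated Advisory table):
#
# from vulnerabilities.improvers.default import DefaultImprover
# ImproveRunner(improver_class=DefaultImprover).run()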
|
nilq/baby-python
|
python
|
__all__ = ("MDRenderer", "LOGGER", "RenderTreeNode", "DEFAULT_RENDERER_FUNCS")
import logging
from types import MappingProxyType
from typing import Any, Mapping, MutableMapping, Sequence
from markdown_it.common.normalize_url import unescape_string
from markdown_it.token import Token
from mdformat.renderer._default_renderers import DEFAULT_RENDERER_FUNCS
from mdformat.renderer._tree import SyntaxTreeNode
from mdformat.renderer.typing import RendererFunc
LOGGER = logging.getLogger(__name__)
class MDRenderer:
"""Markdown renderer.
A renderer class that outputs formatted Markdown. Compatible with
`markdown_it.MarkdownIt`.
"""
__output__ = "md"
def __init__(self, parser: Any = None):
"""__init__ must have `parser` parameter for markdown-it-py
compatibility."""
def render(
self,
tokens: Sequence[Token],
options: Mapping[str, Any],
env: MutableMapping,
*,
finalize: bool = True,
) -> str:
"""Takes token stream and generates Markdown.
Args:
tokens: A sequence of block tokens to render
options: Params of parser instance
env: Additional data from parsed input
finalize: write references and add trailing newline
"""
tree = RenderTreeNode(tokens)
return self.render_tree(tree, options, env, finalize=finalize)
def render_tree(
self,
tree: "RenderTreeNode",
options: Mapping[str, Any],
env: MutableMapping,
*,
finalize: bool = True,
) -> str:
# Update RENDERER_MAP defaults with renderer functions defined
# by plugins.
updated_renderers = {}
for plugin in options.get("parser_extension", []):
for token_name, renderer_func in plugin.RENDERER_FUNCS.items():
if token_name in updated_renderers:
LOGGER.warning(
"Plugin conflict. More than one plugin defined a renderer"
f' for "{token_name}" token or token pair.'
)
else:
updated_renderers[token_name] = renderer_func
renderer_map = MappingProxyType({**DEFAULT_RENDERER_FUNCS, **updated_renderers})
text = tree.render(renderer_map, options, env)
if finalize:
if env.get("used_refs"):
text += "\n\n"
text += self._write_references(env)
text += "\n"
return text
@staticmethod
def _write_references(env: MutableMapping) -> str:
ref_list = []
for label in sorted(env.get("used_refs", [])):
ref = env["references"][label]
destination = ref["href"] if ref["href"] else "<>"
destination = unescape_string(destination)
item = f"[{label.lower()}]: {destination}"
title = ref["title"]
if title:
title = title.replace('"', '\\"')
item += f' "{title}"'
ref_list.append(item)
return "\n".join(ref_list)
class RenderTreeNode(SyntaxTreeNode):
def render(
self,
renderer_funcs: Mapping[str, RendererFunc],
options: Mapping[str, Any],
env: MutableMapping,
) -> str:
renderer_func = renderer_funcs[self.type]
return renderer_func(self, renderer_funcs, options, env)
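# A minimal usage sketch: markdown-it-py accepts a renderer class, so the
# formatter can be driven directly (the input string is an example):
#
# from markdown_it import MarkdownIt
# mdit = MarkdownIt(renderer_cls=MDRenderer)
# print(mdit.render("# Title\n\nSome  *markdown*   text"))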
|
nilq/baby-python
|
python
|
import numpy as np
from utils import env_paths as paths
from base import Train
import time
class TrainModel(Train):
def __init__(self, model, output_freq=1, pickle_f_custom_freq=None,
f_custom_eval=None):
super(TrainModel, self).__init__(model, pickle_f_custom_freq, f_custom_eval)
self.output_freq = output_freq
def train_model(self, f_train, train_args, f_test, test_args, f_validate, validation_args,
n_train_batches=600, n_valid_batches=1, n_test_batches=1, n_epochs=100, anneal=None):
self.write_to_logger("### MODEL PARAMS ###")
self.write_to_logger(self.model.model_info())
self.write_to_logger("### TRAINING PARAMS ###")
self.write_to_logger(
"Train -> %s: %s" % (";".join(train_args['inputs'].keys()), str(train_args['inputs'].values())))
self.write_to_logger(
"Test -> %s: %s" % (";".join(test_args['inputs'].keys()), str(test_args['inputs'].values())))
if anneal is not None:
for t in anneal:
key, freq, rate, min_val = t
self.write_to_logger(
"Anneal %s %0.4f after %i epochs with minimum value %f." % (key, rate, int(freq), min_val))
self.write_to_logger("### TRAINING MODEL ###")
if self.custom_eval_func is not None:
self.custom_eval_func(self.model, paths.get_custom_eval_path(0, self.model.root_path))
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch += 1
start_time = time.time()
train_outputs = []
            for i in range(n_train_batches):
train_output = f_train(i, *train_args['inputs'].values())
train_outputs.append(train_output)
self.eval_train[epoch] = np.mean(np.array(train_outputs), axis=0)
self.model.after_epoch()
end_time = time.time() - start_time
if anneal is not None:
for t in anneal:
key, freq, rate, min_val = t
new_val = train_args['inputs'][key] * rate
if new_val < min_val:
train_args['inputs'][key] = min_val
elif epoch % freq == 0:
train_args['inputs'][key] = new_val
if epoch % self.output_freq == 0:
if n_test_batches == 1:
self.eval_test[epoch] = f_test(*test_args['inputs'].values())
else:
test_outputs = []
                    for i in range(n_test_batches):
test_output = f_test(i, *test_args['inputs'].values())
test_outputs.append(test_output)
self.eval_test[epoch] = np.mean(np.array(test_outputs), axis=0)
if f_validate is not None:
if n_valid_batches == 1:
self.eval_validation[epoch] = f_validate(*validation_args['inputs'].values())
else:
valid_outputs = []
                        for i in range(n_valid_batches):
valid_output = f_validate(i, *validation_args['inputs'].values())
valid_outputs.append(valid_output)
self.eval_validation[epoch] = np.mean(np.array(valid_outputs), axis=0)
else:
self.eval_validation[epoch] = [0.] * len(validation_args['outputs'].keys())
# Formatting the output string from the generic and the user-defined values.
output_str = "epoch=%0" + str(len(str(n_epochs))) + "i; time=%0.2f;"
output_str %= (epoch, end_time)
            def concatenate_output_str(out_str, d):
                for k, v in d.items():
                    out_str += " %s=%s;" % (k, v)
                return out_str
output_str = concatenate_output_str(output_str, train_args['outputs'])
output_str = concatenate_output_str(output_str, test_args['outputs'])
output_str = concatenate_output_str(output_str, validation_args['outputs'])
outputs = [float(o) for o in self.eval_train[epoch]]
outputs += [float(o) for o in self.eval_test[epoch]]
outputs += [float(o) for o in self.eval_validation[epoch]]
output_str %= tuple(outputs)
self.write_to_logger(output_str)
if self.pickle_f_custom_freq is not None and epoch % self.pickle_f_custom_freq == 0:
if self.custom_eval_func is not None:
self.custom_eval_func(self.model, paths.get_custom_eval_path(epoch, self.model.root_path))
self.plot_eval(self.eval_train, train_args['outputs'].keys(), "_train")
self.plot_eval(self.eval_test, test_args['outputs'].keys(), "_test")
self.plot_eval(self.eval_validation, validation_args['outputs'].keys(), "_validation")
self.dump_dicts()
self.model.dump_model()
if self.pickle_f_custom_freq is not None:
self.model.dump_model()
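# A sketch of the `anneal` argument consumed above: a list of
# (input_key, epoch_frequency, decay_rate, minimum_value) tuples, e.g.
#
#     anneal = [("learning_rate", 10, 0.75, 1e-5)]
#
# which multiplies train_args['inputs']["learning_rate"] by 0.75 every 10
# epochs, clamped at 1e-5 ("learning_rate" is an assumed key name).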
|
nilq/baby-python
|
python
|
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.builtin import Command
import os
from middlewares.states.all_states import download_sticker_state
from loader import dp
@dp.message_handler(text="/cancel", state=download_sticker_state)
async def cancel(message: types.Message, state: FSMContext):
    # "Function stopped! Enter a new command /commands"
    await message.answer("✅ Функция остановлена!\n\nВведите новую команду /commands")
    await state.finish()
@dp.message_handler(Command("download_sticker"), state=None)
async def get_sticker_id(message: types.Message):
    # "You've entered the sticker download function. Send the bot a sticker!
    #  Everything you send here will be processed by this function automatically.
    #  To stop it, enter /cancel"
    await message.answer('''Вы зашли в функцию по загрузке стикеров.\n
Скиньте стикер боту!\n
❗️ Всё, что вы будете сюда скидывать автоматически будут обрабатываться в этой функции.
❗️ Если вам нужно её остановить, то введите /cancel''')
    await download_sticker_state.step1.set()
@dp.message_handler(content_types="sticker", state=download_sticker_state.step1)
async def get_sticker_id_send(message: types.Message):
if message.sticker.is_animated == True:
await message.answer("❗️ Загрузка анимированных стикер не работает!")
elif message.sticker.is_animated == False:
stickerpack_name = message.sticker.set_name
file_id = message.sticker.file_unique_id
await message.sticker.download(f"./handlers/download_sticker/temp/{stickerpack_name} - @{file_id}.png")
await message.reply_document(types.InputFile(f"./handlers/download_sticker/temp/{stickerpack_name} - @{file_id}.png"))
os.remove(f"./handlers/download_sticker/temp/{stickerpack_name} - @{file_id}.png")
|
nilq/baby-python
|
python
|
import csv
import argparse
import enum
from normalizer import normalizer
from common import common
def RepresentsFloat(val):
try:
float(val)
return True
except ValueError:
return False
if __name__ == "__main__":
ft_type = enum.Enum("ft_type", ("train", "valid"))
    # Description (Turkish): "Labels and saves the comments in the prepared
    # dataset for use with the FastText library."
    parser = argparse.ArgumentParser(
        description='Hazır veri setinde yer alan yorumları FastText kütüphanesi kullanımı için etiketleyerek kaydeder.')
    # Help (Turkish): "Enter the usage type during FastText training"
    parser.add_argument('--ft_type', type=str, nargs='?',
                        choices=tuple(t.name for t in ft_type),
                        default=ft_type.train.name,
                        help='FastText öğrenmesi esnasında kullanım tipini giriniz')
    # Help (Turkish): "Enter the line number to start reading from"
    parser.add_argument('--start_line', type=int, nargs='?', const=2,
                        default=2,
                        help='Okumaya başlanacak satır sayısını giriniz')
args = parser.parse_args()
inputs = []
with open(f"dataset/sentiment_data.csv", 'r') as f:
reader = csv.reader(f)
[next(reader, None) for item in range(args.start_line - 1)]
for row in reader:
rate_is_convertable = RepresentsFloat(row[1])
if rate_is_convertable:
label = "__label__"
comment = normalizer.normalize(row[2])
rate = float(row[1])
if rate == 0:
label += "negative"
elif rate == 1:
label += "positive"
                else:
                    label += "notr"  # "notr" = neutral (Turkish)
inputs.append(f"{label} {comment}")
common.colored_print(
common.bcolors.WARNING, f"All items {len(inputs)} are labeled. {args.ft_type} file creation is starting...")
with open(f'dataset/comments.{args.ft_type}', 'a') as f:
for item in inputs:
f.write(f"{item}\n")
common.colored_print(common.bcolors.OKBLUE, "=== COMPLETED ===")
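# A sketch of consuming the labeled file with the fastText bindings
# (hyperparameters and the sample input are assumptions):
#
# import fasttext
# model = fasttext.train_supervised(input="dataset/comments.train", epoch=25, lr=0.5)
# print(model.predict("harika bir ürün"))  # -> (('__label__positive',), ...)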
|
nilq/baby-python
|
python
|
import os
import sys
import json
import tweepy
import requests
import pandas as pd
from defipulse import DefiPulse
from coingecko import CoinGecko
from subprocess import call
# Twitter credentials come from the environment; the fallback values below
# look like leftover AWS region placeholders, not real keys.
consumer_key = os.environ.get('TWITTER_CONSUMER_KEY', 'ap-northeast-1')
consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET', 'ap-northeast-1')
access_token = os.environ.get('TWITTER_ACCESS_TOKEN', 'ap-northeast-1')
access_secret = os.environ.get('TWITTER_ACCESS_SECRET', 'ap-northeast-1')
def rates(token):
obj = DefiPulse()
rates = obj.getRates(token)
names = ['Maker', 'Compound', 'Aave']
tweet = "Current DeFi Rates for {} #DeFi #Ethereum\n".format(token)
for name in names:
tweet = tweet + "{0}, lend: {1}%, borrow: {2}%\n".format(name, rates['rates'][name]['lend']['rate'], rates['rates'][name]['borrow']['rate'])
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if tweet is not None:
print(tweet)
api.update_status(tweet)
else:
print('empty tweet')
except Exception as e:
print(e)
def prices():
obj = DefiPulse()
projects, names = obj.getProjects()
# print(' '.join([project['name'] for project in projects]))
tweet = "Current DeFi Top3 in TVL/USD #DeFi #Ethereum\n"
for project in projects[:3]:
tweet = tweet + "Name: {0}, tvlUSD: {1}, USD 1day relative {2}%\n".format(project['name'], project['value']['tvl']['USD']['value'], project['value']['tvl']['USD']['relative_1d'])
# tweet = 'Name: {0}, tvlUSD: {1}, USD 1day relative {2}%, tvlETH: {3}, ETH 1day relative {4}%'.format(project['name'], project['value']['tvl']['USD']['value'], project['value']['tvl']['USD']['relative_1d'], project['value']['tvl']['ETH']['value'], project['value']['tvl']['ETH']['relative_1d'])
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if tweet is not None:
print(tweet)
api.update_status(tweet)
else:
print('empty tweet')
except Exception as e:
print(e)
def tvl(coin, vs_currency, days, period='1w'):
obj = DefiPulse()
tvls = obj.getTVL(period)
obj1 = CoinGecko()
df = obj1.getCoinVolume(coin, vs_currency, days)
path1 = obj.drawTVLinUSD(tvls, df)
path2 = obj.drawTVLinETH(tvls, df)
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path1):
api.update_with_media(filename=path1, status='Eth trading volume and Total Value Locked in USD #DeFi #Ethereum')
else:
print('empty tweet')
if os.path.exists(path2):
api.update_with_media(filename=path2, status='Eth trading volume and Total Value Locked in ETH #DeFi #Ethereum')
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def tokenprices(coins, vs_currency, days):
obj = CoinGecko()
df = pd.DataFrame()
for coin in coins:
y = obj.getCoinData(coin, vs_currency, days)
df = pd.concat([df, y[['Close']]], axis=1, sort=True, join='outer')
return df
def draws(period='1w'):
tokens = ['Uniswap', 'Maker', 'Aave', 'Compound', 'Synthetix']
obj = DefiPulse()
path = obj.drawPercent(tokens, period)
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path):
api.update_with_media(filename=path, status='Weekly Total Value Lock change in DefiPulse #DeFi #Ethereum')
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def debts():
obj = DefiPulse()
path = obj.drawDebt()
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path):
api.update_with_media(filename=path, status='Weekly Outstanding Debt USD in DefiPulse #DeFi #Ethereum')
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def tweet_with_image(path, tweet):
# initialize tweepy instance
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
if os.path.exists(path):
api.update_with_media(filename=path, status=tweet)
else:
print('empty tweet')
except Exception as e:
print(e)
call('rm -rf /tmp/*', shell=True)
def lambda_handler(event, context):
if event['operation'] == 'rates':
token = event['token']
rates(token)
elif event['operation'] == 'prices':
prices()
elif event['operation'] == 'tvl':
token = 'ethereum'
tvl(token, 'usd', '7')
elif event['operation'] == 'draws':
draws()
elif event['operation'] == 'debts':
debts()
elif event['operation'] == 'govtokens':
coins = ['bitcoin', 'ethereum', 'maker', 'uniswap', 'compound-governance-token', 'havven', 'aave', 'yearn-finance']
tickers = ['BTC', 'ETH', 'MKR', 'UNI', 'COMP', 'SNX', 'AAVE', 'YFI']
df = tokenprices(coins, 'usd', '7')
df.columns = tickers
df.dropna(how='any', inplace=True)
df_t = df.copy()
df_t /= df.loc[df.index[0]]
obj = CoinGecko()
path = obj.draw(df_t, 'Weekly_Governance_Token_Price_Change')
tweet = 'Weekly Governance Token Price Change #DeFi #Ethereum'
tweet_with_image(path, tweet)
elif event['operation'] == 'corrtokens':
coins = ['bitcoin', 'ethereum', 'maker', 'uniswap', 'compound-governance-token', 'havven', 'aave', 'yearn-finance']
tickers = ['BTC', 'ETH', 'MKR', 'UNI', 'COMP', 'SNX', 'AAVE', 'YFI']
df = tokenprices(coins, 'usd', '7')
df.columns = tickers
df.dropna(how='any', inplace=True)
        df_c = df.copy()
        for t in tickers:
            df_c['pct_' + t] = df_c.loc[:, t].pct_change(1).fillna(df_c[t].pct_change(1).median())
            df_c['rol_' + t] = df_c.loc[:, 'pct_' + t].rolling(7).sum().fillna(df_c['pct_' + t].rolling(7).sum().median())
        # gather the rolling-change columns into one frame
        df_corr = df_c.loc[:, df_c.columns.str.contains('rol')]
df_corr.columns = tickers
obj = CoinGecko()
path = obj.draw(df_corr[7:], 'Rolling_7-days_change_of_DeFi_and_Crypto')
tweet = 'Rolling 7 days change of DeFi and Crypto(%) #DeFi #Ethereum'
tweet_with_image(path, tweet)
# call lambda_handler
if __name__ == "__main__":
lambda_handler(json.loads(sys.argv[1]), {})
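# A minimal local-invocation sketch: the handler expects the event as a JSON
# string on the command line. The operation names come from the dispatch
# above; the file name and the token value are placeholders.
#   python bot.py '{"operation": "rates", "token": "Dai"}'
#   python bot.py '{"operation": "draws"}'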
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2021 Incisive Technology Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import importlib
import pytest
from hikaru import HikaruDocumentBase, set_default_release
from hikaru.model.rel_1_16.versions import versions
def beginning():
set_default_release('rel_1_16')
def ending():
pass
@pytest.fixture(scope='module', autouse=True)
def setup():
beginning()
yield 1
ending()
test_classes = []
for version in versions:
mod = importlib.import_module(f".{version}", f"hikaru.model.rel_1_16.{version}")
for c in vars(mod).values():
if (type(c) is type and issubclass(c, HikaruDocumentBase) and
c is not HikaruDocumentBase):
test_classes.append(c)
@pytest.mark.parametrize('cls', test_classes)
def test_docclass(cls):
assert hasattr(cls, 'apiVersion'), f"Class {cls.__name__} doesn't have apiVersion"
assert cls.apiVersion, f"Class {cls.__name__} has no value for apiVersion"
assert hasattr(cls, 'kind'), f"Class {cls.__name__} doesn't have kind"
assert cls.kind, f"Class {cls.__name__} has no value for kind"
if __name__ == "__main__":
beginning()
try:
for cls in test_classes:
test_docclass(cls)
finally:
ending()
|
nilq/baby-python
|
python
|
"""Remote apps
=============
"""
|
nilq/baby-python
|
python
|
import os
import sqlite3
'''
THING I THINK I'M MISSING:
The completed date shouldn't be a boolean, because an item can be overdue.
It was an integer before so it could be set to the date on which it was
marked completed. This should be changed back.
'''
class Equipment:
def __init__(self, pk=-1, name=''):
self.pk = pk
self.name = name
class MaintenanceItem:
def __init__(self, pk=-1, name='', numdays=-1, equipment=None):
self.pk = pk
self.name = name
self.numdays = numdays
self.equipment = equipment
class MaintenanceDate:
def __init__(self, pk=-1, startdate=0, completedate=0, iscomplete=False, maintenanceitem=None):
self.pk = pk
self.startdate = startdate
self.completedate = completedate
self.iscomplete = iscomplete
self.maintenanceitem = maintenanceitem
class DBManager:
def __init__(self, name):
exists = False
if os.path.isfile(name):
exists = True
self.conn = sqlite3.connect(name)
self.cursor = self.conn.cursor()
if not exists:
self.setup_database()
def setup_database(self):
with open('db_setup.sql', 'r') as f:
db_instructions = f.read()
self.conn.executescript(db_instructions)
# Equipment
def get_all_equipment(self):
self.cursor.execute('''
SELECT
pk,
name
FROM
equipment
ORDER BY
name DESC''') # this is not a mistake... blame treeview
db_eq = self.cursor.fetchall()
equipment_list = []
for eq in db_eq:
equipment_list.append(Equipment(eq[0], eq[1]))
return equipment_list
def get_equipment(self, name):
self.cursor.execute("SELECT pk, name FROM equipment WHERE name=?", (name,))
db_eq = self.cursor.fetchone()
        if db_eq is None:
return None
return Equipment(db_eq[0], db_eq[1])
def insert_equipment(self, name):
try:
self.cursor.execute("INSERT INTO equipment(name) VALUES(?);", (name,))
self.conn.commit()
except sqlite3.IntegrityError:
return None
return self.get_equipment(name)
# Maintenace Items
def get_all_maintenance_items(self, equipment):
self.cursor.execute('''
SELECT
pk,
name,
numdays,
equipmentid
FROM
maintenanceitem
WHERE
equipmentid=?''',
(equipment.pk,))
db_mi = self.cursor.fetchall()
mi_list = []
for mi in db_mi:
mi_list.append(MaintenanceItem(mi[0], mi[1], mi[2], equipment))
return mi_list
def insert_maintenance_item(self, maintenance_name, numdays, equipment):
self.cursor.execute('''
INSERT INTO
maintenanceitem(
name,
numdays,
equipmentid
)
VALUES(?,?,?);''',
(maintenance_name, numdays, equipment.pk))
self.conn.commit()
pk = self.get_maintenance_item_pk(maintenance_name, numdays, equipment.pk)
        if pk is None:
return None
return MaintenanceItem(pk, maintenance_name, numdays, equipment)
def get_maintenance_item_pk(self, name, numdays, equip_pk):
self.cursor.execute( '''
SELECT
pk
FROM
maintenanceitem
WHERE
name=?
AND
numdays=?
AND
equipmentid=?''',
(name, numdays, equip_pk,))
op = self.cursor.fetchone()
        if op is None:
return None
return op[0]
def get_all_maintenance_dates(self, m_item: MaintenanceItem):
self.cursor.execute('''
SELECT
pk,
startdate,
completedate,
iscomplete
FROM
maintenancedate
WHERE
maintenanceid=?
ORDER BY
startdate DESC''',
(m_item.pk,))
db_md = self.cursor.fetchall()
md_list = []
        for md in db_md:
            md_list.append(MaintenanceDate(md[0], md[1], md[2], md[3], m_item))
return md_list
    def insert_maintenance_date(self, m_item: MaintenanceItem, startdate):
        self.cursor.execute('''
            INSERT INTO
                maintenancedate(
                    startdate,
                    iscomplete,
                    maintenanceid
                )
            VALUES(?,?,?);''',
            (startdate, False, m_item.pk))
        self.conn.commit()
# pk = self.get_maintenance_item_pk(maintenance_name, numdays, equipment.pk)
# if pk == None:
# return None
# return MaintenanceItem(pk, maintenance_name, numdays, equipment)
def get_maintenance_date_pk(self, startdate, numdays):
pass
    def set_completed(self, m_date: MaintenanceDate, completed: bool):
        # the column is named `iscomplete`, matching the SELECT above
        self.cursor.execute('''
            UPDATE
                maintenancedate
            SET
                iscomplete = ?
            WHERE
                pk = ?''',
            (completed, m_date.pk,))
        self.conn.commit()
def close(self):
self.conn.close()
# BE CAREFUL MY DUDE
def drop_all(self):
self.cursor.execute("DROP TABLE IF EXISTS equipment")
self.cursor.execute("DROP TABLE IF EXISTS maintenanceitem")
self.cursor.execute("DROP TABLE IF EXISTS maintenancedate")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class ScholarshipItem(Item):
University = Field()
Program = Field()
Degree = Field()
Duration = Field()
Instruction_Language = Field()
Tuition_Fee_RMB = Field()
Starting_Date = Field()
Application_Deadline = Field()
|
nilq/baby-python
|
python
|
import boto3
import csv
profile = "default"
def get_instance(instance_name):
ec2 = boto3.resource('ec2')
return ec2.instances.filter(Filters=[{'Name': 'tag:Name', 'Values': [instance_name]}])
boto3.setup_default_session(profile_name=profile)
ec2 = boto3.client('ec2')
ec2_list = []
sg_name_dict = {}
# dict all {'sg_name': 'sg_id'} on aws
response = ec2.describe_security_groups()
for sg in response['SecurityGroups']:
    sg_name = sg['GroupName']
    sg_id = sg.get('GroupId', '')
    sg_name_dict[sg_name] = sg_id
# read csv file
with open('input.csv', 'r') as input_file:
reader = csv.reader(input_file)
rows = [row for row in reader]
for server_list in rows[1:]:
instance_name = server_list[0]
each_instance = get_instance(instance_name)
all_sg_ids = []
sg_list = []
for sg in server_list[1:]:
if sg:
sg_list.append(sg)
all_sg_ids.append(sg_name_dict.get(sg, ''))
    # modify the instance's security groups
    try:
        for i in each_instance:
            i.modify_attribute(Groups=all_sg_ids)
        print(instance_name + ' replaced security groups successfully: ' + ', '.join(sg_list))
    except Exception as e:
        print(instance_name + ': unable to replace security groups (' + str(e) + '). Check that every security group name in the row exists.')
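# A sketch of the input.csv layout this script expects, inferred from the
# parsing above rather than from any project documentation: a header row,
# then the instance Name tag followed by the desired security group names.
# The rows shown are illustrative.
#
# with open('input.csv', 'w', newline='') as f:
#     csv.writer(f).writerows([
#         ['instance_name', 'sg_1', 'sg_2'],
#         ['web-server-01', 'default', 'web-sg'],
#     ])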
|
nilq/baby-python
|
python
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4
from xos.exceptions import *
from backupoperation_decl import *
class BackupOperation(BackupOperation_decl):
class Meta:
proxy = True
def save(self, *args, **kwargs):
if not self.uuid:
self.uuid = uuid4()
super(BackupOperation, self).save(*args, **kwargs)
|
nilq/baby-python
|
python
|
from ..resources.resource import Resource
class Suppliers(Resource):
def __init__(self):
super().__init__("suppliers")
def delete(self, id):
raise NotImplementedError("Not possible to post a warehouse")
|
nilq/baby-python
|
python
|
def tree(x):
    # each level n has 2n + 1 stars, centered in a field of width 2x + 1
    print("\n".join(f"{'*' * (2 * n + 1):^{2 * x + 1}}" for n in range(x)))

def trunk(x, rows=2):
    # center a three-star trunk under a tree of height x
    for _ in range(rows):
        print(f"{'***':^{2 * x + 1}}")

tree(4)
trunk(4)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import messagebox
import algoritmo
import aicSpider
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self._frame = None
self.switch_frame(StartPage)
    def switch_frame(self, frame_class):
        """Destroys the current frame and creates the new one."""
        new_frame = frame_class(self)
        if self._frame is not None:
            self._frame.destroy()
        self._frame = new_frame
        self._frame.grid()

# First window class
class StartPage(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
master.title("Projeto AIC 3")
        #### Center the window on the screen
larguraDaJanela = 300
alturaDaJanela = 200
larguraDaTela = master.winfo_screenwidth()
alturaDaTela = master.winfo_screenheight()
Xjanela = (larguraDaTela/2) - (larguraDaJanela/2)
Yjanela = (alturaDaTela/2) - (alturaDaJanela/2)
master.geometry("%dx%d+%d+%d" % (larguraDaJanela, alturaDaJanela, Xjanela,Yjanela))
        ############################
        # Variables for automated placement of the widgets in the window
        j = 0
        i = 0
        ########## Memory size input
textoTamMemoria = tk.Label(self, text="Tamanho da memória: ")
textoTamMemoria.grid(column=i, row=j,padx=5)
i += 1
entradaTamMemoria = tk.Entry(self,width=10)
entradaTamMemoria.grid(column=i, row=j,pady=25)
j += 1
        ############## Heuristic options
i = 0
textoOpcaoHeuristica = tk.Label(self, text="Heurística:")
textoOpcaoHeuristica.grid(column=i, row=j)
i += 1
#Menu "bolinha"
##Varíavel que recebe opção escolhida
selected = tk.IntVar()
optionFirst = tk.Radiobutton(self,text='First Fit', value=1,variable = selected)
optionFirst.grid(column=i, row=j)
j+=1
optionBest = tk.Radiobutton(self,text='Best Fit', value=2,variable = selected)
optionBest.grid(column=i, row=j)
j+=1
optionWorst = tk.Radiobutton(self,text='Worst Fit', value=3,variable = selected)
optionWorst.grid(column=i, row=j)
j+=1
        ## Algorithm start-up
def botaoPressionado(PageTwo):
global modo
modo = selected.get()
global tamMemoria
tamMemoria = int(entradaTamMemoria.get())
            if (tamMemoria < 200):
                messagebox.showinfo('Error!', 'The memory size must be greater than 200.')
                return
            if (tamMemoria > 1024):
                messagebox.showinfo('Error!', 'The memory size must be less than 1024.')
                return
master.switch_frame(PageTwo)
        # Button that starts the algorithm
botaoInicio = tk.Button(self, text="Iniciar", command=lambda: botaoPressionado(PageTwo))
botaoInicio.grid(column=i, row=j,pady=15)
class PageTwo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
master.title("Memória Tempo-Real")
### Deixa janela no centro
larguraDaJanela = 1000
alturaDaJanela = 300
larguraDaTela = master.winfo_screenwidth()
alturaDaTela = master.winfo_screenheight()
Xjanela = (larguraDaTela/2) - (larguraDaJanela/2)
Yjanela = (alturaDaTela/2) - (alturaDaJanela/2)
master.geometry("%dx%d+%d+%d" % (larguraDaJanela, alturaDaJanela, Xjanela,Yjanela))
##########################
        # Generates a hex color code from the process position and size
        def geraCor(posicaoProcesso, tamProcesso):
            codigoCor = '#' + str(abs(tamProcesso - posicaoProcesso))
            for i in range(len(codigoCor), 7):
                codigoCor = codigoCor + "f"
            # pads to a six-digit hex color of the form "#??????"
return codigoCor
#Criação do "canvas" -> Ambiente de desenho
canvas = tk.Canvas(self, width=larguraDaJanela, height=100)
# Cria as linhas que formam a memória
def criaLinhas():
lista = []
i, x, y, y1 = 0, 20, 1, 80
while (x < (larguraDaJanela-20)):
lista.append(canvas.create_line(x, y, x, y1, fill="#a0a0a0"))
x += 1
global numeroDeLinhasMemoria
numeroDeLinhasMemoria = len(lista)
return lista
        #### Legend ########################
        if(modo == 1):
            textoHeuristica = tk.Label(self, text="First Fit", font="Helvetica 16 bold")
            textoHeuristica.grid(column=1, row=0, padx=5)
        if(modo == 2):
            textoHeuristica = tk.Label(self, text="Best Fit", font="Helvetica 16 bold")
            textoHeuristica.grid(column=1, row=0, padx=5)
        if(modo == 3):
            textoHeuristica = tk.Label(self, text="Worst Fit", font="Helvetica 16 bold")
            textoHeuristica.grid(column=1, row=0, padx=5)
        textoLivre = tk.Label(self, text="(Gray) Free memory", font="Helvetica 16 bold")
        textoLivre.grid(column=0, row=2, padx=5, pady=10)
        textoOcupada = tk.Label(self, text="(Blue) Used memory", font="Helvetica 16 bold")
        textoOcupada.grid(column=2, row=2, padx=5, pady=10)
        ######################################
        ## Fills the memory display with the inserted process.
        ## Needs the process size, the memory size and the position in memory.
        def preencheMemoria(posicaoProcesso, tamProcesso):
            if(tamProcesso != 0):
                # Compute how many lines must be filled
                taxaDeLinhas = tamProcesso/tamMemoria
                numeroDeLinhas = taxaDeLinhas * len(listaDeLinhas)
                numeroDeLinhas = (int(numeroDeLinhas))+2
                # Find out how much memory each drawn line represents
                pesoLinha = tamMemoria/len(listaDeLinhas)
                # Find out where in the display the painting must start
                posicaoMemoria = posicaoProcesso/pesoLinha
                posicaoMemoria = (int(posicaoMemoria))+2
                # Paint from the starting line through the computed range
                cor = geraCor(posicaoProcesso, tamProcesso)
                for i in range(posicaoMemoria,(posicaoMemoria+numeroDeLinhas)):
                    canvas.itemconfig(i, fill=cor)
                if(posicaoProcesso == 0):
                    for i in range(0,10):
                        canvas.itemconfig(i, fill=cor)
def limpaMemoria():
for i in range(0,numeroDeLinhasMemoria):
canvas.itemconfig(i, fill='#a0a0a0')
        ##### Control buttons ######
        # General handler shared by all the buttons
        def pressionado(listaDeEstados, botao):
global momento
if(botao == "proximo"):
if (momento < len(listaDeEstados)-1):
momento += 1
if((botao == "anterior") and (momento != 0)):
if (momento > 0):
momento -= 1
if(botao == "inicio"):
momento = 0
            # Receives the list for the current moment: [Clock, [startPos, processSize], [pos2, size2], ...]
            estadoAtual = listaDeEstados[momento]
            # Assign each value to its own variable
            ## Receive and display the current clock
            clock = estadoAtual[0]
            textoClock = tk.Label(self, text="Clock "+str(clock), font="Helvetica 16 bold")
            textoClock.grid(column=2, row=0, padx=5)
            i = 1
            limpaMemoria()
            while (i <= len(estadoAtual)-1):
                posicaoProcesso = estadoAtual[i][0]
                tamProcesso = estadoAtual[i][1]
                # Fill the memory display with the given data
                preencheMemoria(posicaoProcesso, tamProcesso)
i += 1
        # Handler executed when the "Inicio" button is pressed
        def pressionadoInicio(listaDeEstados):
            botao = "inicio"
            pressionado(listaDeEstados, botao)
        # Handler executed when the "Proximo" button is pressed
        def pressionadoProximo(listaDeEstados):
            botao = "proximo"
            pressionado(listaDeEstados, botao)
        # Handler executed when the "Anterior" button is pressed
        def pressionadoAnterior(listaDeEstados):
            botao = "anterior"
            pressionado(listaDeEstados, botao)
global listaDeLinhas
listaDeLinhas = criaLinhas()
        # Build the list with every moment of process arrival and departure
        global matrizGeral
        matrizGeral = algoritmo.main(tamMemoria)
        listaDeEstados = matrizGeral[modo-1][0]
        listaDeEstados.insert(0,[0,[0,0]])
        # Moment (current clock)
        global momento
        momento = 0
        # Button declarations (the lambdas are needed to pass parameters)
botaoInicio = tk.Button(self, text="Inicio",command=lambda: pressionado(listaDeEstados,"inicio"))
botaoProximo = tk.Button(self, text="Proximo",command=lambda: pressionado(listaDeEstados, "proximo"))
botaoAnterior = tk.Button(self, text="Anterior",command=lambda: pressionado(listaDeEstados, "anterior"))
botaoInicio.grid(column=1,row=3,pady=10)
botaoProximo.grid(column=2,row=3,pady=10)
botaoAnterior.grid(column=0,row=3,pady=10)
        # Handler for the button that opens the matplotlib charts
        def pressionadoGrafico(PageThree):
            # Option 1: open a tkinter window that receives the matplotlib image
            #master.switch_frame(PageThree)
            # Option 2: call matplotlib directly
            self.master.destroy()
            aicSpider.main(matrizGeral)
botaoGraficos = tk.Button(self, text="Ir Para Gráficos",command=lambda: pressionadoGrafico(PageThree))
botaoGraficos.grid(column=1,row=4,pady=5)
        # Canvas placement
canvas.grid(columnspan=3,row=1)
# Attempt at a third tkinter window
class PageThree(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
master.title("Gráfico de resultados")
### Deixa janela no centro
larguraDaJanela = 300
alturaDaJanela = 300
larguraDaTela = master.winfo_screenwidth()
alturaDaTela = master.winfo_screenheight()
Xjanela = (larguraDaTela/2) - (larguraDaJanela/2)
Yjanela = (alturaDaTela/2) - (alturaDaJanela/2)
master.geometry("%dx%d+%d+%d" % (larguraDaJanela, alturaDaJanela, Xjanela,Yjanela))
        ##########################
        aicSpider.main()
        #img = aicSpider.main()
        label = tk.Label(self, text="This is page 2")
        label.grid(pady=10)
        button = tk.Button(self, text="Go to the start page",
                           command=lambda: master.switch_frame(StartPage))
        button.grid()
if __name__ == "__main__":
app = SampleApp()
app.mainloop()
# Top menu (commented-out draft)
# menu = Menu(self)
# new_item = Menu(menu)
# new_item.add_command(label='New')
# new_item.add_separator()
# new_item.add_command(label='Edit')
# menu.add_cascade(label='File', menu=new_item)
# self.config(menu=menu)
|
nilq/baby-python
|
python
|
from django.urls import path

from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
path("following", views.following, name="following"),
path("post-message", views.postmessage, name="postmessage"),
path("like/<int:id>", views.like, name="like"),
path("profile/<str:username>", views.profile, name="profile"),
path("follow/<int:id>", views.follow, name="follow"),
path("editpost/<int:id>", views.editpost, name="editpost")
]
|
nilq/baby-python
|
python
|
import argparse
import os
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# setup
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--save-interval', type=int, default=10, metavar='N',
help='how many batches to wait before checkpointing')
parser.add_argument('--resume', action='store_true', default=False,
help='resume training from checkpoint')
args = parser.parse_args()
use_cuda = torch.cuda.is_available() and not args.no_cuda
device = torch.device('cuda' if use_cuda else 'cpu')
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
# data
data_path = os.path.join(os.path.expanduser('~'), '.torch', 'datasets', 'mnist')
train_data = datasets.MNIST(data_path, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
test_data = datasets.MNIST(data_path, train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
train_loader = DataLoader(train_data, batch_size=args.batch_size,
shuffle=True, num_workers=4, pin_memory=True)
test_loader = DataLoader(test_data, batch_size=args.batch_size,
num_workers=4, pin_memory=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
model = Net().to(device)
optimiser = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
if args.resume:
model.load_state_dict(torch.load('model.pth'))
optimiser.load_state_dict(torch.load('optimiser.pth'))
# training
model.train()
train_losses = []
for epoch in range(args.epochs):
    for i, (data, target) in enumerate(train_loader):
        data = data.to(device=device, non_blocking=True)
        target = target.to(device=device, non_blocking=True)
        optimiser.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        train_losses.append(loss.item())
        optimiser.step()

        if i % args.save_interval == 0:
            print(epoch, i, loss.item())
            torch.save(model.state_dict(), 'model.pth')
            torch.save(optimiser.state_dict(), 'optimiser.pth')
            torch.save(train_losses, 'train_losses.pth')
# testing
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for data, target in test_loader:
data = data.to(device=device, non_blocking=True)
target = target.to(device=device, non_blocking=True)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_data)
acc = correct / len(test_data)
print(acc, test_loss)
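
# A minimal inference sketch reusing the names defined above: reload the
# checkpoint written during training and classify one test image.
with torch.no_grad():
    sample, label = test_data[0]
    restored = Net().to(device)
    restored.load_state_dict(torch.load('model.pth'))
    restored.eval()
    prediction = restored(sample.unsqueeze(0).to(device)).argmax(1).item()
    print('predicted', prediction, 'actual', label)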
|
nilq/baby-python
|
python
|
"""Using a class as a decorator demo.
Count the number of times a function is called.
"""
class CallCount:
    def __init__(self, function):
        self.f = function
        self.count = 0  # routed through the setter below, initialising _count to 0

    def __call__(self, *args, **kwargs):
        self.count = 1  # the setter turns this assignment into an increment
        return self.f(*args, **kwargs)

    @property
    def count(self):
        return self._count

    @count.setter
    def count(self, value):
        # increment _count by the assigned value; on the very first
        # assignment _count does not exist yet, so start it at 0
        try:
            self._count += value
        except AttributeError:
            self._count = 0
@CallCount
def hello(name):
print(f"Hello {name}")
if __name__ == "__main__":
hello("Ana")
hello("Annabelle")
hello("Miguel")
hello("Tony")
print(f"The function hello has been called {hello.count} times.")
|
nilq/baby-python
|
python
|
# Copyright (c) Open-MMLab. All rights reserved.
from mmcv.runner.hooks.hook import HOOKS, Hook
@HOOKS.register_module()
class AlternateTrainingHook(Hook):
    """Alternates training between epochs: on even epochs the neck trains
    with its attention blocks frozen; on odd epochs only the attention
    blocks train."""

    # def before_train_iter(self, runner):
    def before_train_epoch(self, runner):
        runner.model.module.neck.epoch_num = runner._epoch
# if runner._iter < 2000:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# for param_0 in runner.model.module.neck.attention_block_0.parameters():
# param_0.requires_grad = True
# for param_1 in runner.model.module.neck.attention_block_1.parameters():
# param_1.requires_grad = True
# for param_2 in runner.model.module.neck.attention_block_2.parameters():
# param_2.requires_grad = True
# for param_3 in runner.model.module.neck.attention_block_3.parameters():
# param_3.requires_grad = True
# if runner._iter > 2000:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# for param_0 in runner.model.module.neck.attention_block_0.parameters():
# param_0.requires_grad = False
# for param_1 in runner.model.module.neck.attention_block_1.parameters():
# param_1.requires_grad = False
# for param_2 in runner.model.module.neck.attention_block_2.parameters():
# param_2.requires_grad = False
# for param_3 in runner.model.module.neck.attention_block_3.parameters():
# param_3.requires_grad = False
# runner.model.module.neck.attention_block_0.requires_grad = True
# runner.model.module.neck.attention_block_1.requires_grad = True
# runner.model.module.neck.attention_block_2.requires_grad = True
# runner.model.module.neck.attention_block_3.requires_grad = True
if runner._epoch % 2 == 0:
for param in runner.model.module.neck.parameters():
param.requires_grad = True
for param_0 in runner.model.module.neck.attention_block_0.parameters():
param_0.requires_grad = False
for param_1 in runner.model.module.neck.attention_block_1.parameters():
param_1.requires_grad = False
for param_2 in runner.model.module.neck.attention_block_2.parameters():
param_2.requires_grad = False
for param_3 in runner.model.module.neck.attention_block_3.parameters():
param_3.requires_grad = False
else:
for param in runner.model.module.neck.parameters():
param.requires_grad = False
for param_0 in runner.model.module.neck.attention_block_0.parameters():
param_0.requires_grad = True
for param_1 in runner.model.module.neck.attention_block_1.parameters():
param_1.requires_grad = True
for param_2 in runner.model.module.neck.attention_block_2.parameters():
param_2.requires_grad = True
for param_3 in runner.model.module.neck.attention_block_3.parameters():
param_3.requires_grad = True
# elif runner._iter == 100:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# runner.model.module.neck.attention_block_0.requires_grad = True
# runner.model.module.neck.attention_block_1.requires_grad = True
# runner.model.module.neck.attention_block_2.requires_grad = True
# runner.model.module.neck.attention_block_3.requires_grad = True
#
# elif runner._iter == 150:
# # if runner._epoch % 2 == 0:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = True
# runner.model.module.neck.attention_block_0.requires_grad = False
# runner.model.module.neck.attention_block_1.requires_grad = False
# runner.model.module.neck.attention_block_2.requires_grad = False
# runner.model.module.neck.attention_block_3.requires_grad = False
#
# elif runner._iter == 200:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = False
# runner.model.module.neck.attention_block_0.requires_grad = True
# runner.model.module.neck.attention_block_1.requires_grad = True
# runner.model.module.neck.attention_block_2.requires_grad = True
# runner.model.module.neck.attention_block_3.requires_grad = True
# elif runner._iter == 250:
# for param in runner.model.module.neck.parameters():
# param.requires_grad = True
# runner.model.module.neck.attention_block_0.requires_grad = False
# runner.model.module.neck.attention_block_1.requires_grad = False
# runner.model.module.neck.attention_block_2.requires_grad = False
# runner.model.module.neck.attention_block_3.requires_grad = False
|
nilq/baby-python
|
python
|
import os
import logging
from flask import Flask
from slack import WebClient
from slackeventsapi import SlackEventAdapter
# Initialize a Flask app to host the events adapter
app = Flask(__name__)
# Create an events adapter and register it to an endpoint in the slack app for event ingestion.
slack_events_adapter = SlackEventAdapter(os.environ.get("SLACK_EVENTS_TOKEN"), "/slack/events", app)
# When a 'message' event is detected by the events adapter, forward that payload
# to this function.
@slack_events_adapter.on("message")
def message(payload):
"""Parse the message event, and if the activation string is in the text,
send a reply
"""
    # Get the event data from the payload
    event = payload.get("event", {})
    # Get the text from the event; default to an empty string, since some
    # message events (e.g. file uploads) carry no text field
    text = event.get("text", "")
    # Check whether the message matches an activation phrase.
    if "hi" == text.lower():
        # Since the activation phrase was met,
        # get the channel ID that the event was executed on
        channel_id = event.get("channel")
        # Execute the send message
        return send_message(channel_id, "Hello!")
    if "how are you" in text.lower():
        channel_id = event.get("channel")
        return send_message(channel_id, "Hello!")
def send_message(channel, text):
# Initialize a Web API client
slack_web_client = WebClient(token=os.environ.get("SLACK_TOKEN"))
# message payload
message = {
"channel": channel,
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": (text),
},
},
],
}
# Post the onboarding message in Slack
slack_web_client.chat_postMessage(**message)
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
# Run our app on our externally facing IP address on port 3000 instead of
# running it on localhost, which is traditional for development.
app.run(host='0.0.0.0', port=3000)
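# A minimal setup note: the adapter and client above expect two environment
# variables before start-up; the values shown are placeholders.
#   export SLACK_EVENTS_TOKEN="<app signing secret>"
#   export SLACK_TOKEN="<bot user OAuth token>"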
|
nilq/baby-python
|
python
|
import os
import sys
import glob
import argparse
import numpy as np
from PIL import Image
from Utility import *
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.models import Model, load_model
from keras.layers import Dense, GlobalAveragePooling2D, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import adam, sgd
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import image
from sklearn.metrics import roc_curve, auc
FC_SIZE = 1024
NB_VGG_LAYERS_TO_FREEZE = 20
IM_WIDTH, IM_HEIGHT = 256, 256
def setup_to_transfer_learn(model, base_model):
for layer in base_model.layers:
layer.trainable = False
model.compile(optimizer=adam(), loss='binary_crossentropy', metrics=['accuracy'])
def add_new_last_layer(base_model, nb_classes):
x = base_model.output
x = BatchNormalization()(x)
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu', kernel_initializer='he_normal')(x)
predictions = Dense(nb_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
return model
def setup_to_finetune(model):
for layer in model.layers[:NB_VGG_LAYERS_TO_FREEZE]:
layer.trainable = False
for layer in model.layers[NB_VGG_LAYERS_TO_FREEZE:]:
layer.trainable = True
model.compile(optimizer=sgd(lr=1e-5, momentum=0.9), loss='binary_crossentropy', metrics=['accuracy'])
def train(args):
nb_train_samples = get_nb_files(args.train_dir)
nb_classes = len(glob.glob(args.train_dir + "/*"))
nb_val_samples = get_nb_files(args.val_dir)
batch_size = int(args.batch_size)
# MARK :- prepare train data generator
train_datagen = ImageDataGenerator(
preprocessing_function=None,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.1,
horizontal_flip=True,
vertical_flip=True,
featurewise_center=args.featurewise_center
)
# MARK :- fit train data generator for featurewise_center
if args.featurewise_center:
train_x, _ = get_data(args.train_dir, tar_size=(IM_WIDTH, IM_HEIGHT, 3))
        train_datagen.fit(train_x / 255)  # scale pixel values into [0, 1]
# MARK :- prepare valid data generator
valid_datagen = ImageDataGenerator(
preprocessing_function=None,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.1,
horizontal_flip=True,
vertical_flip=True,
featurewise_center=args.featurewise_center
)
# MARK :- fit valid data generator for featurewise_center
if args.featurewise_center:
valid_x, _ = get_data(args.val_dir, tar_size=(IM_WIDTH, IM_HEIGHT, 3))
        valid_datagen.fit(valid_x / 255)  # scale pixel values into [0, 1]
# MARK :- prepare train and valid generators
train_generator = train_datagen.flow_from_directory(
args.train_dir,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
class_mode='categorical'
)
validation_generator = valid_datagen.flow_from_directory(
args.val_dir,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=batch_size,
class_mode='categorical'
)
# MARK :- prepare base model
base_model = VGG16(
weights='imagenet',
include_top=False, classes=2,
input_shape=(IM_WIDTH, IM_HEIGHT, 3)
)
# MARK :- setup model to transfer learning
model = add_new_last_layer(base_model, nb_classes)
setup_to_transfer_learn(model, base_model)
# MARK :- prepare VA and VL checkpoints for transfer learning
best_tl_va = ModelCheckpoint(
'Models/tl_va_' + args.output_model_file,
monitor='val_acc',
mode='max',
verbose=1,
save_best_only=True
)
best_tl_vl = ModelCheckpoint(
'Models/tl_vl_' + args.output_model_file,
monitor='val_loss',
mode='min',
verbose=1,
save_best_only=True
)
# MARK :- fit model with transfer learning
history_tl = model.fit_generator(
train_generator,
steps_per_epoch=int(round(nb_train_samples / batch_size)),
epochs=int(args.tl_epoch),
validation_data=validation_generator,
validation_steps=int(round(nb_val_samples / batch_size)),
class_weight='auto',
callbacks=[best_tl_va, best_tl_vl]
)
plot_training(history_tl, 'tl_history.png')
# MARK :- load best transfer learning model and setup it
model = load_model(filepath=args.tl_model)
setup_to_finetune(model)
# MARK :- prepare VA and VL checkpoints for fine tuning
best_ft_va = ModelCheckpoint(
'Models/ft_va_' + args.output_model_file,
monitor='val_acc',
mode='max',
verbose=1,
save_best_only=True
)
best_ft_vl = ModelCheckpoint(
'Models/ft_vl_' + args.output_model_file,
monitor='val_loss',
mode='min',
verbose=1,
save_best_only=True
)
# MARK :- fit model with fine tuning
history_ft = model.fit_generator(
train_generator,
steps_per_epoch=int(round(nb_train_samples / batch_size)),
epochs=int(args.ft_epoch),
validation_data=validation_generator,
validation_steps=int(round(nb_val_samples / batch_size)),
class_weight='auto',
callbacks=[best_ft_va, best_ft_vl]
)
plot_training(history_ft, 'ft_history.png')
if __name__ == "__main__":
a = argparse.ArgumentParser()
a.add_argument("--train_dir", default="Dataset/Train")
a.add_argument("--val_dir", default="Dataset/Validation")
a.add_argument("--test_dir", default="Dataset/Test")
a.add_argument("--tl_epoch", default=15)
a.add_argument("--ft_epoch", default=5)
a.add_argument("--batch_size", default=30)
a.add_argument("--output_model_file", default="vgg16.h5")
a.add_argument("--image", help="path to image")
a.add_argument("--ft_model", default="Models/ft_vl_vgg16.h5")
a.add_argument("--tl_model", default="Models/tl_vl_vgg16.h5")
a.add_argument("--featurewise_center", default=False)
a.add_argument("--plot_roc_auc", default=False)
args = a.parse_args()
if args.image is not None:
model = load_model(filepath=args.ft_model)
img = resize_image(Image.open(args.image), (IM_WIDTH, IM_HEIGHT))
x = np.expand_dims(image.img_to_array(img), axis=0)
# x = preprocess_input(x)
preds = model.predict(x)
plot_preds(Image.open(args.image), preds[0])
sys.exit(1)
if args.plot_roc_auc:
model = load_model(filepath=args.ft_model)
test_x, test_y = get_data(args.val_dir, tar_size=(IM_WIDTH, IM_HEIGHT, 3))
pred_test_y = model.predict(test_x).ravel()
fpr, tpr, thresholds = roc_curve(test_y.ravel(), pred_test_y)
auc_score = auc(fpr, tpr)
plot_auc_roc(tpr, fpr, auc_score, "auc_roc_ft_vgg16")
sys.exit(1)
if args.train_dir is None or args.val_dir is None:
a.print_help()
sys.exit(1)
if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):
print("directories do not exist")
sys.exit(1)
train(args)
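# Typical invocations, given the argparse defaults above (the script file
# name and the image path are placeholders):
#   python train_vgg16.py --train_dir Dataset/Train --val_dir Dataset/Validation
#   python train_vgg16.py --image path/to/image.png --ft_model Models/ft_vl_vgg16.h5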
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from .base import Model
from .base import DifferentiableModel
class ModelWrapper(Model):
"""Base class for models that wrap other models.
This base class can be used to implement model wrappers
that turn models into new models, for example by preprocessing
the input or modifying the gradient.
Parameters
----------
model : :class:`Model`
The model that is wrapped.
"""
def __init__(self, model):
super(ModelWrapper, self).__init__(
bounds=model.bounds(),
channel_axis=model.channel_axis())
self.wrapped_model = model
def __enter__(self):
assert self.wrapped_model.__enter__() == self.wrapped_model
return self
def __exit__(self, exc_type, exc_value, traceback):
return self.wrapped_model.__exit__(exc_type, exc_value, traceback)
def batch_predictions(self, images):
return self.wrapped_model.batch_predictions(images)
def predictions(self, image):
return self.wrapped_model.predictions(image)
def num_classes(self):
return self.wrapped_model.num_classes()
class DifferentiableModelWrapper(ModelWrapper):
"""Base class for models that wrap other models and provide
gradient methods.
This base class can be used to implement model wrappers
that turn models into new models, for example by preprocessing
the input or modifying the gradient.
Parameters
----------
model : :class:`Model`
The model that is wrapped.
"""
def predictions_and_gradient(self, image, label):
return self.wrapped_model.predictions_and_gradient(image, label)
def gradient(self, image, label):
return self.wrapped_model.gradient(image, label)
def backward(self, gradient, image):
return self.wrapped_model.backward(gradient, image)
class ModelWithoutGradients(ModelWrapper):
"""Turns a model into a model without gradients.
"""
pass
class ModelWithEstimatedGradients(DifferentiableModelWrapper):
"""Turns a model into a model with gradients estimated
by the given gradient estimator.
Parameters
----------
model : :class:`Model`
The model that is wrapped.
gradient_estimator : `callable`
Callable taking three arguments (pred_fn, image, label) and
returning the estimated gradients. pred_fn will be the
batch_predictions method of the wrapped model.
"""
def __init__(self, model, gradient_estimator):
super(ModelWithEstimatedGradients, self).__init__(
model=model)
assert callable(gradient_estimator)
self._gradient_estimator = gradient_estimator
def predictions_and_gradient(self, image, label):
predictions = self.predictions(image)
gradient = self.gradient(image, label)
return predictions, gradient
def gradient(self, image, label):
pred_fn = self.batch_predictions
bounds = self.bounds()
return self._gradient_estimator(pred_fn, image, label, bounds)
def backward(self, gradient, image):
raise NotImplementedError
class CompositeModel(DifferentiableModel):
    """Combines predictions of a (black-box) model with a weighted sum of
    the gradients of several (substitute) models.

    Parameters
    ----------
    forward_model : :class:`Model`
        The model that should be fooled and will be used for predictions.
    backward_models : list of :class:`Model`
        The models that provide the gradients.
    weights : list of float
        One weight per backward model, applied when summing gradients.
    """
def __init__(self, forward_model, backward_models, weights):
bounds = forward_model.bounds()
for backward_model in backward_models:
assert bounds == backward_model.bounds()
channel_axis = forward_model.channel_axis()
for backward_model in backward_models:
assert channel_axis == backward_model.channel_axis()
num_classes = forward_model.num_classes()
for backward_model in backward_models:
assert num_classes == backward_model.num_classes()
super(CompositeModel, self).__init__(
bounds=bounds,
channel_axis=channel_axis)
self.forward_model = forward_model
self.backward_models = backward_models
self._num_classes = num_classes
self.weights = weights
def num_classes(self):
return self._num_classes
def batch_predictions(self, images):
return self.forward_model.batch_predictions(images)
def predictions_and_gradient(self, image, label):
predictions = self.forward_model.predictions(image)
gradient = 0
for i in range(len(self.backward_models)):
gradient += self.weights[i] * self.backward_models[i].gradient(image, label)
# for backward_model in self.backward_models:
# gradient += backward_model.gradient(image, label)
# gradient /= len(self.backward_models)
return predictions, gradient
def gradient(self, image, label):
gradient = 0
for i in range(len(self.backward_models)):
gradient += self.weights[i] * self.backward_models[i].gradient(image, label)
# gradient /= len(self.backward_models)
return gradient
def __enter__(self):
assert self.forward_model.__enter__() == self.forward_model
for backward_model in self.backward_models:
assert backward_model.__enter__() == backward_model
return self
def __exit__(self, exc_type, exc_value, traceback):
r1 = self.forward_model.__exit__(exc_type, exc_value, traceback)
rb = []
for backward_model in self.backward_models:
rb.append(backward_model.__exit__(exc_type, exc_value, traceback))
bNone = True
for r in rb:
if r is not None:
bNone = False
break
if r1 is None and bNone:
return None
return (r1,) + tuple(rb) # pragma: no cover
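# A minimal usage sketch for CompositeModel: a black-box model supplies the
# predictions while two substitutes supply a 0.7/0.3-weighted gradient. The
# three model instances are assumed to be constructed elsewhere.
#
# composite = CompositeModel(forward_model=black_box,
#                            backward_models=[substitute_a, substitute_b],
#                            weights=[0.7, 0.3])
# predictions, gradient = composite.predictions_and_gradient(image, label)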
|
nilq/baby-python
|
python
|
from jinja2 import Template
import bot_logger
import lang
import models
def help_user(msg):
user = models.User(msg.author.name)
if user.is_registered():
msg.reply(Template(lang.message_help + lang.message_footer).render(
username=msg.author.name, address=user.address))
else:
bot_logger.logger.info('user %s not registered (command : help) ' % msg.author.name)
msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
import random
import itertools
import ast
def nbackseq(n, length, words):
"""Generate n-back balanced sequences
:param n: int
How many characters (including the current one) to look back
to assure no duplicates
:param length: int
The total length of the sequence to produce
:param words: list
A list of words to be used to generate sequences
NOTE: must input words parameter as a literal e.g., '[1, 2]' with the quotes!!
:return: list
A list of solutions where each solution is a list of words of length 'length'
"""
    solutions = []
    solution_attempts = []
    if n > length:
        # a window of n distinct items can never fit in a shorter sequence
        return solutions
    while len(solution_attempts) < len(list(itertools.permutations(words, length))):
        solution = random.sample(words, length)
        if solution not in solution_attempts:
            good = True
            # check every full window of length n for duplicates
            for index in range(len(solution) - n + 1):
                subseq = solution[index: index + n]
                if len(set(subseq)) != n:
                    good = False
                    break
            if good:
                solutions.append(solution)
            solution_attempts.append(solution)
    return solutions
def test_nbackseq():
    assert nbackseq(2, 1, [1, 2]) == []
    assert nbackseq(1, 1, ['a']) == [['a']]
    # the sampling order is random, so compare order-insensitively
    assert sorted(nbackseq(2, 2, [1, 2])) == [[1, 2], [2, 1]]
if __name__ == '__main__':
n = int(sys.argv[1])
length = int(sys.argv[2])
    try:
        words = ast.literal_eval(sys.argv[3])
    except (ValueError, SyntaxError):
        raise ValueError("'words' parameter needs to be a literal (e.g. '[1, 2]' with the quotes!)")
solutions = nbackseq(n, length, words)
print(solutions)
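# Example invocation (note the quoted literal for the word list, as the
# docstring requires):
#   python nbackseq.py 2 3 '[1, 2, 3]'
# prints every length-3 ordering of [1, 2, 3]: random.sample never repeats
# an item, so every full 2-item window is automatically distinct.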
|
nilq/baby-python
|
python
|
sir = "mere pere droguri mofturi CamIoane"
rime = {}
for i in range(len(sir)):
    if (i == 0 or sir[i - 1] == " "):
        k = i
    if (sir[i] == " "):
        if (rime.get(sir[i - 2:i]) is None):
            rime[sir[i - 2:i]] = [sir[k:i]]
        else:
            rime[sir[i - 2:i]].append(sir[k:i])
    if (i == len(sir) - 1):
        if (rime.get(sir[i - 1:i + 1]) is None):
            rime[sir[i - 1:i + 1]] = [sir[k:i + 1]]
        else:
            rime[sir[i - 1:i + 1]].append(sir[k:i + 1])
final = {}
print(rime)
for j, v in rime.items():
if (len(rime[j]) >= 2):
final[j] = v
print(final)
|
nilq/baby-python
|
python
|
import os
import sys
import cherrypy
import ConfigParser
import urllib
import urllib2
import simplejson as json
import webtools
import time
import datetime
import random
import pprint
from pyechonest import song as song_api, config
config.TRACE_API_CALLS=True
config.ECHO_NEST_API_KEY='EHY4JJEGIOFA1RCJP'
import collections
import hashlib
catalog='paulify'
rcatalog='id:' + catalog
class Server(object):
def __init__(self, config):
self.production_mode = config.getboolean('settings', 'production')
self.cache_dir = '/lab/mir/data/cache'
        self.total = 0
        self.cached = 0
def search(self, q='', special='', sid='', artist='', title='', callback='', _=''):
if callback:
cherrypy.response.headers['Content-Type']= 'text/javascript'
else:
cherrypy.response.headers['Content-Type']= 'application/json'
print 'total', self.total, 'cached', self.cached, q, callback
response = {}
if len(special) > 0:
results = self.read_from_cache(special)
if results:
results = callback + "(" + results + ")"
return results
else:
response['status'] = 'not_found'
return to_json(response, callback)
elif len(sid) > 0:
result = song_api.Song(sid, buckets=[rcatalog, 'tracks', 'audio_summary'], limit=True, results=1)
results = [result]
elif len(artist) > 0:
results = song_api.search(artist=artist, title=title,\
buckets=[rcatalog, 'tracks', 'audio_summary'], limit=True, results=1)
else:
results = song_api.search(combined=q, \
buckets=[rcatalog, 'tracks', 'audio_summary'], limit=True, results=1)
if len(results) > 0:
track = results[0].get_tracks(catalog)[0]
id = track['id']
results = self.read_from_cache(id)
if results:
print 'cache hit'
else:
print 'cache miss'
response['status'] = 'ok'
t = self.get_track(id)
response['track'] = t
results = to_json(response, None)
self.write_to_cache(id, results)
results = callback + "(" + results + ")"
return results
else:
response['status'] = 'not_found'
return to_json(response, callback)
search.exposed = True
def get_track(self, id):
track = {}
rtrack = fetch_track(id)
pprint.pprint(rtrack)
track['id'] = rtrack['id']
track['artist'] = rtrack['artist']
track['title'] = rtrack['title']
track['audio'] = rtrack['audio']
track['summary'] = rtrack['audio_summary']
track['analysis'] = self.get_analysis(rtrack)
return track
def get_analysis(self, rtrack):
f = urllib.urlopen(rtrack['audio_summary']['analysis_url'])
js = f.read()
f.close()
return json.loads(js)
def read_from_cache(self, id):
full_path = os.path.join(self.cache_dir, id)
if os.path.exists(full_path):
with open(full_path) as f:
return f.read()
else:
            return None
def write_to_cache(self, id, results):
full_path = os.path.join(self.cache_dir, id)
with open(full_path, 'w') as f:
f.write(results)
def fetch_track(trid):
url = 'http://developer.echonest.com/api/v4/track/profile?api_key=N6E4NIOVYMTHNDM8J&format=json&bucket=audio_summary&id=' + trid
f = urllib.urlopen(url)
js = f.read()
print 'json', js
f.close()
response = json.loads(js)
return response['response']['track']
def to_json(dict, callback=None):
results = json.dumps(dict, sort_keys=True, indent = 4)
if callback:
results = callback + "(" + results + ")"
return results
if __name__ == '__main__':
urllib2.install_opener(urllib2.build_opener())
conf_path = os.path.abspath('web.conf')
print 'reading config from', conf_path
cherrypy.config.update(conf_path)
config = ConfigParser.ConfigParser()
config.read(conf_path)
production_mode = config.getboolean('settings', 'production')
current_dir = os.path.dirname(os.path.abspath(__file__))
# Set up site-wide config first so we get a log if errors occur.
if production_mode:
print "Starting in production mode"
cherrypy.config.update({'environment': 'production',
'log.error_file': 'simdemo.log',
'log.screen': True})
else:
print "Starting in development mode"
cherrypy.config.update({'noenvironment': 'production',
'log.error_file': 'site.log',
'log.screen': True})
conf = webtools.get_export_map_for_directory("static")
cherrypy.quickstart(Server(config), '/SongServer', config=conf)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ppretredit.ui'
#
# Created: Mon Jan 11 21:22:20 2010
# by: PyQt4 UI code generator 4.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_TrainerEditDlg(object):
def setupUi(self, TrainerEditDlg):
TrainerEditDlg.setObjectName("TrainerEditDlg")
TrainerEditDlg.resize(799, 603)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("PPRE.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
TrainerEditDlg.setWindowIcon(icon)
self.chooseTrainer = QtGui.QComboBox(TrainerEditDlg)
self.chooseTrainer.setGeometry(QtCore.QRect(150, 5, 181, 27))
self.chooseTrainer.setObjectName("chooseTrainer")
self.curtrLabel = QtGui.QLabel(TrainerEditDlg)
self.curtrLabel.setGeometry(QtCore.QRect(20, 5, 121, 20))
self.curtrLabel.setObjectName("curtrLabel")
self.maintab = QtGui.QTabWidget(TrainerEditDlg)
self.maintab.setGeometry(QtCore.QRect(30, 55, 731, 541))
self.maintab.setTabPosition(QtGui.QTabWidget.North)
self.maintab.setObjectName("maintab")
self.tab0 = QtGui.QWidget()
self.tab0.setObjectName("tab0")
self.gridLayoutWidget = QtGui.QWidget(self.tab0)
self.gridLayoutWidget.setGeometry(QtCore.QRect(5, 5, 711, 492))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setObjectName("gridLayout")
self.label = QtGui.QLabel(self.gridLayoutWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.gridLayoutWidget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.gridLayoutWidget)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.class_2 = QtGui.QComboBox(self.gridLayoutWidget)
self.class_2.setObjectName("class_2")
self.gridLayout.addWidget(self.class_2, 3, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.gridLayoutWidget)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 2, 0, 1, 1)
self.label_6 = QtGui.QLabel(self.gridLayoutWidget)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 4, 0, 1, 1)
self.label_7 = QtGui.QLabel(self.gridLayoutWidget)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 5, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.gridLayoutWidget)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 6, 0, 1, 1)
self.label_9 = QtGui.QLabel(self.gridLayoutWidget)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 7, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.gridLayoutWidget)
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 8, 0, 1, 1)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.item1 = QtGui.QComboBox(self.gridLayoutWidget)
self.item1.setObjectName("item1")
self.gridLayout_2.addWidget(self.item1, 0, 0, 1, 1)
self.item3 = QtGui.QComboBox(self.gridLayoutWidget)
self.item3.setObjectName("item3")
self.gridLayout_2.addWidget(self.item3, 1, 0, 1, 1)
self.item2 = QtGui.QComboBox(self.gridLayoutWidget)
self.item2.setObjectName("item2")
self.gridLayout_2.addWidget(self.item2, 0, 1, 1, 1)
self.item4 = QtGui.QComboBox(self.gridLayoutWidget)
self.item4.setObjectName("item4")
self.gridLayout_2.addWidget(self.item4, 1, 1, 1, 1)
self.gridLayout.addLayout(self.gridLayout_2, 4, 1, 1, 1)
self.label_11 = QtGui.QLabel(self.gridLayoutWidget)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 9, 0, 1, 1)
self.doubleBool = QtGui.QCheckBox(self.gridLayoutWidget)
self.doubleBool.setObjectName("doubleBool")
self.gridLayout.addWidget(self.doubleBool, 9, 1, 1, 1)
self.label_12 = QtGui.QLabel(self.gridLayoutWidget)
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 10, 0, 1, 1)
self.label_13 = QtGui.QLabel(self.gridLayoutWidget)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 11, 0, 1, 1)
self.label_14 = QtGui.QLabel(self.gridLayoutWidget)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 12, 0, 1, 1)
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_19 = QtGui.QLabel(self.gridLayoutWidget)
self.label_19.setObjectName("label_19")
self.gridLayout_4.addWidget(self.label_19, 0, 0, 1, 1)
self.label_20 = QtGui.QLabel(self.gridLayoutWidget)
self.label_20.setObjectName("label_20")
self.gridLayout_4.addWidget(self.label_20, 1, 0, 1, 1)
self.itemBool = QtGui.QCheckBox(self.gridLayoutWidget)
self.itemBool.setObjectName("itemBool")
self.gridLayout_4.addWidget(self.itemBool, 0, 1, 1, 1)
self.attackBool = QtGui.QCheckBox(self.gridLayoutWidget)
self.attackBool.setObjectName("attackBool")
self.gridLayout_4.addWidget(self.attackBool, 1, 1, 1, 1)
self.gridLayout.addLayout(self.gridLayout_4, 2, 1, 1, 1)
self.pokenum = QtGui.QSpinBox(self.gridLayoutWidget)
self.pokenum.setMaximum(6)
self.pokenum.setObjectName("pokenum")
self.gridLayout.addWidget(self.pokenum, 1, 1, 1, 1)
self.uc = QtGui.QSpinBox(self.gridLayoutWidget)
self.uc.setObjectName("uc")
self.gridLayout.addWidget(self.uc, 5, 1, 1, 1)
self.ud = QtGui.QSpinBox(self.gridLayoutWidget)
self.ud.setObjectName("ud")
self.gridLayout.addWidget(self.ud, 6, 1, 1, 1)
self.ue = QtGui.QSpinBox(self.gridLayoutWidget)
self.ue.setObjectName("ue")
self.gridLayout.addWidget(self.ue, 7, 1, 1, 1)
self.uf = QtGui.QSpinBox(self.gridLayoutWidget)
self.uf.setObjectName("uf")
self.gridLayout.addWidget(self.uf, 8, 1, 1, 1)
self.u11 = QtGui.QSpinBox(self.gridLayoutWidget)
self.u11.setObjectName("u11")
self.gridLayout.addWidget(self.u11, 10, 1, 1, 1)
self.u12 = QtGui.QSpinBox(self.gridLayoutWidget)
self.u12.setObjectName("u12")
self.gridLayout.addWidget(self.u12, 11, 1, 1, 1)
self.u13 = QtGui.QSpinBox(self.gridLayoutWidget)
self.u13.setObjectName("u13")
self.gridLayout.addWidget(self.u13, 12, 1, 1, 1)
self.trname = QtGui.QLineEdit(self.gridLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.trname.sizePolicy().hasHeightForWidth())
self.trname.setSizePolicy(sizePolicy)
self.trname.setObjectName("trname")
self.gridLayout.addWidget(self.trname, 0, 1, 1, 1)
self.maintab.addTab(self.tab0, "")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.textEdit = QtGui.QTextEdit(self.tab)
self.textEdit.setGeometry(QtCore.QRect(50, 50, 600, 75))
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtGui.QTextEdit(self.tab)
self.textEdit_2.setGeometry(QtCore.QRect(50, 175, 600, 75))
self.textEdit_2.setObjectName("textEdit_2")
self.textEdit_3 = QtGui.QTextEdit(self.tab)
self.textEdit_3.setGeometry(QtCore.QRect(50, 300, 600, 75))
self.textEdit_3.setObjectName("textEdit_3")
self.label_2 = QtGui.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(55, 20, 166, 18))
self.label_2.setObjectName("label_2")
self.label_21 = QtGui.QLabel(self.tab)
self.label_21.setGeometry(QtCore.QRect(55, 145, 211, 18))
self.label_21.setObjectName("label_21")
self.label_22 = QtGui.QLabel(self.tab)
self.label_22.setGeometry(QtCore.QRect(55, 270, 231, 18))
self.label_22.setObjectName("label_22")
self.maintab.addTab(self.tab, "")
self.tab1 = QtGui.QWidget()
self.tab1.setObjectName("tab1")
self.gridLayoutWidget_3 = QtGui.QWidget(self.tab1)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(50, 50, 536, 236))
self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3")
self.gridLayout_3 = QtGui.QGridLayout(self.gridLayoutWidget_3)
self.gridLayout_3.setObjectName("gridLayout_3")
self.spec1 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.spec1.setObjectName("spec1")
self.gridLayout_3.addWidget(self.spec1, 0, 1, 1, 1)
self.label_15 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_15.setObjectName("label_15")
self.gridLayout_3.addWidget(self.label_15, 0, 0, 1, 1)
self.label_16 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_16.setObjectName("label_16")
self.gridLayout_3.addWidget(self.label_16, 1, 0, 1, 1)
self.pokelvl1 = QtGui.QSpinBox(self.gridLayoutWidget_3)
self.pokelvl1.setObjectName("pokelvl1")
self.gridLayout_3.addWidget(self.pokelvl1, 1, 1, 1, 1)
self.label_17 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_17.setObjectName("label_17")
self.gridLayout_3.addWidget(self.label_17, 2, 0, 1, 1)
self.pokeItem1 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.pokeItem1.setObjectName("pokeItem1")
self.gridLayout_3.addWidget(self.pokeItem1, 2, 1, 1, 1)
self.label_18 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_18.setObjectName("label_18")
self.gridLayout_3.addWidget(self.label_18, 3, 0, 1, 1)
self.gridLayout_5 = QtGui.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.attack1_1 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_1.setObjectName("attack1_1")
self.gridLayout_5.addWidget(self.attack1_1, 0, 0, 1, 1)
self.attack1_2 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_2.setObjectName("attack1_2")
self.gridLayout_5.addWidget(self.attack1_2, 0, 1, 1, 1)
self.attack1_3 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_3.setObjectName("attack1_3")
self.gridLayout_5.addWidget(self.attack1_3, 1, 0, 1, 1)
self.attack1_4 = QtGui.QComboBox(self.gridLayoutWidget_3)
self.attack1_4.setObjectName("attack1_4")
self.gridLayout_5.addWidget(self.attack1_4, 1, 1, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_5, 3, 1, 1, 1)
self.maintab.addTab(self.tab1, "")
self.saveButton = QtGui.QPushButton(TrainerEditDlg)
self.saveButton.setGeometry(QtCore.QRect(410, 5, 126, 28))
self.saveButton.setObjectName("saveButton")
self.retranslateUi(TrainerEditDlg)
self.maintab.setCurrentIndex(0)
QtCore.QObject.connect(self.saveButton, QtCore.SIGNAL("pressed()"), TrainerEditDlg.saveTrainer)
QtCore.QMetaObject.connectSlotsByName(TrainerEditDlg)
def retranslateUi(self, TrainerEditDlg):
TrainerEditDlg.setWindowTitle(QtGui.QApplication.translate("TrainerEditDlg", "PPRE: Pokemon Edit", None, QtGui.QApplication.UnicodeUTF8))
self.curtrLabel.setText(QtGui.QApplication.translate("TrainerEditDlg", "Current Trainer", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("TrainerEditDlg", "Trainer Name", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("TrainerEditDlg", "Number of Pokemon", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("TrainerEditDlg", "Trainer Class", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("TrainerEditDlg", "Trainer Type", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("TrainerEditDlg", "Items", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Ch", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Dh", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Eh", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("TrainerEditDlg", "0Fh", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("TrainerEditDlg", "Double Battle", None, QtGui.QApplication.UnicodeUTF8))
self.doubleBool.setText(QtGui.QApplication.translate("TrainerEditDlg", "True", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setText(QtGui.QApplication.translate("TrainerEditDlg", "11h", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setText(QtGui.QApplication.translate("TrainerEditDlg", "12h", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setText(QtGui.QApplication.translate("TrainerEditDlg", "13h", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setText(QtGui.QApplication.translate("TrainerEditDlg", "Pokemon have items", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setText(QtGui.QApplication.translate("TrainerEditDlg", "Pokemon have different attacks", None, QtGui.QApplication.UnicodeUTF8))
self.itemBool.setText(QtGui.QApplication.translate("TrainerEditDlg", "True", None, QtGui.QApplication.UnicodeUTF8))
self.attackBool.setText(QtGui.QApplication.translate("TrainerEditDlg", "True", None, QtGui.QApplication.UnicodeUTF8))
self.maintab.setTabText(self.maintab.indexOf(self.tab0), QtGui.QApplication.translate("TrainerEditDlg", "Main", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("TrainerEditDlg", "Introduction Text", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setText(QtGui.QApplication.translate("TrainerEditDlg", "Defeated Text", None, QtGui.QApplication.UnicodeUTF8))
self.label_22.setText(QtGui.QApplication.translate("TrainerEditDlg", "Won Text", None, QtGui.QApplication.UnicodeUTF8))
self.maintab.setTabText(self.maintab.indexOf(self.tab), QtGui.QApplication.translate("TrainerEditDlg", "Quotes", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setText(QtGui.QApplication.translate("TrainerEditDlg", "Pokemon", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setText(QtGui.QApplication.translate("TrainerEditDlg", "Level", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setText(QtGui.QApplication.translate("TrainerEditDlg", "Item", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setText(QtGui.QApplication.translate("TrainerEditDlg", "Extra Attacks", None, QtGui.QApplication.UnicodeUTF8))
self.maintab.setTabText(self.maintab.indexOf(self.tab1), QtGui.QApplication.translate("TrainerEditDlg", "Data", None, QtGui.QApplication.UnicodeUTF8))
self.saveButton.setText(QtGui.QApplication.translate("TrainerEditDlg", "Save", None, QtGui.QApplication.UnicodeUTF8))
|
nilq/baby-python
|
python
|
import datetime as dt
import smtplib
import random
import pandas
import os
PLACEHOLDER = "[NAME]"
MY_EMAIL = "my_email@gmail.com"
MY_PASSWORD = "my_password"
LETTER_TO_SEND = ""
now = dt.datetime.now()
is_day = now.day
is_month = now.month
data = pandas.read_csv("./birthdays.csv")
birthdays = data.to_dict(orient="records")
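# Today's birthdays: records whose month and day match the current date.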
is_birthday = [person for person in birthdays if person['month'] == is_month and person['day'] == is_day]
if is_birthday:
    # Index the match only after confirming one exists; otherwise
    # indexing an empty list would raise an IndexError.
    name = is_birthday[0]["name"]
    email = is_birthday[0]["email"]
    letter_file = random.choice(os.listdir("./letter_templates/"))
    with open(f"./letter_templates/{letter_file}") as letter:
        new_letter = letter.read()
        LETTER_TO_SEND = new_letter.replace(PLACEHOLDER, name)
    with smtplib.SMTP("smtp.gmail.com") as connection:
        connection.starttls()
        connection.login(user=MY_EMAIL, password=MY_PASSWORD)
        connection.sendmail(
            from_addr=MY_EMAIL,
            to_addrs=email,
            msg=f"Subject:Happy Birthday {name}\n\n{LETTER_TO_SEND}"
        )
|
nilq/baby-python
|
python
|
# coding=utf-8
import json
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
authenticator = IAMAuthenticator('your_api_key')
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator)
language_translator.set_service_url('https://gateway.watsonplatform.net/language-translator/api')
## Translate
translation = language_translator.translate(
text='Hello', model_id='en-es').get_result()
print(json.dumps(translation, indent=2, ensure_ascii=False))
# List identifiable languages
# languages = language_translator.list_identifiable_languages().get_result()
# print(json.dumps(languages, indent=2))
# # Identify
# language = language_translator.identify(
# 'Language translator translates text from one language to another').get_result()
# print(json.dumps(language, indent=2))
# # List models
# models = language_translator.list_models(
# source='en').get_result()
# print(json.dumps(models, indent=2))
# # Create model
# with open('glossary.tmx', 'rb') as glossary:
# response = language_translator.create_model(
# base_model_id='en-es',
# name='custom-english-to-spanish',
# forced_glossary=glossary).get_result()
# print(json.dumps(response, indent=2))
# # Delete model
# response = language_translator.delete_model(model_id='<YOUR MODEL ID>').get_result()
# print(json.dumps(response, indent=2))
# # Get model details
# model = language_translator.get_model(model_id='<YOUR MODEL ID>').get_result()
# print(json.dumps(model, indent=2))
#### Document Translation ####
# List Documents
result = language_translator.list_documents().get_result()
print(json.dumps(result, indent=2))
# Translate Document
with open('en.pdf', 'rb') as file:
result = language_translator.translate_document(
file=file,
file_content_type='application/pdf',
filename='en.pdf',
model_id='en-fr').get_result()
print(json.dumps(result, indent=2))
# Document Status
result = language_translator.get_document_status(
document_id='{document id}').get_result()
print(json.dumps(result, indent=2))
# Translated Document
with open('translated.pdf', 'wb') as f:
result = language_translator.get_translated_document(
document_id='{document id}',
accept='application/pdf').get_result()
f.write(result.content)
# Delete Document
language_translator.delete_document(document_id='{document id}')
|
nilq/baby-python
|
python
|
# TODO: support axis=k to create multiple factors for each row/col
# TODO: knapsack (how to pass costs? must check broadcast/shape)
class Logic(object):
def __init__(self, variables):
self._variables = variables
# TODO: deal with negated
def _construct(self, fg, variables):
return [fg.create_factor_logic(self.factor_type, variables)], []
class Xor(Logic):
factor_type = "XOR"
class Or(Logic):
factor_type = "OR"
class AtMostOne(Logic):
factor_type = "ATMOSTONE"
class Imply(Logic):
factor_type = "IMPLY"
class XorOut(Logic):
factor_type = "XOROUT"
class OrOut(Logic):
factor_type = "OROUT"
class AndOut(Logic):
factor_type = "ANDOUT"
class Budget(object):
def __init__(self, variables, budget):
self._variables = variables
self.budget = budget
# TODO: deal with negated
def _construct(self, fg, pvars):
return [fg.create_factor_budget(pvars, self.budget)], []
class Pair(object):
# TODO: possible to have it be faster?
def __init__(self, vars_i, vars_j, additionals):
self._variables = vars_i, vars_j
self._additionals = additionals
def _construct(self, fg, pvars):
vars_i, vars_j = pvars
n = len(vars_i)
adds = self._additionals
factors = [
fg.create_factor_pair([
vars_i[k],
vars_j[k]],
adds[k])
for k in range(n)
]
add_tensors = [adds[k] for k in range(n)]
return factors, add_tensors
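# A minimal usage sketch (an assumption, not part of this module: it
# presumes an AD3-style factor graph `fg` exposing the create_factor_*
# methods called above, and `pvars` already bound to binary variables
# on that graph):
#
#   constraint = AtMostOne(variables)
#   factors, additionals = constraint._construct(fg, pvars)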
|
nilq/baby-python
|
python
|
import sys
import os
import numpy as np
import shutil
from common import PostProcess, update_metrics_in_report_json
from common import read_limits, check_limits_and_add_to_report_json
#from common import VirtualVehicleMakeMetrics as VVM
def main():
print "in main....."
sampleRate = 0.10
startAnalysisTime = 50
f = open('../rawdata.csv', 'w')
mat_file_name = sys.argv[1]
print "Mat file name is "+mat_file_name
if not os.path.exists(mat_file_name):
print "Given result file does not exist: ",mat_file_name
raise IOError('Given result file does not exist: {0}'.format(sys.argv[1]))
else:
print "line 10....",os.getcwd()
dstpath = os.path.join(os.getcwd(), 'matfiles')
print "dstPath is ",dstpath
if not os.path.isdir(dstpath):
os.makedirs(dstpath)
numFiles = len(os.listdir(dstpath))
dstname = '_' + str(numFiles) + mat_file_name
print "dstname ",dstname
#shutil.copyfile(mat_file_name, os.path.join(dstpath, dstname))
print "line30"
print "Line 24: Opened "+mat_file_name
## First limit part
#limit_dict, filter = read_limits()
print "done limits"
filter = []
## End of first limit part
## Post processing part
#--------accelerations-----------------------------------------------
#---------------------------------------------------------------------------------
# Processing
#---------------------------------------------------------------------------------
# loads results with the filtered out variables (and 'time' which is default)
pp = PostProcess(mat_file_name, filter)
vars_available = pp.get_names()
dumpList = []
print vars_available[1]
for vv in vars_available:
if vv.find("current_") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("voltage_") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("angle_") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("BaseTemp") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("GyroTemp") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
if vv.find("BattTemp") != -1:
print "add to dumpList: "+vv
dumpList.append(vv)
pp.print_time()
print "Last time is "+str(pp.get_max_time())
sampData = []
for vv in dumpList:
ndat = pp.resample_data(vv,sampleRate)
print "got ndat size=",len(ndat)
sampData.append(ndat)
print 'sampdata=',len(sampData),'cols',len(sampData[0]),'rows'
i = 0
print "dumping raw data headers"
for c,vv in enumerate(dumpList):
print vv,c
f.write( vv+',')
f.write( "\n")
print "dump data"
print len(sampData),'cols',len(sampData[0])
while i < len(sampData[0]):
if i % 1000 == 0:
print "line ",i
for c,vv in enumerate(dumpList):
f.write(str(sampData[c][i])+',')
f.write( "\n")
i = i + 1
f.close()
actAngleIdx = -1
setAngleIdx = -1
voltBusIdx = -1
currGyroIdx = -1
gyroTempIdx = -1
baseTempIdx = -1
for c,vv in enumerate(dumpList):
if vv.find("angle_set") != -1:
setAngleIdx = c
if vv.find("angle_act") != -1:
actAngleIdx = c
if vv.find("voltage_bus") != -1:
voltBusIdx = c
if vv.find("current_gyro") != -1:
currGyroIdx = c
print "gyro idx ",currGyroIdx
if vv.find("GyroTemp") != -1:
gyroTempIdx = c
if vv.find("BaseTemp") != -1:
baseTempIdx = c
maxErr = 0
sumErr = 0
avgErr = 0
maxBusV = -1
minBusV = 100
minBattCap = 100
    maxGyroCurr = 0
maxTemp = 0
maxGyroTemp = 0
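    # Scan each signal from startAnalysisTime onward, accumulating the
    # worst-case angle error, bus-voltage extremes and peak temperatures.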
if actAngleIdx != -1 and setAngleIdx != -1:
i = int(startAnalysisTime/sampleRate)
first = i
print "scanning angles from ",i," to " ,len(sampData[setAngleIdx])
while i < len(sampData[setAngleIdx]):
angErr = abs(sampData[setAngleIdx][i] - sampData[actAngleIdx][i])
if angErr > maxErr:
maxErr = angErr
sumErr = sumErr + angErr
i = i + 1
avgErr = sumErr / (i - first + 1)
if voltBusIdx != -1:
i = int(startAnalysisTime/sampleRate)
while i < len(sampData[voltBusIdx]):
vts = abs(sampData[voltBusIdx][i])
if vts > maxBusV:
maxBusV = vts
if vts < minBusV:
minBusV = vts
i = i + 1
if currGyroIdx != -1:
i = int(startAnalysisTime/sampleRate)
print "scanning Gyro currents from ",i," to " ,len(sampData[currGyroIdx])
while i < len(sampData[currGyroIdx]):
vts = abs(sampData[currGyroIdx][i])
if vts > maxGyroCurr:
maxGyroCurr = vts
print vts
i = i + 1
if baseTempIdx != -1:
i = int(startAnalysisTime/sampleRate)
while i < len(sampData[baseTempIdx]):
vts = abs(sampData[baseTempIdx][i])
if vts > maxTemp:
maxTemp = vts
i = i + 1
if gyroTempIdx != -1:
i = int(startAnalysisTime/sampleRate)
while i < len(sampData[gyroTempIdx]):
vts = abs(sampData[gyroTempIdx][i])
if vts > maxGyroTemp:
maxGyroTemp = vts
i = i + 1
output_dir = "../"
json_filename = os.path.join(output_dir, 'testbench_manifest.json')
import json
json_data = {}
if os.path.isfile(json_filename):
with open(json_filename, "r") as json_file:
print "reading json"
json_data = json.load(json_file)
print "json_data is....."
print json_data
for metric in json_data['Metrics']:
if metric["Name"] == "angleMaxError":
metric["Value"] = str(maxErr)
print 'angleMaxError '+str(maxErr)
if metric["Name"] == "angleAvgError":
metric["Value"] = str(avgErr)
if metric["Name"] == "minBusVoltage":
metric["Value"] = str(minBusV)
if metric["Name"] == "maxBusVoltage":
metric["Value"] = str(maxBusV)
if metric["Name"] == "minBattCapacity":
metric["Value"] = str(minBattCap)
if metric["Name"] == "maxGyroCurrent":
metric["Value"] = str(maxGyroCurr)
if metric["Name"] == "maxTemp":
metric["Value"] = str(maxTemp)
if metric["Name"] == "maxGyroTemp":
metric["Value"] = str(maxGyroTemp)
print "dumping to ",json_filename
print json_data
with open(json_filename, "w") as json_file:
json.dump(json_data, json_file, indent=4)
# #---------------------------------------------------------------------------------
# # Potential_Design
# #---------------------------------------------------------------------------------
# Potential_Design = 0
# followTime = pp.get_data_by_index(followTime_uri, -1)
# if (SettlingTime == -1 or riseTime == -1 or minDistance < .1*minDistanceVelocity*followTime):
# Potential_Design = -1
# else: Potential_Design = 1
# print "Potential_Design: %d" %Potential_Design
# #---------------------------------------------------------------------------------
# # Metrics
# #---------------------------------------------------------------------------------
# metrics = {}
# metrics.update({'vehicleMass':{'value': vehicleMass, 'unit':'kg'},
# 'distanceTraveled':{'value': distanceTraveled, 'unit': 'm'},
# 'minDistance': {'value': minDistance, 'unit': 'm'},
# 'finalVelocity':{'value': Vf, 'unit': 'm/s'},
# 'requiredTorque':{'value': requiredTorque, 'unit':'N-m'},
# 'riseTime':{'value': np.amax(riseTime), 'unit' :''},
# 'Overshoot':{'value': np.amax(Overshoot), 'unit' :''},
# 'settlingTime':{'value': np.amax(SettlingTime), 'unit' :''},
# 'rms_error':{'value': RMS_error, 'unit' : ''},
# 'numSetpointCrossings':{'value':numSetPointCrossings, 'unit': ''},
# 'averageA': {'value': maxAccel, 'unit': 'm/s2'},
# 'averageJ': {'value': maxJerk, 'unit': 'm/s3'},
# 'Potential_Design': {'value': Potential_Design, 'unit': ''},
# #'chassisType':{'value': chassisType, 'unit' :''},
# })
#print metrics
cwd = os.getcwd()
os.chdir('..')
# print 'Plot saved to : {0}'.format(pp.save_as_svg(vehicle_speed,
# pp.global_abs_max(vehicle_speed),
# 'VehicleSpeed',
# 'max(FTP_Driver.driver_bus.vehicle_speed)',
# 'kph'))
#pp.store_data_to_csv(jerk_uri, '{0}.csv'.format(jerk_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(a_uri, '{0}.csv'.format(a_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(pp.time_array, '{0}.csv'.format(pp.time_array), 0, time_inc, numSamples)
#pp.store_data_to_csv(boomCylLength_uri, '{0}.csv'.format(boomCylLength_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(armCylLength_uri, '{0}.csv'.format(armCylLength_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(bucketCylLength_uri, '{0}.csv'.format(bucketCylLength_uri), 0, time_inc, numSamples)
#pp.store_data_to_csv(boomCylRPressure_uri, '{0}.csv'.format(boomCylRPressure_uri), 0, 0.1, dur)
#pp.store_data_to_csv(arm_ang_vel_uri, '{0}.csv'.format(arm_ang_vel_uri), 0, 0.1, dur)
#pp.store_data_to_csv(max_Y_uri, '{0}.csv'.format(max_Y_uri), 0, 0.1, dur)
#pp.store_data_to_csv(max_reach_uri, '{0}.csv'.format(max_reach_uri), 0, 0.1, dur)
#pp.store_data_to_csv(State_uri, '{0}.csv'.format(State_uri), 0, 0.1, dur)
## end of postprocessing part
## Second limit part
#check_limits_and_add_to_report_json(pp, limit_dict)
#update_metrics_in_report_json(metrics)
## end of Second limit part
os.chdir(cwd)
print "done main"
if __name__ == '__main__':
root_dir = os.getcwd()
print "Starting in "+root_dir
try:
print "Starting Main...."
main()
except:
print "exception occurred..."
os.chdir(root_dir)
import traceback
trace = traceback.format_exc()
# Generate this file on failed executions, https://github.com/scipy/scipy/issues/1840
with open(os.path.join('..', '_POST_PROCESSING_FAILED.txt'), 'wb') as f_out:
f_out.write(trace)
|
nilq/baby-python
|
python
|
# Algorithms > Warmup > Simple Array Sum
# Calculate the sum of integers in an array.
#
# https://www.hackerrank.com/challenges/simple-array-sum/problem
#
#
# Complete the simpleArraySum function below.
#
def simpleArraySum(ar):
#
# Write your code here.
#
return sum(ar)
if __name__ == '__main__':
ar_count = int(input())
ar = list(map(int, input().rstrip().split()))
result = simpleArraySum(ar)
print(result)
|
nilq/baby-python
|
python
|
from cached_property import cached_property
from onegov.activity import Activity, Attendee, Booking, Occasion
from onegov.feriennet import _
from onegov.feriennet import FeriennetApp
from onegov.feriennet.collections import BillingCollection, MatchCollection
from onegov.feriennet.exports.unlucky import UnluckyExport
from onegov.feriennet.layout import DefaultLayout
from onegov.org.models import Boardlet, BoardletFact
class FeriennetBoardlet(Boardlet):
@cached_property
def session(self):
return self.request.session
@cached_property
def period(self):
return self.request.app.active_period
@cached_property
def layout(self):
return DefaultLayout(None, self.request)
@property
def state(self):
if not self.period:
return 'failure'
if not self.period.confirmed:
return 'warning'
return 'success'
@FeriennetApp.boardlet(name='period', order=(1, 1))
class PeriodBoardlet(FeriennetBoardlet):
@property
def title(self):
return self.period and self.period.title or _("No active period")
@property
def state(self):
if not self.period:
return 'failure'
return 'success'
@property
def facts(self):
if not self.period:
return
def icon(checked):
return checked and 'fa-check-square-o' or 'fa-square-o'
yield BoardletFact(
text=_("Prebooking: ${dates}", mapping={
'dates': self.layout.format_date_range(
self.period.prebooking_start,
self.period.prebooking_end,
)
}),
icon=icon(self.period.confirmed)
)
yield BoardletFact(
text=_("Booking: ${dates}", mapping={
'dates': self.layout.format_date_range(
self.period.booking_start,
self.period.booking_end,
)
}),
icon=icon(self.period.finalized if self.period.finalizable
else self.period.is_booking_in_past)
)
yield BoardletFact(
text=_("Execution: ${dates}", mapping={
'dates': self.layout.format_date_range(
self.period.execution_start,
self.period.execution_end,
)
}),
icon=icon(self.period.is_execution_in_past)
)
@FeriennetApp.boardlet(name='activities', order=(1, 2))
class ActivitiesBoardlet(FeriennetBoardlet):
@cached_property
def occasions_count(self):
if not self.period:
return 0
return self.session.query(Occasion)\
.filter_by(period_id=self.period.id)\
.count()
@cached_property
def activities_count(self):
if not self.period:
return 0
return self.session.query(Activity).filter(Activity.id.in_(
self.session.query(Occasion.activity_id)
.filter_by(period_id=self.period.id)
.subquery()
)).filter_by(state='accepted').count()
@property
def title(self):
return _("${count} Activities", mapping={
'count': self.activities_count
})
@property
def state(self):
if not self.period:
return 'failure'
return self.activities_count and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${count} Activities", mapping={
'count': self.activities_count
}),
icon='fa-dot-circle-o'
)
yield BoardletFact(
text=_("${count} Occasions", mapping={
'count': self.occasions_count
}),
icon='fa-circle-o'
)
@FeriennetApp.boardlet(name='bookings', order=(1, 3))
class BookingsBoardlet(FeriennetBoardlet):
@cached_property
def counts(self):
if not self.period:
return {
'accepted': 0,
'blocked': 0,
'cancelled': 0,
'denied': 0,
'total': 0,
}
bookings = self.session.query(Booking)\
.filter_by(period_id=self.period.id)
return {
'accepted': bookings.filter_by(state='accepted').count(),
'blocked': bookings.filter_by(state='blocked').count(),
'cancelled': bookings.filter_by(state='cancelled').count(),
'denied': bookings.filter_by(state='denied').count(),
'total': bookings.count(),
}
@cached_property
def attendees_count(self):
if not self.period:
return 0
return self.session.query(Attendee)\
.filter(Attendee.id.in_(
self.session.query(Booking.attendee_id).filter_by(
period_id=self.period.id
)
)).count()
@property
def title(self):
if not self.period or not self.period.confirmed:
return _("${count} Wishes", mapping={
'count': self.counts['total']
})
else:
return _("${count} Bookings", mapping={
'count': self.counts['total']
})
@property
def state(self):
if not self.period:
return 'failure'
return self.counts['total'] and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
if not self.period.confirmed:
yield BoardletFact(
text=_("${count} Wishes", mapping={
'count': self.counts['total']
}),
icon='fa-square',
)
yield BoardletFact(
text=_("${count} Wishes per Attendee", mapping={
'count': self.attendees_count and (
round(self.counts['total'] / self.attendees_count, 1)
) or 0
}),
icon='fa-line-chart',
)
else:
yield BoardletFact(
text=_("${count} Bookings", mapping={
'count': self.counts['total']
}),
icon='fa-square',
)
yield BoardletFact(
text=_("${count} accepted", mapping={
'count': self.counts['accepted']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} cancelled", mapping={
'count': self.counts['cancelled']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} denied", mapping={
'count': self.counts['denied']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} blocked", mapping={
'count': self.counts['blocked']
}),
icon='fa-minus',
)
yield BoardletFact(
text=_("${count} Bookings per Attendee", mapping={
'count': self.attendees_count and round(
self.counts['accepted'] / self.attendees_count, 1
) or 0
}),
icon='fa-line-chart',
)
@FeriennetApp.boardlet(name='attendees', order=(1, 4))
class AttendeesBoardlet(FeriennetBoardlet):
@cached_property
def attendee_counts(self):
if not self.period:
return {
'total': 0,
                'girls': 0,
                'boys': 0,
}
attendees = self.session.query(Attendee)\
.filter(Attendee.id.in_(
self.session.query(Booking.attendee_id).filter_by(
period_id=self.period.id
)
))
return {
'total': attendees.count(),
'girls': attendees.filter_by(gender='female').count(),
'boys': attendees.filter_by(gender='male').count(),
}
@property
def title(self):
return _("${count} Attendees", mapping={
'count': self.attendee_counts['total']
})
@property
def state(self):
if not self.period:
return 'failure'
return self.attendee_counts['total'] and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${count} Girls", mapping={
'count': self.attendee_counts['girls']
}),
icon='fa-female'
)
yield BoardletFact(
text=_("${count} Boys", mapping={
'count': self.attendee_counts['boys']
}),
icon='fa-male'
)
@FeriennetApp.boardlet(name='matching', order=(1, 5))
class MatchingBoardlet(FeriennetBoardlet):
@cached_property
def happiness(self):
if not self.period or not self.period.confirmed:
return 0
raw = MatchCollection(self.session, self.period).happiness
return round(raw * 100, 2)
@cached_property
def unlucky_count(self):
if not self.period:
return 0
return UnluckyExport().query(self.session, self.period).count()
@property
def title(self):
return _("${amount}% Happiness", mapping={
'amount': self.happiness
})
@property
def state(self):
if not self.period:
return 'failure'
return self.happiness > 75 and 'success' or 'warning'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${amount}% Happiness", mapping={
'amount': self.happiness
}),
icon='fa-smile-o',
)
yield BoardletFact(
text=_("${count} Attendees Without Occasion", mapping={
'count': self.unlucky_count
}),
icon='fa-frown-o',
)
@FeriennetApp.boardlet(name='billing', order=(1, 6))
class BillingPortlet(FeriennetBoardlet):
@cached_property
def amounts(self):
if not self.period:
return {
'total': 0,
'outstanding': 0,
'paid': 0,
}
billing = BillingCollection(self.request, self.period)
result = {
'total': billing.total,
'outstanding': billing.outstanding,
}
result['paid'] = result['total'] - result['outstanding']
return result
@property
def title(self):
return _("${amount} CHF outstanding", mapping={
'amount': self.layout.format_number(self.amounts['outstanding'])
})
@property
def state(self):
if not self.period:
return 'failure'
return self.amounts['outstanding'] and 'warning' or 'success'
@property
def facts(self):
if not self.period:
return
yield BoardletFact(
text=_("${amount} CHF total", mapping={
'amount': self.layout.format_number(self.amounts['total'])
}),
icon='fa-circle',
)
yield BoardletFact(
text=_("${amount} CHF paid", mapping={
'amount': self.layout.format_number(self.amounts['paid'])
}),
icon='fa-plus-circle',
)
yield BoardletFact(
text=_("${amount} CHF outstanding", mapping={
'amount': self.layout.format_number(
self.amounts['outstanding']
)
}),
icon='fa-minus-circle',
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Configurations for slimming simple network.
- Author: Curt-Park
- Email: jwpark@jmarple.ai
"""
from config.train.cifar100 import simplenet, simplenet_finetune
train_config = simplenet.config
regularizer_params = {
"REGULARIZER": "BnWeight",
"REGULARIZER_PARAMS": dict(coeff=1e-5),
"EPOCHS": train_config["EPOCHS"],
}
train_config.update(regularizer_params)
finetune_config = simplenet_finetune.config
regularizer_params = {
"REGULARIZER": "BnWeight",
"REGULARIZER_PARAMS": dict(coeff=1e-5),
"EPOCHS": finetune_config["EPOCHS"],
}
finetune_config.update(regularizer_params)
config = {
"TRAIN_CONFIG": train_config,
"TRAIN_CONFIG_AT_PRUNE": finetune_config,
"N_PRUNING_ITER": 15,
"PRUNE_METHOD": "SlimMagnitude",
"PRUNE_PARAMS": dict(
PRUNE_AMOUNT=0.1,
NORM=2,
STORE_PARAM_BEFORE=10,
TRAIN_START_FROM=0,
PRUNE_AT_BEST=False,
),
}
|
nilq/baby-python
|
python
|
import re
import os
import subprocess
def test_rr_cases():
# now for various combinations of inputs
output_file = 'test_output.csv'
# test exact round robin case, but just once
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','8'
,'-d','7'
,'-p','2'
,'--cpu','2'
,'--debug'
,'--timelimit','10'
,'--csv',output_file]
    # Run the solver directly so a failed assertion or crash surfaces
    # with its own traceback instead of an uninformative `assert False`.
    proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
    out = proc.stdout
    err = proc.stderr
    print('solver stdout is ', out)
    assert re.search('OPTIMAL', out, re.MULTILINE)
    assert re.search('num_search_workers: 2', err, re.MULTILINE)
try:
# clean up the temp file
os.unlink(output_file)
except:
print('no file to delete')
# test exact round robin case, but just twice around
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','4'
,'-d','6'
,'-p','1'
,'--cpu','2'
,'--debug'
,'--timelimit','60'
,'--csv',output_file]
    proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
    out = proc.stdout
    err = proc.stderr
    assert re.search('OPTIMAL', out, re.MULTILINE)
    assert re.search('num_search_workers: 2', err, re.MULTILINE)
try:
# clean up the temp file
os.unlink(output_file)
except:
print('no file to delete')
|
nilq/baby-python
|
python
|
from gpkit import Model, Variable, VectorVariable, SignomialsEnabled
import numpy as np
class MST(Model):
def setup(self, N):
edgeCost = VectorVariable([N, N],
'edgeCost')
edgeMaxFlow = VectorVariable([N, N],
'edgeMaxFlow')
connect = VectorVariable([N,N],'connectivity')
flow = VectorVariable([N, N], 'flow')
source = VectorVariable(N, 'source')
sink = VectorVariable(N, 'sink')
totalCost = Variable('totalCost')
constraints = []
with SignomialsEnabled():
for i in range(0, N):
constraints.extend([sink[i] + sum(flow[i, :]) <= source[i] + sum(flow[:, i]),])
for j in range(0, N):
constraints.extend([flow[i, j] <= connect[i,j]*edgeMaxFlow[i, j]])
for i in range(0, N):
for j in range(i + 1, N):
constraints.extend([flow[i, j] * flow[j, i] <= 1e-5])
constraints.extend([totalCost >= sum(edgeCost * flow) ])
return constraints
|
nilq/baby-python
|
python
|
import pytest
from auth import create_jwt_payload
@pytest.mark.usefixtures("default_qr_code")
def test_qr_exists(client, default_qr_code):
code = default_qr_code["code"]
graph_ql_query_string = f"""query CheckQrExistence {{
qrExists(qrCode: "{code}")
}}"""
data = {"query": graph_ql_query_string}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert response.json["data"]["qrExists"]
graph_ql_query_string = """query CheckQrExistence {
qrExists(qrCode: "111")
}"""
data = {"query": graph_ql_query_string}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert not response.json["data"]["qrExists"]
@pytest.mark.usefixtures("default_qr_code")
def test_qr_code(client, default_qr_code):
graph_ql_query_string = f"""query {{
qrCode(qrCode: "{default_qr_code['code']}") {{
code
}}
}}"""
data = {"query": graph_ql_query_string}
response = client.post("/graphql", json=data)
queried_code = response.json["data"]["qrCode"]
assert response.status_code == 200
assert queried_code["code"] == default_qr_code["code"]
@pytest.mark.usefixtures("qr_code_without_box")
def test_code_not_associated_with_box(client, qr_code_without_box):
code = qr_code_without_box["code"]
graph_ql_query_string = f"""query {{
qrCode(qrCode: "{code}") {{
box {{
id
}}
}}
}}"""
data = {"query": graph_ql_query_string}
response_data = client.post("/graphql", json=data)
assert (
"<Model: Box> instance matching query does not exist"
in response_data.json["errors"][0]["message"]
)
queried_box = response_data.json["data"]["qrCode"]["box"]
assert queried_box is None
def test_code_does_not_exist(client):
graph_ql_query_string = """query Box {
qrCode(qrCode: "-1") {
id
}
}"""
data = {"query": graph_ql_query_string}
response_data = client.post("/graphql", json=data)
queried_code = response_data.json["data"]["qrCode"]
assert (
"<Model: QRCode> instance matching query does not exist"
in response_data.json["errors"][0]["message"]
)
assert queried_code is None
@pytest.mark.usefixtures("box_without_qr_code")
def test_create_qr_code(client, box_without_qr_code):
data = {"query": "mutation { createQrCode { id } }"}
response = client.post("/graphql", json=data)
qr_code_id = int(response.json["data"]["createQrCode"]["id"])
assert response.status_code == 200
assert qr_code_id > 2
data = {
"query": f"""mutation {{
createQrCode(boxLabelIdentifier: "{box_without_qr_code['box_label_identifier']}") # noqa
{{
id
box {{
id
items
}}
}}
}}"""
}
response = client.post("/graphql", json=data)
created_qr_code = response.json["data"]["createQrCode"]
assert response.status_code == 200
assert int(created_qr_code["id"]) == qr_code_id + 1
assert created_qr_code["box"]["items"] == box_without_qr_code["items"]
assert int(created_qr_code["box"]["id"]) == box_without_qr_code["id"]
data = {"query": """mutation { createQrCode(boxLabelIdentifier: "xxx") { id } }"""}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert response.json["data"]["createQrCode"] is None
assert len(response.json["errors"]) == 1
assert response.json["errors"][0]["extensions"]["code"] == "BAD_USER_INPUT"
def test_invalid_permission(client, mocker):
mocker.patch("jose.jwt.decode").return_value = create_jwt_payload(permissions=[])
data = {"query": "mutation { createQrCode { id } }"}
response = client.post("/graphql", json=data)
assert response.status_code == 200
assert response.json["data"]["createQrCode"] is None
assert len(response.json["errors"]) == 1
assert response.json["errors"][0]["extensions"]["code"] == "FORBIDDEN"
|
nilq/baby-python
|
python
|
import os
import time
import requests
from flask import render_template, request, flash, redirect, url_for, abort
from jinja2 import Markup
from app import app, PLOTS_FOLDER, UPLOAD_FOLDER
from functions import dir_listing, process_images
@app.route('/')
def home():
return render_template('home.html')
@app.route('/uploads/', defaults={'req_path': ''})
@app.route('/uploads/<path:req_path>')
def uploads(req_path):
return dir_listing(UPLOAD_FOLDER, req_path, 'images.html')
@app.route('/plots/', defaults={'req_path': ''})
@app.route('/plots/<path:req_path>')
def plots(req_path):
return dir_listing(PLOTS_FOLDER, req_path, 'plots.html')
@app.route('/upload/', methods=['GET', 'POST'])
def upload_form():
if request.method == 'POST':
result_response, status_code = process_images(request)
if status_code != 200:
message = 'There was an error uploading photos: ' + str(result_response['reason'])
flash(message)
return redirect('/upload/')
else:
return redirect(url_for('uploads', req_path=result_response['folder_name']))
else:
return render_template('upload_form.html')
@app.route('/create/', methods=['GET', 'POST'])
def create():
folders = os.listdir(UPLOAD_FOLDER)
if request.method == 'GET':
return render_template('create.html', folders=folders)
elif request.method == 'POST':
form_data = request.form
image_location = os.path.join(UPLOAD_FOLDER, form_data['folder_name'])
destination = os.path.join(PLOTS_FOLDER, form_data['folder_name'])
if os.path.isfile(os.path.join(image_location, 'metadata.csv')):
metadata = os.path.join(image_location, 'metadata.csv')
data = {"args": ['--images', image_location + "/*.jpg", '--out_dir', destination, '--metadata', metadata]}
else:
data = {"args": ['--images', image_location + "/*.jpg", '--out_dir', destination]}
response = requests.post(request.url_root + "api/pixplot", json=data)
app.logger.info('Create request status: ' + str(response.status_code))
app.logger.debug('Create request status: ' + str(response.json()))
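        # Poll the job's result URL every 10 seconds until the PixPlot
        # build either finishes ("Done!") or stops with an error.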
while True:
time.sleep(10)
result = requests.get(response.json()['result_url'])
app.logger.debug(str(result.json()))
if 'status' in result.json().keys() and result.json()['status'] == 'running':
# Still running
continue
elif 'report' in result.json().keys() and result.json()['report'][-6:-1] == 'Done!':
# Complete without error
message = Markup('<a href="/plots/%s/index.html" class="alert-link">Finished! Your PixPlot is uploaded here.</a>' % form_data['folder_name'])
break
else:
# Something botched
message = 'There was an error creating your PixPlot. Sorry.'
app.logger.error(str(result.json()))
break
flash(message)
return redirect('/create/')
else:
return abort(404)
|
nilq/baby-python
|
python
|
# Packages
from sys import argv, exit
from os.path import realpath, dirname, isfile
from flask import Flask
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask_sqlalchemy import SQLAlchemy
# Local
from config import Config
path = dirname(realpath(__file__))
secret = "%s/app/app.secret" % path
if not isfile(secret):
print("Error: Missing file '%s'" % secret)
exit(1)
app = Flask(__name__, static_url_path="/assets")
with open(secret, "r") as f:
app.secret_key = f.read()
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = None
if len(argv) > 1:
manager = Manager(app)
manager.add_command("db", MigrateCommand)
from app import routes, models
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
from __future__ import print_function
from builtins import str
import sys
import os
from vmrunner import vmrunner
import socket
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
def UDP_test(trigger_line):
print("<Test.py> Performing UDP tests")
HOST, PORT = "10.0.0.55", 4242
# SOCK_DGRAM is the socket type to use for UDP sockets
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE: This is necessary for the test to exit after the VM has
# been shut down due to a VM timeout
sock.settimeout(20)
data = "Lucky"
sock.sendto(data, (HOST, PORT))
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
data = "Luke"
sock.sendto(data, (HOST, PORT))
received = sock.recv(1024)
print("<Test.py> Sent: {}".format(data))
print("<Test.py> Received: {}".format(received))
if received != data: return False
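    # A 1472-byte payload exactly fills a standard 1500-byte Ethernet
    # MTU once the 20-byte IP and 8-byte UDP headers are added.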
data = "x" * 1472
sock.sendto(data, (HOST, PORT))
received = sock.recv(1500)
if received != data:
print("<Test.py> Did not receive long string: {}".format(received))
return False
data = "x" * 9216 # 9216 is apparently default max for MacOS
sock.sendto(data, (HOST, PORT))
received = bytearray()
while (len(received) < len(data)):
received.extend(sock.recv(len(data)))
print("RECEIVED: ", len(received))
if received != data:
print("<Test.py> Did not receive mega string (64k)")
return False
vm.exit(0, "Test completed without errors")
# Add custom event-handler
vm.on_output("UDP test service", UDP_test)
if len(sys.argv) > 1:
vm.boot(image_name=str(sys.argv[1]))
else:
# Boot the VM, taking a timeout as parameter
vm.cmake().boot(30,image_name="net_udp").clean()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
spam = ['apples', 'bannanas', 'tofus', 'cats']
length = len(spam)
item = 0
while item < length - 1:
print(spam[item], end=' ')
item = item + 1
print(' and ' + spam[item])
|
nilq/baby-python
|
python
|
from SOC.models import Manna
import numpy as np
import pytest
def test_boundary_shape():
sim = Manna(L=10)
assert sim.values.shape == (12, 12)
assert sim.L_with_boundary == 12
def test_run_abel():
sim = Manna(L=20)
sim.run(5)
def test_run_nonabel():
sim = Manna(L=20, abelian = False)
sim.run(5)
def test_driving_does_not_pollute_boundary():
sim = Manna(L=10)
for i in range(1000):
sim.drive()
def test_toppling_reduces_middle_to_max_one():
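    # Overload every interior site, run one avalanche, and verify that
    # relaxation leaves between zero and one grain on each site.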
sim = Manna(L=10)
sim.values[1:-1, 1:-1] = 6
sim.AvalancheLoop()
assert (0 <= sim.values[1:-1, 1:-1]).all()
assert (sim.values[1:-1, 1:-1] <= 1).all()
@pytest.mark.skip
def test_whiteboard_case_1():
sim = Manna(L=3)
sim.values[2, 2] = 2
results = sim.AvalancheLoop()
assert int(results['AvalancheSize']) == 2
assert int(results['number_of_iterations']) == 1
@pytest.mark.skip
def test_whiteboard_case_2():
sim = Manna(L=3)
sim.values[2, 2] = 2
results = sim.AvalancheLoop()
assert int(results['AvalancheSize']) == 2
assert int(results['number_of_iterations']) == 1
def test_resurrect():
sim = Manna(L=10)
filename = "test_ressurrect.zarr"
sim.run(5, filename=filename)
saved = sim.saved_snapshots[-1].copy()
save_every_orig = sim.save_every
sim2 = Manna.from_file(filename)
np.testing.assert_allclose(sim2.values, saved)
assert sim2.save_every == save_every_orig
def test_resurrect_default_name():
sim = Manna(L=10)
filename = sim.run(50, filename=False)
saved = sim.saved_snapshots[-1].copy()
save_every_orig = sim.save_every
sim2 = Manna.from_file(filename)
np.testing.assert_allclose(sim2.values, saved)
assert sim2.save_every == save_every_orig
|
nilq/baby-python
|
python
|
# Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing NVIDIA Driver installation.
"""
import re
from absl import flags
from absl import logging
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import os_types
from perfkitbenchmarker import regex_util
NVIDIA_DRIVER_LOCATION_BASE = 'https://us.download.nvidia.com/tesla'
NVIDIA_TESLA_K80 = 'k80'
NVIDIA_TESLA_P4 = 'p4'
NVIDIA_TESLA_P100 = 'p100'
NVIDIA_TESLA_V100 = 'v100'
NVIDIA_TESLA_T4 = 't4'
NVIDIA_TESLA_A100 = 'a100'
"""Default GPU clocks and autoboost configurations.
Base_clock is the default clock speeds when setting the GPU clocks. Max_clock
is currently unused. The clock speeds are in the format of
[memory_clock in MHz, graphics_clock in MHz].
"""
GPU_DEFAULTS = {
NVIDIA_TESLA_K80: {
'base_clock': [2505, 562],
'max_clock': [2505, 875],
'autoboost_enabled': True,
},
NVIDIA_TESLA_P4: {
'base_clock': [3003, 885],
'max_clock': [3003, 1531],
'autoboost_enabled': None,
},
NVIDIA_TESLA_P100: {
'base_clock': [715, 1189],
'max_clock': [715, 1328],
'autoboost_enabled': None,
},
NVIDIA_TESLA_V100: {
'base_clock': [877, 1312],
'max_clock': [877, 1530],
'autoboost_enabled': None,
},
NVIDIA_TESLA_T4: {
'base_clock': [5001, 585],
'max_clock': [5001, 1590],
'autoboost_enabled': None,
},
NVIDIA_TESLA_A100: {
'base_clock': [1215, 1410],
'max_clock': [1215, 1410],
'autoboost_enabled': None,
},
}
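# Extracts the two numeric clock values from a csv clock line such as
# '2505 MHz, 562 MHz'.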
EXTRACT_CLOCK_SPEEDS_REGEX = r'(\d*).*,\s*(\d*)'
flag_util.DEFINE_integerlist('gpu_clock_speeds',
None,
'desired gpu clock speeds in the form '
'[memory clock, graphics clock]')
flags.DEFINE_boolean('gpu_autoboost_enabled', None,
'whether gpu autoboost is enabled')
flags.DEFINE_string('nvidia_driver_version', '450.80.02',
'The version of nvidia driver to install. '
'For example, "418.67" or "418.87.01."')
flags.DEFINE_boolean('nvidia_driver_force_install', False,
'Whether to install NVIDIA driver, even if it is already '
'installed.')
flags.DEFINE_string('nvidia_driver_x_library_path', '/usr/lib',
'X library path for nvidia driver installation')
flags.DEFINE_string('nvidia_driver_x_module_path', '/usr/lib/xorg/modules',
'X module path for nvidia driver installation')
flags.DEFINE_boolean('nvidia_driver_persistence_mode', None,
'whether to enable persistence mode on the NVIDIA GPU')
FLAGS = flags.FLAGS
class UnsupportedClockSpeedError(Exception):
pass
class NvidiaSmiParseOutputError(Exception):
pass
class HeterogeneousGpuTypesError(Exception):
pass
class UnsupportedGpuTypeError(Exception):
pass
def CheckNvidiaGpuExists(vm):
"""Returns whether NVIDIA GPU exists or not on the vm.
Args:
vm: The virtual machine to check.
Returns:
True or False depending on whether NVIDIA GPU exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
vm.Install('pciutils')
output, _ = vm.RemoteCommand('sudo lspci', should_log=True)
regex = re.compile(r'3D controller: NVIDIA Corporation')
return regex.search(output) is not None
def CheckNvidiaSmiExists(vm):
"""Returns whether nvidia-smi is installed or not on a VM.
Args:
vm: The virtual to check.
Returns:
True or False depending on whether nvidia-smi command exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
resp, _ = vm.RemoteHostCommand('command -v nvidia-smi',
ignore_failure=True,
suppress_warning=True)
return bool(resp.rstrip())
def GetDriverVersion(vm):
"""Returns the NVIDIA driver version as a string.
Args:
vm: Virtual machine to query.
Returns:
String containing NVIDIA driver version installed.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
"""
stdout, _ = vm.RemoteCommand('nvidia-smi', should_log=True)
regex = r'Driver Version\:\s+(\S+)'
match = re.search(regex, stdout)
if match:
return str(match.group(1))
raise NvidiaSmiParseOutputError('Unable to parse driver version from {}'
.format(stdout))
def GetGpuType(vm):
"""Return the type of NVIDIA gpu(s) installed on the vm.
Args:
vm: Virtual machine to query.
Returns:
Type of gpus installed on the vm as a string.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
HeterogeneousGpuTypesError: If more than one gpu type is detected.
    UnsupportedGpuTypeError: If gpu type is not supported.
Example:
If 'nvidia-smi -L' returns:
GPU 0: Tesla V100-SXM2-16GB (UUID: GPU-1a046bb9-e456-45d3-5a35-52da392d09a5)
GPU 1: Tesla V100-SXM2-16GB (UUID: GPU-56cf4732-054c-4e40-9680-0ec27e97d21c)
GPU 2: Tesla V100-SXM2-16GB (UUID: GPU-4c7685ad-4b3a-8adc-ce20-f3a945127a8a)
GPU 3: Tesla V100-SXM2-16GB (UUID: GPU-0b034e63-22be-454b-b395-382e2d324728)
GPU 4: Tesla V100-SXM2-16GB (UUID: GPU-b0861159-4727-ef2f-ff66-73a765f4ecb6)
GPU 5: Tesla V100-SXM2-16GB (UUID: GPU-16ccaf51-1d1f-babe-9f3d-377e900bf37e)
GPU 6: Tesla V100-SXM2-16GB (UUID: GPU-6eba1fa6-de10-80e9-ec5f-4b8beeff7e12)
GPU 7: Tesla V100-SXM2-16GB (UUID: GPU-cba5a243-219c-df12-013e-1dbc98a8b0de)
GetGpuType() will return:
['V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB',
'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB']
"""
stdout, _ = vm.RemoteCommand('nvidia-smi -L', should_log=True)
try:
gpu_types = []
for line in stdout.splitlines():
if not line:
continue
splitted = line.split()
if splitted[2] == 'Tesla':
gpu_types.append(splitted[3])
else:
gpu_types.append(splitted[2])
except:
raise NvidiaSmiParseOutputError('Unable to parse gpu type from {}'
.format(stdout))
if any(gpu_type != gpu_types[0] for gpu_type in gpu_types):
raise HeterogeneousGpuTypesError(
'PKB only supports one type of gpu per VM')
if 'K80' in gpu_types[0]:
return NVIDIA_TESLA_K80
if 'P4' in gpu_types[0]:
return NVIDIA_TESLA_P4
if 'P100' in gpu_types[0]:
return NVIDIA_TESLA_P100
if 'V100' in gpu_types[0]:
return NVIDIA_TESLA_V100
if 'T4' in gpu_types[0]:
return NVIDIA_TESLA_T4
if 'A100' in gpu_types[0]:
return NVIDIA_TESLA_A100
  raise UnsupportedGpuTypeError(
      'Gpu type {0} is not supported by PKB'.format(gpu_types[0]))
def QueryNumberOfGpus(vm):
"""Returns the number of NVIDIA GPUs on the system.
Args:
vm: Virtual machine to query.
Returns:
Integer indicating the number of NVIDIA GPUs present on the vm.
"""
stdout, _ = vm.RemoteCommand('sudo nvidia-smi --query-gpu=count --id=0 '
'--format=csv', should_log=True)
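  # The csv output is a 'count' header row followed by the value, so the
  # second whitespace-separated token is the GPU count.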
return int(stdout.split()[1])
def GetPeerToPeerTopology(vm):
"""Returns a string specifying which GPUs can access each other via p2p.
Args:
vm: Virtual machine to operate on.
Example:
If p2p topology from nvidia-smi topo -p2p r looks like this:
0 1 2 3
0 X OK NS NS
1 OK X NS NS
2 NS NS X OK
3 NS NS OK X
GetTopology will return 'Y Y N N;Y Y N N;N N Y Y;N N Y Y'
"""
stdout, _ = vm.RemoteCommand('nvidia-smi topo -p2p r', should_log=True)
lines = [line.split() for line in stdout.splitlines()]
num_gpus = len(lines[0])
results = []
for idx, line in enumerate(lines[1:]):
if idx >= num_gpus:
break
results.append(' '.join(line[1:]))
# Delimit each GPU result with semicolons,
# and simplify the result character set to 'Y' and 'N'.
return (';'.join(results)
.replace('X', 'Y') # replace X (self) with Y
.replace('OK', 'Y') # replace OK with Y
.replace('NS', 'N')) # replace NS (not supported) with N
def SetAndConfirmGpuClocks(vm):
"""Sets and confirms the GPU clock speed and autoboost policy.
The clock values are provided either by the gpu_pcie_bandwidth_clock_speeds
flags, or from gpu-specific defaults. If a device is queried and its
clock speed does not align with what it was just set to, an exception will
be raised.
Args:
vm: The virtual machine to operate on.
Raises:
UnsupportedClockSpeedError: If a GPU did not accept the
provided clock speeds.
"""
gpu_type = GetGpuType(vm)
gpu_clock_speeds = GPU_DEFAULTS[gpu_type]['base_clock']
autoboost_enabled = GPU_DEFAULTS[gpu_type]['autoboost_enabled']
if FLAGS.gpu_clock_speeds is not None:
gpu_clock_speeds = FLAGS.gpu_clock_speeds
if FLAGS.gpu_autoboost_enabled is not None:
autoboost_enabled = FLAGS.gpu_autoboost_enabled
desired_memory_clock = gpu_clock_speeds[0]
desired_graphics_clock = gpu_clock_speeds[1]
EnablePersistenceMode(vm)
SetGpuClockSpeed(vm, desired_memory_clock, desired_graphics_clock)
SetAutoboostDefaultPolicy(vm, autoboost_enabled)
num_gpus = QueryNumberOfGpus(vm)
for i in range(num_gpus):
if QueryGpuClockSpeed(vm, i) != (desired_memory_clock,
desired_graphics_clock):
raise UnsupportedClockSpeedError(
'Unrecoverable error setting GPU #{} clock speed to {},{}'.format(
i, desired_memory_clock, desired_graphics_clock))
def SetGpuClockSpeed(vm, memory_clock_speed, graphics_clock_speed):
"""Sets autoboost and memory and graphics clocks to the specified frequency.
Args:
vm: Virtual machine to operate on.
memory_clock_speed: Desired speed of the memory clock, in MHz.
graphics_clock_speed: Desired speed of the graphics clock, in MHz.
"""
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_clock_speeds = QueryGpuClockSpeed(vm, device_id)
if current_clock_speeds != (memory_clock_speed, graphics_clock_speed):
vm.RemoteCommand('sudo nvidia-smi -ac {},{} --id={}'.format(
memory_clock_speed,
graphics_clock_speed,
device_id
))
def QueryGpuClockSpeed(vm, device_id):
"""Returns the value of the memory and graphics clock.
All clock values are in MHz.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Tuple of clock speeds in MHz in the form (memory clock, graphics clock).
"""
query = ('sudo nvidia-smi --query-gpu=clocks.applications.memory,'
'clocks.applications.graphics --format=csv --id={0}'
.format(device_id))
stdout, _ = vm.RemoteCommand(query, should_log=True)
clock_speeds = stdout.splitlines()[1]
matches = regex_util.ExtractAllMatches(EXTRACT_CLOCK_SPEEDS_REGEX,
clock_speeds)[0]
return (int(matches[0]), int(matches[1]))
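# Example of the CSV block parsed above (values are illustrative, roughly what
# a K80 reports):
#   clocks.applications.memory [MHz], clocks.applications.graphics [MHz]
#   2505 MHz, 875 MHz
# EXTRACT_CLOCK_SPEEDS_REGEX (defined earlier in this module) pulls the two
# integers out of the second line, so the call returns (2505, 875).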
def EnablePersistenceMode(vm):
"""Enables persistence mode on the NVIDIA driver.
Args:
vm: Virtual machine to operate on.
"""
vm.RemoteCommand('sudo nvidia-smi -pm 1')
def SetAutoboostDefaultPolicy(vm, autoboost_enabled):
"""Sets the autoboost policy to the specified value.
For each GPU on the VM, this function will set the autoboost policy
to the value specified by autoboost_enabled.
Args:
vm: Virtual machine to operate on.
autoboost_enabled: Bool or None. Value (if any) to set autoboost policy to
"""
if autoboost_enabled is None:
return
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_state = QueryAutoboostPolicy(vm, device_id)
if current_state['autoboost_default'] != autoboost_enabled:
vm.RemoteCommand('sudo nvidia-smi --auto-boost-default={0} --id={1}'
.format(1 if autoboost_enabled else 0, device_id))
def QueryAutoboostPolicy(vm, device_id):
"""Returns the state of autoboost and autoboost_default.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Dict containing values for autoboost and autoboost_default.
Values can be True (autoboost on), False (autoboost off),
and None (autoboost not supported).
Raises:
NvidiaSmiParseOutputError: If output from nvidia-smi can not be parsed.
"""
autoboost_regex = r'Auto Boost\s*:\s*(\S+)'
autoboost_default_regex = r'Auto Boost Default\s*:\s*(\S+)'
query = 'sudo nvidia-smi -q -d CLOCK --id={0}'.format(device_id)
stdout, _ = vm.RemoteCommand(query, should_log=True)
autoboost_match = re.search(autoboost_regex, stdout)
autoboost_default_match = re.search(autoboost_default_regex, stdout)
nvidia_smi_output_string_to_value = {
'On': True,
'Off': False,
'N/A': None,
}
if (autoboost_match is None) or (autoboost_default_match is None):
raise NvidiaSmiParseOutputError('Unable to parse Auto Boost policy from {}'
.format(stdout))
return {
'autoboost': nvidia_smi_output_string_to_value[
autoboost_match.group(1)],
'autoboost_default': nvidia_smi_output_string_to_value[
autoboost_default_match.group(1)]
}
def GetMetadata(vm):
"""Returns gpu-specific metadata as a dict.
Args:
vm: Virtual machine to operate on.
Returns:
A dict of gpu-specific metadata.
"""
clock_speeds = QueryGpuClockSpeed(vm, 0)
autoboost_policy = QueryAutoboostPolicy(vm, 0)
return {
'gpu_memory_clock': clock_speeds[0],
'gpu_graphics_clock': clock_speeds[1],
'gpu_autoboost': autoboost_policy['autoboost'],
'gpu_autoboost_default': autoboost_policy['autoboost_default'],
'nvidia_driver_version': GetDriverVersion(vm),
'gpu_type': GetGpuType(vm),
'num_gpus': QueryNumberOfGpus(vm),
'peer_to_peer_gpu_topology': GetPeerToPeerTopology(vm),
}
def DoPostInstallActions(vm):
"""Perform post NVIDIA driver install action on the vm.
Args:
vm: The virtual machine to operate on.
"""
SetAndConfirmGpuClocks(vm)
def Install(vm):
"""Install NVIDIA GPU driver on the vm.
Args:
vm: The virtual machine to install NVIDIA driver on.
"""
version_to_install = FLAGS.nvidia_driver_version
if not version_to_install:
logging.info('--nvidia_driver_version unset. Not installing.')
return
elif not FLAGS.nvidia_driver_force_install and CheckNvidiaSmiExists(vm):
    logging.warning('NVIDIA drivers already detected. Not installing.')
return
location = ('{base}/{version}/NVIDIA-Linux-x86_64-{version}.run'
.format(base=NVIDIA_DRIVER_LOCATION_BASE,
version=version_to_install))
vm.Install('wget')
tokens = re.split('/', location)
filename = tokens[-1]
vm.RemoteCommand('wget {location} && chmod 755 {filename} '
.format(location=location, filename=filename),
should_log=True)
vm.RemoteCommand('sudo ./{filename} -q -x-module-path={x_module_path} '
'--ui=none -x-library-path={x_library_path} '
'--no-install-compat32-libs'
.format(filename=filename,
x_module_path=FLAGS.nvidia_driver_x_module_path,
x_library_path=FLAGS.nvidia_driver_x_library_path),
should_log=True)
if FLAGS.nvidia_driver_persistence_mode:
EnablePersistenceMode(vm)
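# --- Illustrative sketch, not part of this module: _FakeVm is a hypothetical
# stand-in for a benchmarker VM that fakes RemoteCommand, so the query helpers
# above can be exercised without real hardware. ---
class _FakeVm(object):
  """Fakes the minimal VM interface used by the helpers in this module."""

  def RemoteCommand(self, cmd, should_log=False):
    if '--query-gpu=count' in cmd:
      return 'count\n1\n', ''  # pretend exactly one GPU is present
    return '', ''

  def Install(self, package):
    pass  # no-op; a real VM would install the named package

# QueryNumberOfGpus(_FakeVm()) returns 1; on a real VM the typical flow is
# Install(vm) -> DoPostInstallActions(vm) -> GetMetadata(vm).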
|
nilq/baby-python
|
python
|
# Code adapted from https://github.com/araffin/learning-to-drive-in-5-minutes/
# Author: Sheelabhadra Dey
import argparse
import os
import time
from collections import OrderedDict
from pprint import pprint
import numpy as np
import yaml
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env import VecFrameStack, VecNormalize, DummyVecEnv
from stable_baselines.ddpg import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.ppo2.ppo2 import constfn
from config import MIN_THROTTLE, MAX_THROTTLE, FRAME_SKIP,\
SIM_PARAMS, N_COMMAND_HISTORY, BASE_ENV, ENV_ID, MAX_STEERING_DIFF
from utils.utils import make_env, ALGOS, linear_schedule, get_latest_run_id, load_vae, create_callback
from environment.env import Env
from environment.carla.client import make_carla_client, CarlaClient
from environment.carla.tcp import TCPConnectionError
parser = argparse.ArgumentParser()
parser.add_argument('-tb', '--tensorboard-log', help='Tensorboard log dir', default='', type=str)
parser.add_argument('-i', '--trained-agent', help='Path to a pretrained agent to continue training',
default='', type=str)
parser.add_argument('--algo', help='RL Algorithm', default='sac',
type=str, required=False, choices=list(ALGOS.keys()))
parser.add_argument('-n', '--n-timesteps', help='Overwrite the number of timesteps', default=-1,
type=int)
parser.add_argument('--log-interval', help='Override log interval (default: -1, no change)', default=-1,
type=int)
parser.add_argument('-f', '--log-folder', help='Log folder', type=str, default='logs')
parser.add_argument('-vae', '--vae-path', help='Path to saved VAE', type=str, default='')
parser.add_argument('--save-vae', action='store_true', default=False,
help='Save VAE')
parser.add_argument('--seed', help='Random generator seed', type=int, default=0)
args = parser.parse_args()
set_global_seeds(args.seed)
tensorboard_log = None if args.tensorboard_log == '' else args.tensorboard_log + '/' + ENV_ID
print("=" * 10, ENV_ID, args.algo, "=" * 10)
vae = None
if args.vae_path != '':
print("Loading VAE ...")
vae = load_vae(args.vae_path)
# Load hyperparameters from yaml file
with open('hyperparams/{}.yml'.format(args.algo), 'r') as f:
    hyperparams = yaml.safe_load(f)[BASE_ENV]
# Sort hyperparams that will be saved
saved_hyperparams = OrderedDict([(key, hyperparams[key]) for key in sorted(hyperparams.keys())])
# save vae path
saved_hyperparams['vae_path'] = args.vae_path
if vae is not None:
saved_hyperparams['z_size'] = vae.z_size
# Save simulation params
for key in SIM_PARAMS:
saved_hyperparams[key] = eval(key)
pprint(saved_hyperparams)
# Compute and create log path
log_path = os.path.join(args.log_folder, args.algo)
save_path = os.path.join(log_path, "{}_{}".format(ENV_ID, get_latest_run_id(log_path, ENV_ID) + 1))
params_path = os.path.join(save_path, ENV_ID)
os.makedirs(params_path, exist_ok=True)
# Create learning rate schedules for ppo2 and sac
if args.algo in ["ppo2", "sac"]:
for key in ['learning_rate', 'cliprange']:
if key not in hyperparams:
continue
if isinstance(hyperparams[key], str):
schedule, initial_value = hyperparams[key].split('_')
initial_value = float(initial_value)
hyperparams[key] = linear_schedule(initial_value)
elif isinstance(hyperparams[key], float):
hyperparams[key] = constfn(hyperparams[key])
else:
            raise ValueError('Invalid value for {}: {}'.format(key, hyperparams[key]))
if args.n_timesteps > 0:
n_timesteps = args.n_timesteps
else:
n_timesteps = int(hyperparams['n_timesteps'])
del hyperparams['n_timesteps']
with make_carla_client('localhost', 2000) as client:
print("CarlaClient connected")
env = DummyVecEnv([make_env(client, args.seed, vae=vae)])
# Optional Frame-stacking
n_stack = 1
if hyperparams.get('frame_stack', False):
n_stack = hyperparams['frame_stack']
env = VecFrameStack(env, n_stack)
print("Stacking {} frames".format(n_stack))
del hyperparams['frame_stack']
# Parse noise string for DDPG
if args.algo == 'ddpg' and hyperparams.get('noise_type') is not None:
noise_type = hyperparams['noise_type'].strip()
noise_std = hyperparams['noise_std']
n_actions = env.action_space.shape[0]
if 'adaptive-param' in noise_type:
hyperparams['param_noise'] = AdaptiveParamNoiseSpec(initial_stddev=noise_std,
desired_action_stddev=noise_std)
elif 'normal' in noise_type:
hyperparams['action_noise'] = NormalActionNoise(mean=np.zeros(n_actions),
sigma=noise_std * np.ones(n_actions))
elif 'ornstein-uhlenbeck' in noise_type:
hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions),
sigma=noise_std * np.ones(n_actions))
else:
raise RuntimeError('Unknown noise type "{}"'.format(noise_type))
print("Applying {} noise with std {}".format(noise_type, noise_std))
del hyperparams['noise_type']
del hyperparams['noise_std']
# Train an agent from scratch
model = ALGOS[args.algo](env=env, tensorboard_log=tensorboard_log, verbose=1, **hyperparams)
kwargs = {}
if args.log_interval > -1:
kwargs = {'log_interval': args.log_interval}
if args.algo == 'sac':
kwargs.update({'callback': create_callback(args.algo,
os.path.join(save_path, ENV_ID + "_best"),
verbose=1)})
model.learn(n_timesteps, **kwargs)
# Save trained model
model.save(os.path.join(save_path, ENV_ID))
# Save hyperparams
with open(os.path.join(params_path, 'config.yml'), 'w') as f:
yaml.dump(saved_hyperparams, f)
if args.save_vae and vae is not None:
print("Saving VAE")
vae.save(os.path.join(params_path, 'vae'))
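# Example invocation (illustrative; the script name and paths are assumptions):
#   python train.py --algo sac -n 100000 -vae logs/vae.pkl -tb logs/tensorboard
# This connects to a CARLA server on localhost:2000, trains for 100k timesteps,
# and writes the model, best-model snapshot, and config.yml under logs/sac/.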
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
import collections.abc
import fnmatch
import itertools
import pickle
import re
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Sequence
from ..core import _imperative_rt
from ..core._imperative_rt import ComputingGraph, SerializationMetadata
from ..core._trace_option import set_symbolic_shape as _set_symbolic_shape
from ..core.tensor import megbrain_graph as G
from ..logger import get_logger
from .comp_graph_tools import get_dep_vars, get_opr_type, get_oprs_seq
from .network_node import (
ConstOpBase,
Host2DeviceCopy,
ImmutableTensor,
NetworkNode,
OpNode,
VarNode,
str_to_mge_class,
)
logger = get_logger(__name__)
class Network:
def __init__(self):
self.input_vars = [] # input var of graph
self._orig_inputs = []
self.output_vars = [] # output var of graph
self._orig_outputs = []
        self.all_oprs_map = OrderedDict()  # _imperative_rt.graph.OperatorNode.id: OpNode
        self.all_vars_map = (
            OrderedDict()
        )  # _imperative_rt.graph.VarNode.id: VarNode
self.graph = ComputingGraph()
self._metadata = None
@property
def metadata(self):
r"""Load metadata as a dict."""
if not self._metadata.is_valid:
logger.info("metadata is not valid!")
return None
ret = dict()
try:
user_info = pickle.loads(self._metadata.user_info)
except: # pylint: disable=bare-except
logger.warning(
"can't parse user info by pickle, so return the original bytes object!"
)
user_info = self._metadata.user_info
ret["user_info"] = user_info
ret["graph_modified"] = self._metadata.graph_modified
ret["optimized_for_inference"] = self._metadata.optimized_for_inference
if ret["optimized_for_inference"]:
ret.update(G.deserialize_infer_option(self._metadata.optimize_options))
return ret
@classmethod
def load(cls, model_path: str, outspec: List[str] = None):
r"""Loads a computing graph as a Network object.
Args:
model_path: file path of mge model.
outspec: only load the subgraph with outspec as its endpoints.
"""
self = cls()
ret = G.load_graph(model_path)
outputs, self._metadata = ret.output_vars_list, ret.metadata
if outspec is not None:
output_spec = outspec.copy()
all_vars = get_dep_vars(outputs) + outputs
new_outputs = {}
for i in all_vars:
if i.name in output_spec:
new_outputs[i.name] = i
output_spec.remove(i.name)
assert len(output_spec) == 0, "Can not find {} in this model".format(
output_spec
)
outputs = [new_outputs[i] for i in outspec]
self._orig_outputs = outputs
for x in self._orig_outputs:
self.output_vars.append(self._get_var(x))
self.add_dep_oprs()
for x in self._orig_inputs:
self.input_vars.append(self._get_var(x))
self.graph = self._orig_outputs[0].graph
return self
def _compile(self):
self.all_oprs_map = {}
self.all_vars_map = {}
for opr in self.all_oprs:
if isinstance(opr, (ConstOpBase, Host2DeviceCopy)):
opr.compile(self.graph)
else:
opr.compile()
if opr.name is not None:
opr._opr.name = opr.name
self.all_oprs_map[opr._opr.id] = opr
for o in opr.outputs:
self.all_vars_map[o.var.id] = o
def optimize_for_inference(self, dest_vars, **kwargs):
r"""Applies optimize_for_inference pass for operator graph.
Args:
dest_vars: list of output vars in the operator graph
Keyword Arguments:
* enable_io16xc32 --
whether to use float16 for I/O between oprs and use
float32 as internal computation precision. Note the output var would be
changed to float16.
* enable_ioc16 --
whether to use float16 for both I/O and computation
precision.
* enable_hwcd4 --
whether to use NHWCD4 data layout. This is faster on some
OpenCL backend.
* enable_nchw88 --
whether to use NCHW88 data layout, currently
used in X86 AVX backend.
* enable_nchw44 --
whether to use NCHW44 data layout, currently
used in arm backend.
* enable_nchw44_dot --
whether to use NCHW44_dot data layout, currently
used in armv8.2+dotprod backend.
* enable_nchw4 --
whether to use NCHW4 data layout, currently
used in nvidia backend(based on cudnn).
* enable_nchw32 --
whether to use NCHW32 data layout, currently
used in nvidia backend with tensorcore(based on cudnn).
* enable_chwn4 --
whether to use CHWN4 data layout, currently
used in nvidia backend with tensorcore.
* enable_nchw64 --
whether to use NCHW64 data layout, used for fast int4
support on Nvidia GPU.
* enable_fuse_conv_bias_nonlinearity: whether to fuse conv+bias+nonlinearty
into one opr.
* enable_fuse_conv_bias_with_z: whether to fuse conv_bias with z
input for inference on nvidia backend(this optimization pass will
result in mismatch of the precision of output of training and
inference)
"""
if not isinstance(dest_vars, Sequence):
dest_vars = [dest_vars]
dest_vars = list(G.VarNode(var.var) for var in dest_vars)
new_vars = G.optimize_for_inference(dest_vars, **kwargs)
return list(self._get_var(var) for var in new_vars)
def dump(
self,
file,
*,
keep_var_name: int = 1,
keep_opr_name: bool = False,
keep_param_name: bool = False,
keep_opr_priority: bool = False,
strip_info_file=None,
append_json=False,
optimize_for_inference=True,
append=False,
user_info: Any = None,
enable_metadata=True,
**kwargs
):
r"""Serializes graph to file.
Args:
file: output file, could be file object or filename.
append: whether output is appended to ``file``.
Only works when ``file`` is str.
keep_var_name: level for keeping variable names:
* 0: none of the names are kept
* 1: (default)keep names of output vars
* 2: keep names of all (output and internal) vars
keep_opr_name: whether to keep operator names.
keep_param_name: whether to keep param names, so param values can be
easily manipulated after loading model
keep_opr_priority: whether to keep priority setting for operators
strip_info_file: a string for path or a file handler. if is not None,
then the dump information for code strip would be written to ``strip_info_file``
            append_json: only checked when ``strip_info_file`` is not None. If set
                to True, the code-strip information will be appended to ``strip_info_file``;
                if set to False, ``strip_info_file`` will be rewritten.
            optimize_for_inference: enable optimizations;
                all optimize options will be skipped if this is False. Default: True
user_info: any type object, which will be pickled to bytes.
enable_metadata: whether to save metadata into output file.
        See more details in :meth:`~.trace.dump`.
"""
def _set_var_name(var):
graph_var = G.VarNode(var.var)
graph_var.name = var.name
return graph_var
self._compile()
out = list(map(_set_var_name, self.output_vars))
if kwargs.pop("arg_names", False):
logger.warning(
'"arg_names" is not supported in Network.dump, rename input vars directly'
)
if kwargs.pop("output_names", False):
logger.warning(
'"output_names" is not supported in Network.dump, rename output vars directly'
)
if optimize_for_inference:
out, optimize_options = G.optimize_for_inference(out, **kwargs)
metadata = SerializationMetadata()
if enable_metadata:
metadata.is_valid = True
metadata.graph_modified = True
metadata.user_info = pickle.dumps(user_info)
if optimize_for_inference:
metadata.optimize_options = optimize_options
G.set_priority_to_id([o._node if isinstance(o, G.VarNode) else o for o in out])
dump_content, _ = G.dump_graph(
out,
keep_var_name=keep_var_name,
keep_opr_name=keep_opr_name,
keep_param_name=keep_param_name,
keep_opr_priority=keep_opr_priority,
strip_info_file=strip_info_file,
append_json=append_json,
metadata=metadata,
)
if isinstance(file, str):
permission = "wb" if append == False else "ab"
file = open(file, permission)
file.write(dump_content)
def make_const(self, data, name=None, device=None):
r"""Makes an ImmutableTensor OpNode to provide a parameter for the network."""
node = ImmutableTensor(data, name, device, self.graph)
node.compile(self.graph)
return node.outputs[0]
def make_input_node(self, shape, dtype, name=None, device=None):
r"""Makes a Host2DeviceCopy OpNode to provide an input varnode for the network."""
node = Host2DeviceCopy(shape, dtype, name, device)
node.compile(self.graph)
return node.outputs[0]
def add_output(self, *vars: VarNode):
r"""Adds vars into the network output node list"""
if not all([var.owner for var in vars]):
self.add_dep_oprs(*vars)
for var in vars:
# use method 'is' instead of 'in' to avoid
# compare VarNode use elemwise equal
if not any(var is _ for _ in self.output_vars):
self.output_vars.append(var)
def remove_output(self, *vars: VarNode):
r"""Removes vars from the network output node list"""
for var in vars:
# use list pop instead of remove to avoid
# compare VarNode use elemwise equal
for idx, out_var in enumerate(self.output_vars):
if var is out_var:
self.output_vars.pop(idx)
def add_dep_oprs(self, *vars):
if len(vars) == 0:
vars = self.output_vars
assert all(isinstance(var, VarNode) for var in vars), "Only support add VarNode"
q = list(vars)
while len(q) > 0:
cur = q.pop(0)
if cur.owner is not None:
continue
if cur.name is None:
cur.name = cur.var.name
self.all_vars_map[cur.var.id] = cur
mge_opr = cur.var.owner
if get_opr_type(mge_opr) == "Host2DeviceCopy":
self._orig_inputs.extend(mge_opr.outputs)
cur.owner = self._add_opr(mge_opr)
if cur.owner is None:
cur.owner = self.all_oprs_map[mge_opr.id]
continue
q.extend(cur.owner.inputs)
return list(vars)
def modify_opr_names(self, modifier):
r"""Modifies names of operators **inplace**; useful for merging loaded
network into another network
Args:
modifier(str or callable): a string to be prepended to the name, or a function
that maps from name to name
"""
if isinstance(modifier, str):
om = modifier
modifier = lambda v: "{}.{}".format(om, v)
        assert isinstance(modifier, collections.abc.Callable)
for i in self.all_oprs:
v0 = i.name
v1 = modifier(v0)
assert isinstance(v1, str)
i.name = v1
def reset_batch_size(self, batchsize, *, blacklist=()):
r"""Helper for reset batch size; first dimension of all data providers
not in blacklist are assumed to be the batch size
Args:
blacklist: data provider names whose first dimension is not
                the batch size
"""
blacklist = set(blacklist)
prev_batchsize = None
for i in self.data_providers_filter:
if i.name in blacklist:
blacklist.remove(i.name)
else:
shp = list(i.shape)
if prev_batchsize is None:
prev_batchsize = shp[0]
else:
assert prev_batchsize == shp[0], (
"batchsize mismatch: batchsize={} "
"shape={} dp={}".format(prev_batchsize, shp, i.name)
)
shp[0] = batchsize
i.shape = tuple(shp)
self._compile()
assert prev_batchsize is not None, "no data provider found"
assert not blacklist, "unused items in blacklist: {}".format(blacklist)
def replace_vars(self, repl_dict: Dict[VarNode, VarNode]):
r"""Replaces vars in the graph.
Args:
repl_dict: the map {old_var: new_var} that specifies how to replace the vars.
"""
if not all([var.owner for var in repl_dict.values()]):
self.add_dep_oprs(*list(repl_dict.values()))
for var in self.all_vars:
if var in repl_dict:
repl_var = repl_dict[var]
if repl_var is var:
continue
for opnode in var.users:
# use method 'is' instead of 'in' to avoid
# compare VarNode use elemwise equal
assert any([var is _ for _ in opnode.inputs])
opnode.inputs = [repl_var if var is i else i for i in opnode.inputs]
if opnode not in repl_var.users:
repl_var.users.append(opnode)
var.users.clear()
self._compile()
def replace_oprs(self, repl_dict: Dict[OpNode, OpNode]):
r"""Replaces operators in the graph.
Args:
repl_dict: the map {old_opr: new_opr} that specifies how to replace the operators.
"""
for opr in self.all_oprs:
if opr in repl_dict:
assert len(opr.outputs) == len(
repl_dict[opr].outputs
), "can not replace {} with {}".format(type(opr), type(repl_dict[opr]))
for ind, var in enumerate(opr.outputs):
var.owner = repl_dict[opr]
var.__dict__.update(repl_dict[opr].outputs[ind].__dict__)
var.var = repl_dict[opr].outputs[ind].var
repl_dict[opr].outputs = opr.outputs
self._compile()
def get_opr_by_type(self, oprcls, unique=True):
assert issubclass(oprcls, OpNode)
rst = self.opr_filter.type(oprcls).as_list()
if unique:
assert len(rst) == 1, "{} operators of type {} found".format(
len(rst), oprcls
)
(rst,) = rst
return rst
def get_opr_by_name(self, name, unique=True):
rst = self.opr_filter.name(name).as_list()
if unique:
            assert len(rst) == 1, "{} operators with name {} found".format(len(rst), name)
(rst,) = rst
return rst
def get_var_by_name(self, name, unique=True):
rst = self.var_filter.name(name).as_list()
if unique:
            assert len(rst) == 1, "{} vars with name {} found".format(len(rst), name)
(rst,) = rst
return rst
def get_var_receive_oprs(self, var):
r"""Gets all oprs which use var as input"""
return self.opr_filter.has_input(var).as_list()
def get_dep_oprs(self, var):
r"""Gets dependent oprs of var"""
return get_oprs_seq(var, False, False)
@property
def opr_filter(self):
r"""Filter on all opnodes of the Network."""
oprs = self.all_oprs
return NodeFilter(itertools.islice(oprs, len(oprs)))
@property
def var_filter(self):
r"""Filter on all varnode of the Network."""
vars = self.all_vars
return NodeFilter(itertools.islice(vars, len(vars)))
@property
def params_filter(self): # all immutable tensor
r"""Filter on all parameters (ImmutableTensor Opr) of the Network"""
return self.opr_filter.param_provider()
@property
def data_providers_filter(self): # all host2devicecopy
r"""Filter on all input nodes (Host2DeviceCopy Opr) of the Network"""
return self.opr_filter.data_provider()
@property
def dest_vars(self):
r"""Output varnodes of the Network."""
return self.output_vars
@property
def all_oprs(self):
return get_oprs_seq(self.output_vars, False, False)
@property
def all_vars(self):
return get_dep_vars(self.output_vars)
@property
def all_vars_dict(self):
return self.var_filter.as_dict()
@property
def all_oprs_dict(self):
return self.opr_filter.as_dict()
def _add_opr(self, opr) -> Optional[OpNode]:
r"""Used for loading and building graph."""
assert isinstance(opr, _imperative_rt.graph.OperatorNode)
# TODO: use megbrain C++ RTTI to replace type string
if opr.id not in self.all_oprs_map:
opnode = str_to_mge_class(get_opr_type(opr)).load(opr)
self.all_oprs_map[opr.id] = opnode
for var in opr.inputs:
varnode = self._get_var(var)
opnode.add_inp_var(varnode)
varnode.users.append(opnode)
for var in opr.outputs:
opnode.add_out_var(self._get_var(var))
return opnode
else:
# overwrite the opnode 'new' output VarNode with
# original one when output number larger than 1,
# or will cause dependence issue in _compiler step.
if len(opr.outputs) > 1:
opnode = self.all_oprs_map[opr.id]
for idx, output in enumerate(opnode.outputs):
if output.var.id in self.all_vars_map:
opnode.outputs[idx] = self.all_vars_map[output.var.id]
return None
def _get_opr(self, x):
if x.id in self.all_oprs_map:
return self.all_oprs_map[x.id]
else:
return None
def _get_var(self, x):
r"""Convert :class:`~._imperative_rt.graph.VarNode` to :class:`~.VarNode`."""
assert isinstance(x, _imperative_rt.graph.VarNode)
if x.id not in self.all_vars_map or self.all_vars_map[x.id].var != x:
self.all_vars_map[x.id] = VarNode.load(x, self._get_opr(x.owner))
return self.all_vars_map[x.id]
def set_symbolic_shape(option: bool):
r"""Set the VarNode use symbolic shape or not, return the last status.
Please set to True and must recover after dump if want to change the input batch size.
Args:
option: True for enable symbolic shape.
"""
return _set_symbolic_shape(option)
def as_varnode(obj):
r"""convert a :class:`.VarNode` compatible object to :class:`.VarNode`.
Args:
obj: it must be one of the following:
1. a :class:`.VarNode` object
2. a :class:`.OpNode` object that has unique output
3. an iterable that produces either type 1 or 2, with length 1
"""
if type(obj) is VarNode:
return obj
if isinstance(obj, OpNode):
assert len(obj.outputs) == 1, (
"operator {} must have one output to be converted to VarNode; "
"got {} actually".format(obj, len(obj.outputs))
)
ret = obj.outputs[0]
assert type(ret) is VarNode
return ret
assert isinstance(
        obj, collections.abc.Iterable
), "{} is not compatible with VarNode".format(obj)
val = list(obj)
assert (
len(val) == 1
), "can not convert sequence of length {} to VarNode ({})".format(
len(val), (lambda s: s if len(s) < 50 else s[:50] + " ...")(str(val))
)
return as_varnode(val[0])
def as_oprnode(obj):
r"""convert a :class:`.OpNode` compatible object to
    :class:`.OpNode`; it works like :func:`as_varnode`.
"""
if type(obj) is VarNode:
return obj.owner
if isinstance(obj, OpNode):
return obj
assert isinstance(
        obj, collections.abc.Iterable
), "{} is not compatible with OpNode".format(obj)
val = list(obj)
assert (
len(val) == 1
), "can not convert sequence of length {} to " "OpNode({})".format(len(val), val)
return as_oprnode(val[0])
class NodeFilter:
r"""Filter on node iterator. This class is an iterator of
:class:`.NetworkNode` objects and multiple filtering conditions and
mappers can be chained.
Example:
.. code-block::
# find all :class:`.ImmutableTensor` nodes
for i in NodeFilter(node_iter).param_provider():
print(i)
# find all :class:`.ImmutableTensor` nodes that end with ':W'
for i in NodeFilter(node_iter).param_provider().name('*:W'):
print(i)
# number of inputs
nr_input = NodeFilter(node_iter).data_provider().as_count()
"""
_iter = None
def __init__(self, node_iter):
"""
:param node_iter: iterator to :class:`.NetworkNode`, or a
:class:`.VarNode`-compatible object; in the later case, its
dependent oprs would be used
"""
if isinstance(node_iter, VarNode):
oprs = get_oprs_seq(node_iter, False, False)
node_iter = itertools.islice(oprs, len(oprs) - 1)
if isinstance(node_iter, OpNode):
oprs = get_oprs_seq(node_iter.inputs, False, False)
node_iter = itertools.islice(oprs, len(oprs) - 1)
        assert isinstance(node_iter, collections.abc.Iterable)
if (not isinstance(node_iter, NodeFilter)) and type(
self
) is not NodeFilterCheckType:
node_iter = NodeFilterCheckType(node_iter, NetworkNode)
self._iter = node_iter
@classmethod
def make_all_deps(cls, *dest_vars):
r"""make a :class:`NodeFilter` that contains all deps of given vars"""
return cls(list(get_oprs_seq(dest_vars, False, False)))
def __iter__(self):
r"""to be overwritten by subclass to implement filters"""
return iter(self._iter)
def type(self, node_type):
r"""filter by specific node type
Args:
node_type: node type class
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterType(self, node_type)
def check_type(self, node_type):
r"""assert that all oprs produced by this iterator are instances of
certain type
Args:
node_type: node type class
Returns:
a new :class:`NodeFilter` object
Raises:
TypeError if type check failed
"""
return NodeFilterCheckType(self, node_type)
def not_type(self, node_type):
r"""remove oprs of specific type
Args:
node_type: node type class
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterNotType(self, node_type)
def param_provider(self):
r"""get :class:`~.ParamProvider` oprs; shorthand for
``.type(ParamProvider)``
"""
return self.type(ImmutableTensor)
def data_provider(self):
r"""get :class:`.DataProvider` oprs; shorthand for
``.type(DataProvider)``
"""
return self.type(Host2DeviceCopy)
def name(self, pattern, ignorecase=True):
r"""filter by node name
Args:
pattern(class:`str`): a string in glob syntax that can contain ``?`` and
``*`` to match a single or arbitrary characters.
            ignorecase(bool, optional): whether to ignore case
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterName(self, pattern, ignorecase)
def has_input(self, var):
r"""an opr is kept if it has given var as one of its inputs
Args:
var: var node to checked
Returns:
a new :class:`NodeFilter` object
"""
return NodeFilterHasInput(self, var)
def as_list(self):
r"""consume this iterator and return its content as a list"""
return list(self)
def as_unique(self):
r"""assert that this iterator yields only one node and return it
Returns:
class:`.GraphNodeBase`: the unique node
Raises:
ValueError if this iterator does not yield a unique node
"""
(opr,) = self
return opr
def as_dict(self):
r"""construct an ordered dict to map from node names to objects in
this iterator
"""
return collections.OrderedDict((i.name, i) for i in self)
def as_count(self):
r"""consume this iterator and get the number of elements"""
return sum(1 for _ in self)
class NodeFilterType(NodeFilter):
r"""see :meth:`NodeFilter.type`"""
_node_type = None
def __init__(self, node_iter, node_type):
assert issubclass(node_type, NetworkNode), "bad opr type: {}".format(node_type)
super().__init__(node_iter)
self._node_type = node_type
def __iter__(self):
for i in self._iter:
if isinstance(i, self._node_type):
yield i
class NodeFilterNotType(NodeFilterType):
r"""see :meth:`NodeFilter.not_type`"""
def __iter__(self):
for i in self._iter:
if not isinstance(i, self._node_type):
yield i
class NodeFilterCheckType(NodeFilterType):
r"""see :meth:`NodeFilter.check_type`"""
def __iter__(self):
for i in self._iter:
if not isinstance(i, self._node_type):
raise TypeError(
"all nodes should be {}; got {!r}".format(self._node_type, i)
)
yield i
class NodeFilterHasInput(NodeFilter):
r"""see :meth:`NodeFilter.has_input`"""
_var = None
def __init__(self, node_iter, var):
var = as_varnode(var)
super().__init__(node_iter)
self.var = var
def __iter__(self):
for i in self._iter:
assert isinstance(
i, OpNode
), "has_input() must be used with OpNode; " "got {!r}".format(i)
if any(self.var is _ for _ in i.inputs):
yield i
class NodeFilterName(NodeFilter):
r"""see :meth:`NodeFilter.name`"""
_re = None
def __init__(self, node_iter, pattern, ignorecase):
super().__init__(node_iter)
self.pattern = pattern
self._re = self.make_re(pattern, ignorecase)
@classmethod
def make_re(cls, pattern, ignorecase=True):
assert isinstance(pattern, str), "bad pattern: {!r}".format(pattern)
assert isinstance(ignorecase, bool)
flags = 0
if ignorecase:
flags |= re.IGNORECASE
return re.compile(fnmatch.translate(pattern), flags=flags)
def __iter__(self):
for i in self._iter:
if self.pattern == i.name or self._re.match(i.name):
yield i
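# Usage sketch (illustrative; "model.mge" is a placeholder path):
#   net = Network.load("model.mge")
#   for opr in net.opr_filter.name("conv*"):    # glob-match operator names
#       print(opr.name)
#   net.modify_opr_names("backbone")            # prefix every operator name
#   net.dump("model_renamed.mge")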
|
nilq/baby-python
|
python
|
from glue.config import DictRegistry
__all__ = ['viewer_registry', 'ViewerRegistry']
class ViewerRegistry(DictRegistry):
"""
Registry containing references to custom viewers.
"""
def __call__(self, name=None):
def decorator(cls):
self.add(name, cls)
return cls
return decorator
def add(self, name, cls):
"""
Add an item to the registry.
Parameters
----------
name : str
The key referencing the associated class in the registry
dictionary.
cls : type
The class definition (not instance) associated with the name given
in the first parameter.
"""
if name in self.members:
raise ValueError(f"Viewer with the name {name} already exists, "
f"please choose a different name.")
else:
self.members[name] = {'cls': cls}
viewer_registry = ViewerRegistry()
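# Usage sketch: registering a custom viewer class under a name. MyViewer is a
# hypothetical placeholder; a real viewer would subclass a glue viewer base.
@viewer_registry("my-viewer")
class MyViewer:
    """Stands in for a real custom viewer class."""

# viewer_registry.members["my-viewer"]["cls"] is now MyViewer.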
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
Code List Object
===============
'''
from __future__ import annotations
__all__ = ('CodeList',)
from typing import Tuple
from builder.commands.scode import SCode
from builder.datatypes.builderexception import BuilderError
from builder.utils import assertion
from builder.utils.logger import MyLogger
# logger
LOG = MyLogger.get_logger(__name__)
LOG.set_file_handler()
class CodeList(object):
''' Code list package object.
'''
def __init__(self, *args: SCode):
self._data = tuple(assertion.is_instance(a, SCode) for a in args)
#
# property
#
@property
def data(self) -> Tuple[SCode]:
return self._data
|
nilq/baby-python
|
python
|
# import logging
from xml.etree import ElementTree as ET
import lxml.etree as LET
from ckeditor.fields import RichTextField
from acdh_tei_pyutils.tei import TeiReader
from curator.models import Upload
# Create your models here.
from django.db import models
from django.utils.timezone import now
from .namespaces import NS, TEI_NS, XML_ID, get_attribute
from .util import clean_id, element2string
from django.template.loader import render_to_string
for ns, uri in NS.items():
ET.register_namespace(ns, uri)
class Witness(models.Model):
"""
Stores the physical tablet information - siglum
Probably unnecessary in the database
"""
witness_id = models.CharField(max_length=100, primary_key=True) # siglum
museum_numbers = models.TextField(blank=True, null=True)
provenance = models.CharField(max_length=100, blank=True, null=True)
script = models.TextField(blank=True, null=True)
state_publication = models.TextField(blank=True, null=True)
state_preservation = models.TextField(blank=True, null=True)
manuscript_type = models.TextField(blank=True, null=True)
tablets_attested = models.TextField(blank=True, null=True)
omens_attested = models.TextField(blank=True, null=True)
cdli_number = models.TextField(blank=True, null=True)
remarks = models.TextField(blank=True, null=True)
ctime = models.DateTimeField(default=now)
def __str__(self):
return f"{self.witness_id}: {self.museum_numbers}"
@property
def xml_id(self):
return clean_id(self.witness_id)
@property
def tei(self):
wit = ET.Element(get_attribute("witness", TEI_NS), {XML_ID: self.xml_id})
idno = ET.SubElement(wit, get_attribute("idno", TEI_NS))
idno.text = self.witness_id
return wit
# @staticmethod
# def corresponding_witness(cls, witness_label):
# """
# returns the corresponding witness object (eg. BM 036389+)
# given the witness label found in the score (eg. BM 36389+.2)
# """
# search_str = witness_label.split("+")[0]
# return Witness.objects.filter(witness_id__startswith=search_str)
class Chapter(models.Model):
"""
Stores the chapter number, name and links to omens
"""
chapter_name = models.CharField(max_length=100, unique=True)
animal = models.CharField(max_length=100, blank=True, null=True)
author = models.CharField(max_length=100, blank=True, null=True)
reviewer = models.CharField(max_length=100, blank=True, null=True)
proofreader = models.CharField(max_length=100, blank=True, null=True)
remarks = models.CharField(max_length=100, blank=True, null=True)
ctime = models.DateTimeField(default=now, blank=True, null=True)
witness = models.ManyToManyField(Witness)
upload = models.ManyToManyField(Upload)
introduction = RichTextField(default="Page under construction", blank=True, null=True)
def __str__(self):
return f"Chapter {self.chapter_name}"
def get_witness_from_omen(self):
witnesses = Witness.objects.filter(omen__in=self.omen_set.all()).distinct()
return witnesses
@property
def full_tei_string(self):
template_name = "omens/tei_templates/chapter.xml"
context = {"object": self}
full_tei_string = render_to_string(template_name, context)
return full_tei_string
class Omen(models.Model):
"""
Individual omen
"""
xml_id = models.CharField(max_length=100, unique=True) # TEI ID
omen_name = models.CharField(max_length=100, primary_key=True) # TEI @n
omen_num = models.CharField(max_length=100) # from sheet name
ctime = models.DateTimeField(default=now)
chapter = models.ForeignKey(Chapter, on_delete=models.CASCADE, default="")
witness = models.ManyToManyField(Witness)
upload = models.ManyToManyField(Upload)
tei_content = models.TextField(blank=True, null=True)
@property
def tei(self):
chapter_tei = ET.XML(self.chapter.tei)
omen_tei = chapter_tei.find(f'.//*[@n="{self.omen_name}"]')
        if omen_tei is not None:
tei_string = element2string(omen_tei)
return tei_string
return ""
@property
def full_tei_string(self):
template_name = "omens/tei_templates/omen.xml"
context = {"object": self}
full_tei_string = render_to_string(template_name, context)
return full_tei_string
@property
def protasis(self):
return Segment.objects.filter(xml_id=self.xml_id + "_P")[0]
@property
def apodosis(self):
return Segment.objects.filter(xml_id=self.xml_id + "_A")[0]
class Segment(models.Model):
"""
A segment in the omen, either PROTASIS or APODOSIS
"""
xml_id = models.CharField(max_length=100, unique=True) # TEI ID
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
segment_type = models.CharField(
max_length=9,
choices=(("PROTASIS", "Protasis"), ("APODOSIS", "Apodosis")),
default="PROTASIS",
)
@classmethod
def protasis(cls, omen):
return cls.objects.filter(xml_id=omen.omen_name + "_P")[0]
@classmethod
def apodosis(cls, omen):
return cls.objects.filter(xml_id=omen.omen_name + "_A")[0]
def __str__(self):
return f"Omen {self.omen.omen_name} - {self.segment_type}"
class Lemma(models.Model):
"""
A lemma in the omen, represented using w element inside the score in the TEI
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
lemma_idx = models.IntegerField(
default=0
) # index of the lemma in the in the omen (position of the w element, implicit in the TEI)
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
def set_segment_type_to_apodosis(self):
# logging.debug("Changing to Apodosis %s", self.omen.apodosis)
self.segment = self.omen.apodosis
self.save()
def __str__(self):
return f"{self.xml_id}_{self.segment}"
class Reconstruction(models.Model):
"""
A reconstruction of the omen, which contains one or more of the following:
- translation
- transcription
- transliteration
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
label = models.CharField(max_length=100)
omen = models.ForeignKey(Omen, on_delete=models.CASCADE, default="")
witness = models.ForeignKey(Witness, on_delete=models.CASCADE, null=True)
@property
def safe_id(self):
return self.xml_id.replace("_", "-").replace(".", "-")
class Translation(models.Model):
"""
Translation of the omen, corresponding to a particular reconstruction
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
reconstruction = models.ForeignKey(
Reconstruction, on_delete=models.CASCADE, default=""
)
segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
translation_txt = models.CharField(max_length=1000, default="")
lang = models.CharField(
max_length=2,
choices=(("en", "ENGLISH"), ("dt", "GERMAN"), ("de", "GERMAN")),
default="en",
)
@property
def safe_id(self):
return f"{self.reconstruction.safe_id}-{self.segment.segment_type}"
def __str__(self):
return f"{self.xml_id} {self.segment}"
# class Word(models.Model):
# """
# Words and word roots from the translation,
# to be linked with the curated SenseTree later
# """
# translation = models.ForeignKey(Translation, on_delete=models.CASCADE)
# # position of the word in the in the translation segment
# word_idx = models.IntegerField(default=0)
# # root form of the word
# word_root = models.CharField(max_length=100, default="")
# sense_tree = models.ForeignKey(SenseTree, on_delete=models.CASCADE)
class Transliteration(models.Model):
"""
A row represents a lemma in a transliteration reconstruction of the omen
Probably unnecessary
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
reconstruction = models.ForeignKey(
Reconstruction, on_delete=models.CASCADE, default=""
)
lemma = models.ForeignKey(Lemma, on_delete=models.CASCADE, default="")
class Transcription(models.Model):
"""
A row represents a lemma in a transcription of the omen
Probably unnecessary
"""
xml_id = models.CharField(max_length=100, primary_key=True) # TEI ID
reconstruction = models.ForeignKey(
Reconstruction, on_delete=models.CASCADE, default=""
)
lemma = models.ForeignKey(Lemma, on_delete=models.CASCADE, default="")
class Sequence(models.Model):
"""
A row represents a named sequence of omens curated
"""
seq_name = models.CharField(max_length=100, unique=True)
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
position = models.IntegerField(default=0)
class PhilComment(models.Model):
"""
A row represents a philological comment
"""
omen = models.ForeignKey(Omen, on_delete=models.CASCADE)
comment = RichTextField(blank=True, null=True)
def __str__(self):
if self.comment:
return f"{self.comment[:24]}... (Omen: {self.omen.omen_num})"
@property
def xml_id(self):
return f"phil-comment__{self.id}"
def as_tei_node(self):
if self.comment:
note_node = LET.Element("{http://www.tei-c.org/ns/1.0}note")
note_node.attrib['type'] = "phil-comment"
note_node.attrib["{http://www.w3.org/XML/1998/namespace}id"] = self.xml_id
note_node.text = self.comment
return note_node
return None
def get_parent_node(self):
if self.omen.tei_content:
try:
omen_tei = TeiReader(self.omen.tei_content)
except LET.XMLSyntaxError:
return None
return omen_tei.tree
else:
return None
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
some_div = self.get_parent_node()
if some_div is not None:
phil_note = self.as_tei_node()
xpath = f'//*[@xml:id="{self.xml_id}"]'
for bad in some_div.xpath(xpath):
bad.getparent().remove(bad)
if phil_note is not None:
some_div.insert(0, phil_note)
            self.omen.tei_content = LET.tostring(some_div).decode()
self.omen.save()
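# Round-trip sketch (illustrative names): saving a PhilComment re-renders it as
# a <note type="phil-comment"> element, strips any stale copy with the same
# xml:id from the omen's stored TEI, and injects the fresh note at the top.
#   comment = PhilComment(omen=some_omen, comment="reading uncertain")
#   comment.save()   # some_omen.tei_content now embeds the note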
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django_fsm.db.fields.fsmfield
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nickname', models.CharField(max_length=100, null=True, blank=True)),
('nickname_slug', models.CharField(db_index=True, max_length=100, null=True, blank=True)),
('nickname_state', django_fsm.db.fields.fsmfield.FSMField(default=b'unset', max_length=50)),
('wants_reviews', models.BooleanField(default=False, help_text=b"Reviewing other's tasks helps finish transcripts faster.", verbose_name=b"Help review other's tasks.")),
('task_order', models.CharField(default=b'eager', max_length=10, verbose_name=b'Which order would you like to receive tasks?', choices=[(b'eager', b'Give me different kinds of tasks when they are available.'), (b'sequential', b'Give me the same kinds of tasks in sequence.')])),
],
),
migrations.CreateModel(
name='TaskType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=10)),
('description', models.CharField(max_length=200)),
('order', models.PositiveIntegerField(default=0, unique=True)),
],
options={
'ordering': ('order',),
},
),
migrations.AddField(
model_name='profile',
name='task_types',
field=models.ManyToManyField(to='profiles.TaskType', verbose_name=b'Which tasks would you like to help with?'),
),
migrations.AddField(
model_name='profile',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
]
|
nilq/baby-python
|
python
|
from pycket.interpreter import (
App,
Begin,
Begin0,
BeginForSyntax,
CaseLambda,
Cell,
CellRef,
DefineValues,
If,
Lambda,
Let,
Letrec,
LexicalVar,
Module,
ModuleVar,
LinkletVar,
Quote,
QuoteSyntax,
Require,
SetBang,
ToplevelVar,
VariableReference,
WithContinuationMark,
make_let,
make_letrec,
)
from rpython.rlib.objectmodel import specialize
class ASTVisitor(object):
"""
An abstract visitor class for the AST classes defined below.
A subclass need only define handler functions for the relevant portions
of the AST, as the default implementations in this class pass along the
relevant data.
"""
@specialize.argtype(0)
def visit_cell(self, ast, *args):
assert isinstance(ast, Cell)
expr = ast.expr.visit(self, *args)
return Cell(expr, need_cell_flags=ast.need_cell_flags)
@specialize.argtype(0)
def visit_quote(self, ast, *args):
assert isinstance(ast, Quote)
return ast
@specialize.argtype(0)
def visit_quote_syntax(self, ast, *args):
assert isinstance(ast, QuoteSyntax)
return ast
@specialize.argtype(0)
def visit_variable_reference(self, ast, *args):
assert isinstance(ast, VariableReference)
return ast
@specialize.argtype(0)
def visit_with_continuation_mark(self, ast, *args):
assert isinstance(ast, WithContinuationMark)
key = ast.key.visit(self, *args)
value = ast.value.visit(self, *args)
body = ast.body.visit(self, *args)
return WithContinuationMark(key, value, body)
@specialize.argtype(0)
def visit_app(self, ast, *args):
assert isinstance(ast, App)
rator = ast.rator.visit(self, *args)
rands = [a.visit(self, *args) for a in ast.rands]
return App.make(rator, rands, ast.env_structure)
@specialize.argtype(0)
def visit_begin0(self, ast, *args):
assert isinstance(ast, Begin0)
first = ast.first.visit(self, *args)
body = [b.visit(self, *args) for b in ast.body]
return Begin0.make(first, body)
@specialize.argtype(0)
def visit_begin(self, ast, *args):
assert isinstance(ast, Begin)
body = [b.visit(self, *args) for b in ast.body]
return Begin.make(body)
@specialize.argtype(0)
def visit_begin_for_syntax(self, ast, *args):
assert isinstance(ast, BeginForSyntax)
return ast
@specialize.argtype(0)
def visit_cell_ref(self, ast, *args):
assert isinstance(ast, CellRef)
return ast
@specialize.argtype(0)
def visit_lexical_var(self, ast, *args):
assert isinstance(ast, LexicalVar)
return ast
@specialize.argtype(0)
def visit_module_var(self, ast, *args):
assert isinstance(ast, ModuleVar)
return ast
@specialize.argtype(0)
def visit_linklet_var(self, ast, *args):
assert isinstance(ast, LinkletVar)
return ast
@specialize.argtype(0)
def visit_toplevel_var(self, ast, *args):
assert isinstance(ast, ToplevelVar)
return ast
@specialize.argtype(0)
def visit_set_bang(self, ast, *args):
assert isinstance(ast, SetBang)
var = ast.var.visit(self, *args)
rhs = ast.rhs.visit(self, *args)
return SetBang(var, rhs)
@specialize.argtype(0)
def visit_if(self, ast, *args):
assert isinstance(ast, If)
tst = ast.tst.visit(self, *args)
thn = ast.thn.visit(self, *args)
els = ast.els.visit(self, *args)
return If.make(tst, thn, els)
@specialize.argtype(0)
def visit_case_lambda(self, ast, *args):
assert isinstance(ast, CaseLambda)
lams = [l.visit(self, *args) for l in ast.lams]
return CaseLambda(lams, recursive_sym=ast.recursive_sym, arity=ast._arity)
@specialize.argtype(0)
def visit_lambda(self, ast, *args):
from pycket.interpreter import make_lambda
assert isinstance(ast, Lambda)
body = [b.visit(self, *args) for b in ast.body]
return make_lambda(ast.formals, ast.rest, body, sourceinfo=ast.sourceinfo)
@specialize.argtype(0)
def visit_letrec(self, ast, *args):
assert isinstance(ast, Letrec)
rhss = [r.visit(self, *args) for r in ast.rhss]
body = [b.visit(self, *args) for b in ast.body]
vars = ast._rebuild_args()
return make_letrec(vars, rhss, body)
@specialize.argtype(0)
def visit_let(self, ast, *args):
assert isinstance(ast, Let)
rhss = [r.visit(self, *args) for r in ast.rhss]
body = [b.visit(self, *args) for b in ast.body]
vars = ast._rebuild_args()
return make_let(vars, rhss, body)
@specialize.argtype(0)
def visit_define_values(self, ast, *args):
assert isinstance(ast, DefineValues)
rhs = ast.rhs.visit(self, *args)
return DefineValues(ast.names, rhs, ast.display_names)
@specialize.argtype(0)
def visit_module(self, ast, *args):
""" Must not produce a new module AST """
assert isinstance(ast, Module)
for i, b in enumerate(ast.body):
ast.body[i] = b.visit(self, *args)
for i, r in enumerate(ast.requires):
ast.requires[i] = r.visit(self, *args)
return ast
@specialize.argtype(0)
def visit_require(self, ast, *args):
assert isinstance(ast, Require)
return ast
class CopyVisitor(ASTVisitor):
def visit_variable_reference(self, ast):
assert isinstance(ast, VariableReference)
return VariableReference(ast.var, ast.path, ast.is_mut)
def visit_quote(self, ast):
assert isinstance(ast, Quote)
return Quote(ast.w_val)
def visit_lexical_var(self, ast):
assert isinstance(ast, LexicalVar)
return LexicalVar(ast.sym, ast.env_structure)
def visit_module_var(self, ast):
assert isinstance(ast, ModuleVar)
var = ModuleVar(ast.sym, ast.srcmod, ast.srcsym, ast.path)
var.modenv = ast.modenv
var.w_value = ast.w_value
return var
def visit_cell_ref(self, ast):
assert isinstance(ast, CellRef)
return CellRef(ast.sym, ast.env_structure)
def visit_let(self, ast):
assert isinstance(ast, Let)
body = [b.visit(self) for b in ast.body]
rhss = [r.visit(self) for r in ast.rhss]
result = Let(ast.args,
ast.counts,
rhss,
body,
ast.remove_num_envs)
result.copy_body_pruning(ast)
return result
def visit_letrec(self, ast):
assert isinstance(ast, Letrec)
body = [b.visit(self) for b in ast.body]
rhss = [r.visit(self) for r in ast.rhss]
result = Letrec(ast.args,
ast.counts,
rhss,
body)
result.copy_body_pruning(ast)
return result
def visit_begin(self, ast):
assert isinstance(ast, Begin)
body = [b.visit(self) for b in ast.body]
result = Begin(body)
result.copy_body_pruning(ast)
return result
def visit_begin0(self, ast):
assert isinstance(ast, Begin0)
fst = ast.first.visit(self)
rst = [r.visit(self) for r in ast.body]
result = Begin0(fst, rst)
result.copy_body_pruning(ast)
return result
def copy_ast(ast):
visitor = CopyVisitor()
return ast.visit(visitor)
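# Sketch of a custom visitor (illustrative, not part of pycket): counts Quote
# nodes while returning the AST unchanged, relying on the default handlers
# inherited from ASTVisitor to walk every other node type.
class QuoteCounter(ASTVisitor):
    def __init__(self):
        self.count = 0

    def visit_quote(self, ast, *args):
        self.count += 1
        return ast

# usage: counter = QuoteCounter(); some_ast.visit(counter); counter.count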
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
"""
@CreateDate: 2021/07/25
@Author: Xingyan Liu
@File: builder.py
@Project: stagewiseNN
"""
import os
import sys
from pathlib import Path
from typing import Sequence, Mapping, Optional, Union, Callable
import logging
import pandas as pd
import numpy as np
from scipy import sparse
import scanpy as sc
from .utils import quick_preprocess_raw, make_binary
from .multipartite_graph import stagewise_knn
from .graph2tree import max_connection, adaptive_tree
class BuilderParams:
def __init__(self, **kwargs):
self._dict = {}
self.update(**kwargs)
def update(self, **kwargs):
self._dict.update(**kwargs)
return self
@property
def keys(self):
return self._dict.keys()
def __getattr__(self, key):
return self._dict[key]
class Builder(object):
def __init__(
self,
stage_order: Sequence,
**build_params
):
"""
Parameters
----------
stage_order: Sequence
the order of stages
"""
self.stage_order = stage_order
self._params = BuilderParams(**build_params)
self._distmat = None
self._connect = None
self._stage_lbs = None
self._group_lbs = None
self._edgedf = None
self._refined_group_lbs = None
@property
def stage_lbs(self):
return self._stage_lbs
@property
def group_lbs(self):
return self._group_lbs
# @group_lbs.setter
# def group_lbs(self, group_lbs):
# pass
@property
def distmat(self):
return self._distmat
@property
def connect(self):
return self._connect
@property
def connect_bin(self):
""" binarized edges """
if self._connect is not None:
return make_binary(self._connect)
return None
@property
def edgedf(self):
return self._edgedf
@property
def refined_group_lbs(self):
return self._refined_group_lbs
def build_graph(
self,
X, stage_lbs,
binary_edge: bool = True,
ks: Union[Sequence[int], int] = 10,
n_pcs: Union[Sequence[int], int] = 50,
pca_base_on: Optional[str] = 'stacked',
leaf_size: int = 5,
**kwargs
):
"""
Build multipartite KNN-graph stage-by-stage.
Parameters
----------
X: np.ndarray or sparse matrix
data matrix, of shape (n_samples, n_features)
stage_lbs: Sequence
stage labels for each sample (nodes in `build_graph`)
binary_edge: bool (default=True)
whether to use the binarized edges. Set as True may cause some
information loss but a more robust result.
ks:
the number of nearest neighbors to be calculated.
n_pcs:
The number of principal components after PCA reduction.
If `pca_base_on` is None, this will be ignored.
pca_base_on: str {'x1', 'x2', 'stacked', None} (default='stacked')
if None, perform KNN on the original data space.
leaf_size: int (default=5)
            Leaf size passed to BallTree or KDTree, controlling the level of
            approximation. Higher values are faster but give weaker guarantees
            of finding the exact nearest neighbors. Setting it to 1 gives
            brute-force (exact) KNN.
kwargs:
other parameters for `stagewise_knn`
Returns
-------
distmat: sparse.csr_matrix
the distance matrix, of shape (n_samples, n_samples)
connect: sparse.csr_matrix
the connectivities matrix, of shape (n_samples, n_samples)
"""
self._stage_lbs = stage_lbs
distmat, connect = stagewise_knn(
X, self.stage_lbs,
stage_order=self.stage_order,
k=ks,
leaf_size=leaf_size, # 1 for brute-force KNN
pca_base_on=pca_base_on,
n_pcs=n_pcs,
binary_edge=False,
**kwargs
)
self._distmat = distmat
self._connect = connect
if binary_edge:
connect = self.connect_bin
# record parameters
self._params.update(
binary_edge=binary_edge,
ks=ks,
n_pcs=n_pcs,
pca_base_on=pca_base_on,
leaf_size=leaf_size,
)
return distmat, connect
def build_tree(
self,
group_lbs: Sequence,
stage_lbs: Optional[Sequence] = None,
ignore_pa=(),
ext_sep: str = '_',
):
"""
        Adaptively build the developmental tree from the stagewise-KNN graph.
Parameters
----------
group_lbs: Sequence
group labels for each sample (nodes in `build_graph`)
stage_lbs: Sequence
stage labels for each sample (nodes in `build_graph`)
ignore_pa: list or set
parent nodes to be ignored; empty tuple by default.
ext_sep: str
parse string for automatically extract the stage-labels from
`group_lbs`
Returns
-------
edgedf: pd.DataFrame
pd.DataFrame of columns {'node', 'parent', 'prop'},
and of the same number of rows as number of total stage-clusters.
the column 'prop' is the proportion of nodes that have votes for
the current parent.
refined_group_lbs:
refined group labels for each sample (e.g. single-cell)
"""
        # A connectivity matrix NOT calculated by StagewiseNN may produce
        # unexpected results when passed to `sparse.triu()` here.
# TODO: define `take_cross_stage_edges(spmatrix)`
conn_upper = sparse.triu(self.connect)
adj_max = max_connection(conn_upper)
self._group_lbs = group_lbs
if self.stage_lbs is None:
self._stage_lbs = stage_lbs
edgedf, refined_group_lbs = adaptive_tree(
adj_max, self.group_lbs,
stage_lbs=self.stage_lbs,
stage_ord=self.stage_order,
ignore_pa=ignore_pa,
ext_sep=ext_sep,
)
self._edgedf = edgedf
self._refined_group_lbs = refined_group_lbs
# record parameters
self._params.update(
ignore_pa=ignore_pa,
ext_sep=ext_sep,
)
return edgedf, refined_group_lbs
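# Usage sketch (illustrative; X, stages and groups are placeholder arrays):
#   builder = Builder(stage_order=('E6', 'E8', 'E10'))
#   distmat, connect = builder.build_graph(X, stage_lbs=stages, ks=10, n_pcs=30)
#   edgedf, refined = builder.build_tree(group_lbs=groups)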
def __test__():
pass
if __name__ == '__main__':
import time
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(filename)s-%(lineno)d-%(funcName)s(): '
'%(levelname)s\n%(message)s'
)
t = time.time()
__test__()
print('Done running file: {}\nTime: {}'.format(
os.path.abspath(__file__), time.time() - t,
))
|
nilq/baby-python
|
python
|
# A import performance test of standard classes, dataclasses, attrs, and cluegen
import sys
import time
standard_template = '''
class C{n}:
def __init__(self, a, b, c, d, e):
self.a = a
self.b = b
self.c = c
self.d = d
self.e = e
def __repr__(self):
return f'C{n}({{self.a!r}}, {{self.b!r}}, {{self.c!r}}, {{self.d!r}}, {{self.e!r}})'
def __eq__(self, other):
        if self.__class__ is other.__class__:
return (self.a, self.b, self.c, self.d, self.e) == (other.a, other.b, other.c, other.d, other.e)
else:
return NotImplemented
'''
# Note: this collections.namedtuple template is immediately overridden by the
# typing.NamedTuple variant defined just below.
namedtuple_template = '''
C{n} = namedtuple('C{n}', ['a', 'b', 'c', 'd', 'e'])
'''
namedtuple_template = '''
class C{n}(NamedTuple):
a : int
b : int
c : int
d : int
e : int
'''
dataclass_template = '''
@dataclass
class C{n}:
a : int
b : int
c : int
d : int
e : int
'''
attr_template = '''
@attr.s
class C{n}:
a = attr.ib()
b = attr.ib()
c = attr.ib()
d = attr.ib()
e = attr.ib()
'''
cluegen_template = '''
class C{n}(Datum):
a : int
b : int
c : int
d : int
e : int
'''
# cluegen, but same default methods as dataclasses generated
cluegen_eval_template = '''
class C{n}(Datum):
a : int
b : int
c : int
d : int
e : int
C{n}.__init__, C{n}.__repr__, C{n}.__eq__
'''
def run_test(name, n):
start = time.time()
while n > 0:
import perftemp
del sys.modules['perftemp']
n -= 1
end = time.time()
print(name, (end-start))
def write_perftemp(count, template, setup):
with open('perftemp.py', 'w') as f:
f.write(setup)
for n in range(count):
f.write(template.format(n=n))
def main(reps):
write_perftemp(100, standard_template, '')
run_test('standard classes', reps)
    # write_perftemp(100, namedtuple_template, 'from collections import namedtuple\n')  # superseded by the NamedTuple variant below
write_perftemp(100, namedtuple_template, 'from typing import NamedTuple\n')
run_test('namedtuple', reps)
write_perftemp(100, dataclass_template, 'from dataclasses import dataclass\n')
run_test('dataclasses', reps)
try:
write_perftemp(100, attr_template, 'import attr\n')
run_test('attrs', reps)
except ImportError:
print("attrs not installed")
write_perftemp(100, cluegen_template, 'from cluegen import Datum\n')
run_test('cluegen', reps)
write_perftemp(100, cluegen_eval_template, 'from cluegen import Datum\n')
run_test('cluegen_eval', reps)
if __name__ == '__main__':
if len(sys.argv) == 2:
reps = int(sys.argv[1])
else:
reps = 100
main(reps)
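# Usage sketch (assumed invocation): `python <thisfile>.py [reps]`.
# Each run_test() call imports the freshly written perftemp.py `reps` times,
# so the reported time is dominated by class-definition cost at import.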
|
nilq/baby-python
|
python
|
from __future__ import print_function
from __future__ import division
from collections import defaultdict, OrderedDict
from itertools import izip
import numbers
from time import time
import itertools
import math
import scipy.sparse as sparse
import sklearn
from sklearn.base import BaseEstimator
from sklearn.ensemble import GradientBoostingClassifier as GBClassifier
from sklearn.ensemble._gradient_boosting import _random_sample_mask
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.ensemble.gradient_boosting import LossFunction, LOSS_FUNCTIONS, MultinomialDeviance, \
LogOddsEstimator, BinomialDeviance
import numpy
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors.unsupervised import NearestNeighbors
from sklearn.tree._tree import DTYPE
from sklearn.utils.random import check_random_state
from sklearn.utils.validation import check_arrays, column_or_1d
from commonutils import generate_sample, check_sample_weight
import commonutils
import reports
__author__ = 'Alex Rogozhnikov'
# TODO updating tree in FL and NFL
class KnnLossFunction(LossFunction, BaseEstimator):
def __init__(self, uniform_variables):
"""KnnLossFunction is a base class to be inherited by other loss functions,
which choose the particular A matrix and w vector. The formula of loss is:
loss = \sum_i w_i * exp(- \sum_j a_ij y_j score_j)
"""
LossFunction.__init__(self, 1)
self.uniform_variables = uniform_variables
# real matrix and vector will be computed during fitting
self.A = None
self.A_t = None
self.w = None
def __call__(self, y, pred):
"""Computing the loss itself"""
assert len(y) == len(pred) == self.A.shape[1], "something is wrong with sizes"
y_signed = 2 * y - 1
exponents = numpy.exp(- self.A.dot(y_signed * numpy.ravel(pred)))
return numpy.sum(self.w * exponents)
def negative_gradient(self, y, pred, **kwargs):
"""Computing negative gradient"""
assert len(y) == len(pred) == self.A.shape[1], "something is wrong with sizes"
y_signed = 2 * y - 1
exponents = numpy.exp(- self.A.dot(y_signed * numpy.ravel(pred)))
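        # sketch of the algebra: loss = sum_i w_i * exp(-(A @ (y_signed * pred))_i),
        # so d loss / d pred_k = -sum_i w_i * A_ik * y_signed_k * exp(...),
        # hence the negative gradient below is y_signed * (A^T @ (w * exponents))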
result = self.A_t.dot(self.w * exponents) * y_signed
return result
def fit(self, X, y):
"""This method is used to compute A matrix and w based on train dataset"""
assert len(X) == len(y), "different size of arrays"
A, w = self.compute_parameters(X, y)
self.A = sparse.csr_matrix(A)
self.A_t = sparse.csr_matrix(self.A.transpose())
self.w = numpy.array(w)
assert A.shape[0] == len(w), "inconsistent sizes"
assert A.shape[1] == len(X), "wrong size of matrix"
return self
def compute_parameters(self, trainX, trainY):
"""This method should be overloaded in descendant, and should return A, w (matrix and vector)"""
raise NotImplementedError()
def init_estimator(self, X=None, y=None):
return LogOddsEstimator()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_mask, learning_rate=1.0, k=0):
y_signed = 2 * y - 1
self.update_exponents = self.w * numpy.exp(- self.A.dot(y_signed * numpy.ravel(y_pred)))
LossFunction.update_terminal_regions(self, tree, X, y, residual, y_pred, sample_mask, learning_rate, k)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred):
# terminal_region = numpy.where(terminal_regions == leaf)[0]
y_signed = 2 * y - 1
z = self.A.dot((terminal_regions == leaf) * y_signed)
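        # one Newton step for the leaf value: with f(alpha) = sum_i e_i * exp(-alpha * z_i)
        # (e = update_exponents), f'(0) = -sum(e*z) and f''(0) = sum(e*z^2),
        # giving alpha = sum(e*z) / sum(e*z^2); the 1e-10 guards against empty leaves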
alpha = numpy.sum(self.update_exponents * z) / (numpy.sum(self.update_exponents * z * z) + 1e-10)
tree.value[leaf, 0, 0] = alpha
# Descendants of KnnLossFunction - particular cases, each has its own
# algorithm of generating A and w
class SimpleKnnLossFunction(KnnLossFunction):
def __init__(self, uniform_variables, knn=10, uniform_label=1, distinguish_classes=True, row_norm=1.):
"""A matrix is square, each row corresponds to a single event in train dataset, in each row we put ones
to the closest neighbours of that event if this event from class along which we want to have uniform prediction.
:param list[str] uniform_variables: the features, along which uniformity is desired
:param int knn: the number of nonzero elements in the row, corresponding to event in 'uniform class'
:param int|list[int] uniform_label: the label (labels) of 'uniform classes'
:param bool distinguish_classes: if True, 1's will be placed only for
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.row_norm = row_norm
self.uniform_label = [uniform_label] if isinstance(uniform_label, numbers.Number) else uniform_label
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
sample_weight = numpy.ones(len(trainX))
A_parts = []
w_parts = []
for label in self.uniform_label:
label_mask = trainY == label
n_label = numpy.sum(label_mask)
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, label_mask,
n_neighbours=self.knn)
else:
mask = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, mask, self.knn)
knn_indices = knn_indices[label_mask, :]
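            # CSR block for this label: row r holds `knn` entries equal to
            # row_norm/knn, in the columns of the r-th event's neighbours, so
            # indptr advances in fixed steps of `knn`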
ind_ptr = numpy.arange(0, n_label * self.knn + 1, self.knn)
column_indices = knn_indices.flatten()
data = numpy.ones(n_label * self.knn, dtype=float) * self.row_norm / self.knn
A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
w_part = numpy.mean(numpy.take(sample_weight, knn_indices), axis=1)
assert A_part.shape[0] == len(w_part)
A_parts.append(A_part)
w_parts.append(w_part)
for label in set(trainY).difference(self.uniform_label):
label_mask = trainY == label
n_label = numpy.sum(label_mask)
ind_ptr = numpy.arange(0, n_label + 1)
column_indices = numpy.where(label_mask)[0].flatten()
data = numpy.ones(n_label, dtype=float) * self.row_norm
A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])
w_part = sample_weight[label_mask]
A_parts.append(A_part)
w_parts.append(w_part)
A = sparse.vstack(A_parts, format='csr', dtype=float)
w = numpy.concatenate(w_parts)
return A, w
class SimpleKnnLossFunctionEyeBg(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for signal.
For background we have identity matrix.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(~ is_signal)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class SimpleKnnLossFunctionKnnOnDiagonalSignal(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for signal. For background we
have identity matrix times self.knn.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(is_signal == False)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
        # the diagonal entry of a single-entry row i sits at position ind_ptr[i] in `data`
        data[numpy.array(ind_ptr)[bg_index]] = self.knn
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class SimpleKnnLossFunctionKnnOnDiagonalBg(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for signal. For background we
have identity matrix times self.knn.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(is_signal == True)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
        # the diagonal entry of a single-entry row i sits at position ind_ptr[i] in `data`
        data[numpy.array(ind_ptr)[bg_index]] = self.knn
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class SimpleKnnLossFunctionEyeSignal(KnnLossFunction):
def __init__(self, uniform_variables, knn=5, distinguish_classes=True, diagonal=0.):
"""A matrix is square, each row corresponds to a single event in train dataset,
in each row we put ones to the closest neighbours of that event for background.
For signal we have identity matrix.
If distinguish_classes==True, only events of the same class are chosen.
"""
self.knn = knn
self.distinguish_classes = distinguish_classes
self.diagonal = diagonal
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
if self.distinguish_classes:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, self.knn)
if not self.distinguish_classes:
is_signal = numpy.ones(len(trainY), dtype=numpy.bool)
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, trainX, is_signal, self.knn)
bg_index = numpy.where(is_signal)[0]
j = 0
k = 0
ind_ptr = [0]
x = set(bg_index)
column_indices_help = []
for i in range(len(trainX)):
if i in x:
column_indices_help.append(bg_index[j])
ind_ptr.append(k + 1)
k += 1
j += 1
else:
for n in knn_indices[i]:
column_indices_help.append(n)
ind_ptr.append(k + self.knn)
k += self.knn
column_indices = numpy.array(column_indices_help)
data = numpy.ones(len(column_indices))
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(len(trainX), len(trainX)))
w = numpy.ones(len(trainX))
return A, w
class PairwiseKnnLossFunction(KnnLossFunction):
def __init__(self, uniform_variables, knn, exclude_self=True, penalize_large_preds=True):
""" A is rectangular matrix, in each row we have only two '1's,
all other elements are zeros, these two '1's are placed in the columns, corresponding to neighbours
exclude_self: bool, exclude self from knn?
"""
self.knn = knn
self.exclude_self = exclude_self
self.penalize_large_preds = penalize_large_preds
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
knn = self.knn
if self.exclude_self:
knn_indices = \
commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, knn+1)[:, 1:]
else:
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, knn)
        rows = numpy.arange(len(trainX) * knn)
columns1 = numpy.repeat(numpy.arange(0, len(trainX)), knn)
columns2 = knn_indices.flatten()
data = numpy.ones(len(rows))
A = sparse.csr_matrix((data, (rows, columns1)), shape=[len(trainX) * knn, len(trainX)]) + \
sparse.csr_matrix((data, (rows, columns2)), shape=[len(trainX) * knn, len(trainX)])
if self.penalize_large_preds:
penalty1 = - sparse.eye(len(trainX), len(trainX))
penalty2 = sparse.eye(len(trainX), len(trainX))
A = sparse.vstack((A, penalty1, penalty2), format="csr")
w = numpy.ones(A.shape[0])
return A, w
class RandomKnnLossFunction(KnnLossFunction):
def __init__(self, uniform_variables, n_rows, knn=5, knn_factor=3, large_preds_penalty=1.):
"""A general loss,
at each iteration it takes some random event from train dataset,
and selects randomly knn of its knn*knn_factor neighbours, the process is repeated 'n_rows' times"""
self.n_rows = n_rows
self.knn = knn
self.knn_factor = knn_factor
self.large_preds_penalty = large_preds_penalty
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
is_signal = trainY > 0.5
knn_max = int(self.knn * self.knn_factor)
knn_indices = commonutils.computeKnnIndicesOfSameClass(self.uniform_variables, trainX, is_signal, knn_max)
selected_originals = numpy.random.randint(0, len(trainX), self.n_rows)
selected_knns = knn_indices[selected_originals, :]
groups_indices = numpy.zeros((self.n_rows, self.knn), dtype=numpy.int)
for i, event_neighs in enumerate(selected_knns):
indices = numpy.random.permutation(knn_max)[:self.knn]
groups_indices[i] = event_neighs[indices]
ind_ptr = numpy.arange(0, self.n_rows * self.knn + 1, self.knn)
column_indices = groups_indices.flatten()
data = numpy.ones(self.n_rows * self.knn)
A = sparse.csr_matrix((data, column_indices, ind_ptr), shape=(self.n_rows, len(trainX)))
if self.large_preds_penalty > 0:
penalty1 = - self.large_preds_penalty * sparse.eye(len(trainX), len(trainX))
penalty2 = self.large_preds_penalty * sparse.eye(len(trainX), len(trainX))
A = sparse.vstack((A, penalty1, penalty2), format="csr")
w = numpy.ones(A.shape[0])
return A, w
class AdaLossFunction(KnnLossFunction):
def __init__(self):
"""Good old Ada loss, implemented as version of KnnLostFunction """
KnnLossFunction.__init__(self, None)
def compute_parameters(self, trainX, trainY):
return sparse.eye(len(trainX), len(trainX)), numpy.ones(len(trainX))
class DistanceBasedKnnFunction(KnnLossFunction):
def __init__(self, uniform_variables, knn=None, distance_dependence=None, large_preds_penalty=0.,
row_normalize=False):
"""If knn is None, the matrix will be filled, otherwise it will be sparse
with knn as number of nonzero cells,
distance dependence is function, that takes distance between i-th and j-th
events and returns a_ij
"""
self.knn = knn
self.distance_dependence = distance_dependence
        self.large_preds_penalty = large_preds_penalty
self.row_normalize = row_normalize
KnnLossFunction.__init__(self, uniform_variables)
def compute_parameters(self, trainX, trainY):
for variable in self.uniform_variables:
if variable not in trainX.columns:
raise ValueError("Dataframe is missing %s column" % variable)
if self.knn is None:
A = pairwise_distances(trainX[self.uniform_variables])
A = self.distance_dependence(A)
A *= (trainY[:, numpy.newaxis] == trainY[numpy.newaxis, :])
else:
is_signal = trainY > 0.5
# computing knn indices of same type
uniforming_features_of_signal = numpy.array(trainX.ix[is_signal, self.uniform_variables])
neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_signal)
signal_distances, knn_signal_indices = neighbours.kneighbors(uniforming_features_of_signal)
knn_signal_indices = numpy.where(is_signal)[0].take(knn_signal_indices)
uniforming_features_of_bg = numpy.array(trainX.ix[~is_signal, self.uniform_variables])
neighbours = NearestNeighbors(n_neighbors=self.knn, algorithm='kd_tree').fit(uniforming_features_of_bg)
bg_distances, knn_bg_indices = neighbours.kneighbors(uniforming_features_of_bg)
knn_bg_indices = numpy.where(~is_signal)[0].take(knn_bg_indices)
signal_distances = self.distance_dependence(signal_distances.flatten())
bg_distances = self.distance_dependence(bg_distances.flatten())
signal_ind_ptr = numpy.arange(0, sum(is_signal) * self.knn + 1, self.knn)
bg_ind_ptr = numpy.arange(0, sum(~is_signal) * self.knn + 1, self.knn)
signal_column_indices = knn_signal_indices.flatten()
bg_column_indices = knn_bg_indices.flatten()
            A_sig = sparse.csr_matrix((signal_distances, signal_column_indices, signal_ind_ptr),
                                      shape=(sum(is_signal), len(trainX)))
            A_bg = sparse.csr_matrix((bg_distances, bg_column_indices, bg_ind_ptr),
                                     shape=(sum(~is_signal), len(trainX)))
A = sparse.vstack((A_sig, A_bg), format='csr')
if self.row_normalize:
from sklearn.preprocessing import normalize
A = normalize(A, norm='l1', axis=1)
return A, numpy.ones(A.shape[0])
def compute_efficiencies(mask, y_pred, sample_weight):
"""For each event computes it position among other events by prediction. """
order = numpy.argsort(y_pred[mask])
weights = sample_weight[mask][order]
efficiencies = (numpy.cumsum(weights) - 0.5 * weights) / numpy.sum(weights)
return efficiencies[numpy.argsort(order)]
def test_compute_efficiency(size=100):
y_pred = numpy.random.random(size)
mask = numpy.random.random(size) > 0.5
effs = compute_efficiencies(mask, y_pred, sample_weight=numpy.ones(size))
assert len(effs) == numpy.sum(mask)
assert len(effs) == len(set(effs))
assert numpy.all(effs[numpy.argsort(y_pred[mask])] == numpy.sort(effs))
effs2 = compute_efficiencies(numpy.where(mask)[0], y_pred, sample_weight=numpy.ones(size))
assert numpy.all(effs == effs2)
print("Compute efficiency is ok")
test_compute_efficiency()
def exp_margin(margin):
""" margin = - y_signed * y_pred """
return numpy.exp(numpy.clip(margin, -1e5, 2))
class FlatnessLossFunction(LossFunction, BaseEstimator):
def __init__(self, uniform_variables, bins=10, uniform_label=1, power=2., ada_coefficient=1.,
allow_wrong_signs=True, keep_debug_info=False):
"""
        This loss function contains a penalty for non-flatness plus an AdaBoost-like term scaled by ada_coefficient.
        The penalty for non-flatness is computed over bins.
        :param uniform_variables: the variables along which we want to obtain uniformity
        :param bins: the number of bins along each axis
        :param uniform_label: int | list(int), the labels for which we want to obtain uniformity
        :param power: the loss contains the difference |F - F_bin|^p, where p is this power
        :param ada_coefficient: coefficient of the ada_loss added to this one. The greater the coefficient,
            the less we tend to uniformity.
        :param allow_wrong_signs: defines whether the gradient may have a different sign from the "sign of class"
            (i.e. may have a negative gradient on signal)
"""
self.uniform_variables = uniform_variables
self.bins = bins
self.uniform_label = numpy.array([uniform_label]) if isinstance(uniform_label, numbers.Number) \
else numpy.array(uniform_label)
self.power = power
self.ada_coefficient = ada_coefficient
self.allow_wrong_signs = allow_wrong_signs
self.keep_debug_info = keep_debug_info
LossFunction.__init__(self, 1)
def fit(self, X, y, sample_weight=None):
assert len(X) == len(y), 'The lengths are different'
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
self.group_indices = defaultdict(list)
        # The weight of a bin is the mean of the weights inside the bin
self.group_weights = defaultdict(list)
occurences = numpy.zeros(len(X), dtype=int)
for label in self.uniform_label:
group_indices = self.compute_groups_indices(X, y, sample_weight=sample_weight, label=label)
# cleaning the bins - deleting tiny or empty groups, canonizing
for indices in group_indices:
if len(indices) < 5:
# ignoring very small groups
continue
assert numpy.all((y == label)[indices])
self.group_indices[label].append(numpy.array(indices))
self.group_weights[label].append(numpy.mean(sample_weight[indices]))
occurences[indices] += 1
y = numpy.array(y, dtype=int)
needed_indices = numpy.in1d(y, self.uniform_label)
out_of_bins = numpy.sum((occurences == 0) & needed_indices)
if out_of_bins > 0.01 * len(X):
print("warning: %i events are out of all bins" % out_of_bins)
self.sample_weight = sample_weight
self.event_weights = sample_weight / (occurences + 1e-10)
if self.keep_debug_info:
self.debug_dict = defaultdict(list)
return self
def compute_groups_indices(self, X, y, sample_weight, label):
"""Returns a list, each element is events' indices in some group."""
mask = y == label
bin_limits = []
for var in self.uniform_variables:
bin_limits.append(numpy.linspace(numpy.min(X[var][mask]), numpy.max(X[var][mask]), 2 * self.bins + 1))
result = list()
for shift in [0, 1]:
bin_limits2 = []
for axis_limits in bin_limits:
bin_limits2.append(axis_limits[1 + shift:-1:2])
bin_indices = reports.compute_bin_indices(X, self.uniform_variables, bin_limits2)
result += reports.bin_to_group_indices(bin_indices, mask=mask)
return result
def __call__(self, y, pred):
# computing the common distribution of signal
# taking only signal by now
# this is approximate computation!
# TODO reimplement, this is wrong implementation
pred = numpy.ravel(pred)
loss = 0
for label in self.uniform_label:
needed_indices = y == label
sorted_pred = numpy.sort(pred[needed_indices])
for bin_weight, indices_in_bin in zip(self.group_weights[label], self.group_indices[label]):
probs_in_bin = numpy.take(pred, indices_in_bin)
probs_in_bin = numpy.sort(probs_in_bin)
positions = numpy.searchsorted(sorted_pred, probs_in_bin)
global_effs = positions / float(len(sorted_pred))
local_effs = (numpy.arange(0, len(probs_in_bin)) + 0.5) / len(probs_in_bin)
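                # the bin loss approximates sum over bin events of
                # |F_global(pred) - F_bin(pred)|^power, a discrete distance between
                # the bin's efficiency curve and the global one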
bin_loss = numpy.sum((global_effs - local_effs) ** self.power)
loss += bin_loss * bin_weight
# Ada loss now
loss += self.ada_coefficient * numpy.sum(numpy.exp(-y * pred))
return loss
def negative_gradient(self, y, y_pred, **kw_args):
y_pred = numpy.ravel(y_pred)
neg_gradient = numpy.zeros(len(y))
for label in self.uniform_label:
label_mask = y == label
global_efficiencies = numpy.zeros(len(y_pred), dtype=float)
global_efficiencies[label_mask] = compute_efficiencies(label_mask, y_pred, sample_weight=self.sample_weight)
for bin_weight, indices_in_bin in zip(self.group_weights[label], self.group_indices[label]):
assert numpy.all(label_mask[indices_in_bin]), "TODO delete"
local_effs = compute_efficiencies(indices_in_bin, y_pred, sample_weight=self.sample_weight)
global_effs = global_efficiencies[indices_in_bin]
bin_gradient = self.power * numpy.sign(local_effs - global_effs) \
* numpy.abs(local_effs - global_effs) ** (self.power - 1)
# TODO multiply by derivative of F_global ?
neg_gradient[indices_in_bin] += bin_weight * bin_gradient
assert numpy.all(neg_gradient[~numpy.in1d(y, self.uniform_label)] == 0)
y_signed = 2 * y - 1
if self.keep_debug_info:
self.debug_dict['pred'].append(numpy.copy(y_pred))
self.debug_dict['fl_grad'].append(numpy.copy(neg_gradient))
self.debug_dict['ada_grad'].append(y_signed * self.sample_weight * numpy.exp(- y_signed * y_pred))
# adding ada
neg_gradient += self.ada_coefficient * y_signed * self.sample_weight \
* exp_margin(-self.ada_coefficient * y_signed * y_pred)
if not self.allow_wrong_signs:
neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5)
return neg_gradient
# def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_mask, learning_rate=1.0, k=0):
# the standard version is used
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred):
# terminal_region = numpy.where(terminal_regions == leaf)[0]
tree.value[leaf, 0, 0] = numpy.clip(tree.value[leaf, 0, 0], -10, 10)
# TODO think of real minimization
def init_estimator(self, X=None, y=None):
return LogOddsEstimator()
class NewRF(RandomForestRegressor):
"""Just a random forest regressor, that returns a two-dimensional array"""
def predict(self, X):
return RandomForestRegressor.predict(self, X)[:, numpy.newaxis]
class NewFlatnessLossFunction(FlatnessLossFunction, BaseEstimator):
def __init__(self, uniform_variables, n_neighbours=100, uniform_label=1, ada_coefficient=1.,
allow_wrong_signs=True, keep_debug_info=False, uniforming_factor=1., update_tree=True):
"""
:param int|list[int] uniform_label: labels of classes for which the uniformity of predictions is desired
"""
self.uniform_variables = uniform_variables
self.n_neighbours = n_neighbours
self.uniform_label = numpy.array([uniform_label]) if isinstance(uniform_label, numbers.Number) \
else numpy.array(uniform_label)
self.ada_coefficient = ada_coefficient
self.allow_wrong_signs = allow_wrong_signs
self.keep_debug_info = keep_debug_info
self.uniforming_factor = uniforming_factor
self.update_tree = update_tree
LossFunction.__init__(self, 1)
def fit(self, X, y, sample_weight=None):
assert len(X) == len(y), 'The lengths are different'
# sample_weight = check_sample_weight(y, sample_weight=sample_weight)
y = column_or_1d(y)
assert set(y) == {0,1}, "Only two classes are supported, their labels should be 0 and 1"
self.knn_indices = defaultdict(list)
for label in self.uniform_label:
label_mask = y == label
knn_indices = commonutils.computeSignalKnnIndices(self.uniform_variables, X, label_mask, n_neighbors=self.n_neighbours)
# taking only rows, corresponding to this class
self.knn_indices[label] = knn_indices[label_mask, :]
if self.keep_debug_info:
self.debug_dict = defaultdict(list)
return self
def __call__(self, y, pred):
return 1
def init_estimator(self, X=None, y=None):
return NewRF()
def negative_gradient(self, y, y_pred, sample_weight=None, **kw_args):
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
y_pred = numpy.ravel(y_pred)
neg_gradient = numpy.zeros(len(y))
for label in self.uniform_label:
label_mask = y == label
assert sum(label_mask) == len(self.knn_indices[label])
# global_efficiencies = numpy.zeros(len(y_pred), dtype=float)
# global_efficiencies[label_mask] = compute_efficiencies(label_mask, y_pred, sample_weight=self.sample_weight)
values = y_pred[label_mask]
knn_values = numpy.take(y_pred, self.knn_indices[label])
knn_weights = numpy.take(sample_weight, self.knn_indices[label])
# TODO use heaviside here?
local_efficiencies = numpy.average(knn_values > values[:, numpy.newaxis], axis=1, weights=knn_weights)
global_targets = commonutils.weighted_percentile(values, local_efficiencies,
sample_weight=sample_weight[label_mask])
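            # sketch: local_efficiencies[i] is the weighted share of the event's knn
            # whose prediction exceeds its own; weighted_percentile maps that
            # efficiency back to the global prediction scale, so the update pulls
            # each prediction toward its globally equivalent value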
neg_gradient[label_mask] += self.uniforming_factor * (global_targets - values)
assert numpy.all(neg_gradient[~numpy.in1d(y, self.uniform_label)] == 0)
y_signed = 2 * y - 1
if self.keep_debug_info:
self.debug_dict['pred'].append(numpy.copy(y_pred))
self.debug_dict['fl_grad'].append(numpy.copy(neg_gradient))
self.debug_dict['ada_grad'].append(y_signed * sample_weight * numpy.exp(- y_signed * y_pred))
# adding ada
neg_gradient += self.ada_coefficient * y_signed * sample_weight \
* exp_margin(- self.ada_coefficient * y_signed * y_pred)
if not self.allow_wrong_signs:
neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5)
return neg_gradient
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred):
if not self.update_tree:
return
terminal_region = numpy.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = numpy.median(residual)
class MyGradientBoostingClassifier(GBClassifier):
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2, min_samples_leaf=1,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0, train_variables=None):
"""
        GradientBoosting from sklearn, modified to work with KnnLossFunction and its variants.
        train_variables are the variables used when training the trees.
:param LossFunction|str loss:
"""
self.train_variables = train_variables
GBClassifier.__init__(self, loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
subsample=subsample, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
max_depth=max_depth, init=init, random_state=random_state, max_features=max_features, verbose=verbose)
def get_train_variables(self, X):
if self.train_variables is None:
return X
else:
return X[self.train_variables]
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes
``0, 1, ..., n_classes_-1``
sample_weight: array-like, shape = [n_samples], default None,
positive weights if they are needed
Returns
-------
self : object
Returns self.
"""
y = column_or_1d(y, warn=True)
self.classes_, y = numpy.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
assert self.n_classes_ == 2, "at this moment only two-class classification is supported"
self._check_params()
        # fit the loss if it needs fitting
        if isinstance(self.loss_, (KnnLossFunction, FlatnessLossFunction)):
self.loss_.fit(X, y)
X = self.get_train_variables(X)
# Check input
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
n_samples, n_features = X.shape
self.n_features = n_features
random_state = check_random_state(self.random_state)
# pull frequently used parameters into local scope
subsample = self.subsample
do_oob = subsample < 1.0
# allocate model state data structures
self.estimators_ = numpy.empty((self.n_estimators, self.loss_.K), dtype=numpy.object)
self.train_score_ = numpy.zeros((self.n_estimators,), dtype=numpy.float64)
sample_mask = numpy.ones((n_samples,), dtype=numpy.bool)
n_inbag = max(1, int(subsample * n_samples))
if self.verbose:
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
if do_oob:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
verbose_fmt = ' '.join(verbose_fmt)
# print the header line
print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields))
            # print verbose info each time i % verbose_mod == 0
verbose_mod = 1
start_time = time()
# fit initial model
self.init_.fit(X, y)
# init predictions
y_pred = self.init_.predict(X)
# perform boosting iterations
for i in range(self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
# fit next stage of tree
args = {}
# TODO write own gradient boosting
if sklearn.__version__ >= '0.15':
args = {'criterion': 'mse', 'splitter': 'best', }
y_pred = self._fit_stage(i, X, y, y_pred=y_pred, sample_mask=sample_mask, random_state=random_state, **args)
self.train_score_[i] = self.loss_(y, y_pred)
if self.verbose > 0:
if (i + 1) % verbose_mod == 0:
remaining_time = (self.n_estimators - (i + 1)) * (time() - start_time) / float(i + 1)
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(verbose_fmt.format(iter=i + 1,
train_score=self.train_score_[i],
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
verbose_mod *= 10
return self
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
# everything connected with loss was moved to self.fit
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0")
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0")
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0)")
        # we allow passing a LossFunction object directly
if isinstance(self.loss, LossFunction):
self.loss_ = self.loss
else:
if self.loss not in LOSS_FUNCTIONS:
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance if len(self.classes_) > 2 else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if self.subsample <= 0.0 or self.subsample > 1:
raise ValueError("subsample must be in (0,1]")
if self.init is not None:
if (not hasattr(self.init, 'fit') or not hasattr(self.init, 'predict')):
raise ValueError("init must be valid estimator")
self.init_ = self.init
else:
self.init_ = self.loss_.init_estimator()
def predict(self, X):
return GBClassifier.predict(self, self.get_train_variables(X))
def predict_proba(self, X):
return GBClassifier.predict_proba(self, self.get_train_variables(X))
def staged_predict_proba(self, X):
return GBClassifier.staged_predict_proba(self, self.get_train_variables(X))
def test_gradient(loss, size=1000):
X, y = commonutils.generate_sample(size, 10)
loss.fit(X, y)
pred = numpy.random.random(size)
epsilon = 1e-7
val = loss(y, pred)
gradient = numpy.zeros_like(pred)
for i in range(size):
pred2 = pred.copy()
pred2[i] += epsilon
val2 = loss(y, pred2)
gradient[i] = (val2 - val) / epsilon
n_gradient = loss.negative_gradient(y, pred)
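    # finite-difference check: `gradient` approximates d loss / d pred, so the
    # analytic negative gradient should equal -gradient (up to epsilon)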
assert numpy.all(abs(n_gradient + gradient) < 1e-3), "Problem with functional gradient"
def test_gradient_boosting(samples=1000):
# Generating some samples correlated with first variable
distance = 0.6
testX, testY = generate_sample(samples, 10, distance)
trainX, trainY = generate_sample(samples, 10, distance)
# We will try to get uniform distribution along this variable
uniform_variables = ['column0']
n_estimators = 20
loss1 = SimpleKnnLossFunction(uniform_variables)
loss2 = PairwiseKnnLossFunction(uniform_variables, knn=10)
loss3 = AdaLossFunction()
loss4 = RandomKnnLossFunction(uniform_variables, samples * 2, knn=5, knn_factor=3)
loss5 = DistanceBasedKnnFunction(uniform_variables, knn=10, distance_dependence=lambda r: numpy.exp(-0.1 * r))
loss6 = FlatnessLossFunction(uniform_variables, ada_coefficient=0.5)
loss7 = FlatnessLossFunction(uniform_variables, ada_coefficient=0.5, uniform_label=[0,1])
loss8 = NewFlatnessLossFunction(uniform_variables, ada_coefficient=0.5, uniform_label=1)
loss9 = NewFlatnessLossFunction(uniform_variables, ada_coefficient=0.5, uniform_label=[0, 1])
for loss in [loss1, loss2, loss3, loss4, loss5, loss6, loss7, loss8, loss9]:
result = MyGradientBoostingClassifier(loss=loss, min_samples_split=20, max_depth=5, learning_rate=.2,
subsample=0.7, n_estimators=n_estimators, train_variables=None)\
.fit(trainX[:samples], trainY[:samples]).score(testX, testY)
assert result >= 0.7, "The quality is too poor: %.3f" % result
# TODO return this code and test losses
# for loss in [loss1, loss2, loss3, loss4, loss5]:
#     test_gradient(loss)
print('uniform gradient boosting is ok')
test_gradient_boosting()
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8 -*-
text = u"""
Mr. Speaker, Mr. President, distinguished Members of the Congress, honored guests, and fellow citizens:
I come before you to report on the state of our Union, and I'm pleased to report that after 4 years of united effort, the American people have brought forth a nation renewed, stronger, freer, and more secure than before.
Four years ago we began to change, forever I hope, our assumptions about government and its place in our lives. Out of that change has come great and robust growth-in our confidence, our economy, and our role in the world.
Tonight America is stronger because of the values that we hold dear. We believe faith and freedom must be our guiding stars, for they show us truth, they make us brave, give us hope, and leave us wiser than we were. Our progress began not in Washington, DC, but in the hearts of our families, communities, workplaces, and voluntary groups which, together, are unleashing the invincible spirit of one great nation under God.
Four years ago we said we would invigorate our economy by giving people greater freedom and incentives to take risks and letting them keep more of what they earned. We did what we promised, and a great industrial giant is reborn.
Tonight we can take pride in 25 straight months of economic growth, the strongest in 34 years; a 3-year inflation average of 3.9 percent, the lowest in 17 years; and 7.3 million new jobs in 2 years, with more of our citizens working than ever before.
New freedom in our lives has planted the rich seeds for future success:
For an America of wisdom that honors the family, knowing that if the family goes, so goes our civilization;
For an America of vision that sees tomorrow's dreams in the learning and hard work we do today;
For an America of courage whose service men and women, even as we meet, proudly stand watch on the frontiers of freedom;
For an America of compassion that opens its heart to those who cry out for help.
We have begun well. But it's only a beginning. We're not here to congratulate ourselves on what we have done but to challenge ourselves to finish what has not yet been done.
We're here to speak for millions in our inner cities who long for real jobs, safe neighborhoods, and schools that truly teach. We're here to speak for the American farmer, the entrepreneur, and every worker in industries fighting to modernize and compete. And, yes, we're here to stand, and proudly so, for all who struggle to break free from totalitarianism, for all who know in their hearts that freedom is the one true path to peace and human happiness.
Proverbs tell us, without a vision the people perish. When asked what great principle holds our Union together, Abraham Lincoln said: "Something in that Declaration giving liberty, not alone to the people of this country, but hope to the world for all future time."
We honor the giants of our history not by going back but forward to the dreams their vision foresaw. My fellow citizens, this nation is poised for greatness. The time has come to proceed toward a great new challenge—a second American Revolution of hope and opportunity; a revolution carrying us to new heights of progress by pushing back frontiers of knowledge and space; a revolution of spirit that taps the soul of America, enabling us to summon greater strength than we've ever known; and a revolution that carries beyond our shores the golden promise of human freedom in a world of peace.
Let us begin by challenging our conventional wisdom. There are no constraints on the human mind, no walls around the human spirit, no barriers to our progress except those we ourselves erect. Already, pushing down tax rates has freed our economy to vault forward to record growth.
In Europe, they're calling it "the American Miracle." Day by day, we're shattering accepted notions of what is possible. When I was growing up, we failed to see how a new thing called radio would transform our marketplace. Well, today, many have not yet seen how advances in technology are transforming our lives.
In the late 1950's workers at the AT&T semiconductor plant in Pennsylvania produced five transistors a day for $7.50 apiece. They now produce over a million for less than a penny apiece.
New laser techniques could revolutionize heart bypass surgery, cut diagnosis time for viruses linked to cancer from weeks to minutes, reduce hospital costs dramatically, and hold out new promise for saving human lives.
Our automobile industry has overhauled assembly lines, increased worker productivity, and is competitive once again.
We stand on the threshold of a great ability to produce more, do more, be more. Our economy is not getting older and weaker; it's getting younger and stronger. It doesn't need rest and supervision; it needs new challenge, greater freedom. And that word "freedom" is the key to the second American revolution that we need to bring about.
Let us move together with an historic reform of tax simplification for fairness and growth. Last year I asked then-Treasury Secretary Regan to develop a plan to simplify the tax code, so all taxpayers would be treated more fairly and personal tax rates could come further down.
We have cut tax rates by almost 25 percent, yet the tax system remains unfair and limits our potential for growth. Exclusions and exemptions cause similar incomes to be taxed at different levels. Low-income families face steep tax barriers that make hard lives even harder. The Treasury Department has produced an excellent reform plan, whose principles will guide the final proposal that we will ask you to enact.
One thing that tax reform will not be is a tax increase in disguise. We will not jeopardize the mortgage interest deduction that families need. We will reduce personal tax rates as low as possible by removing many tax preferences. We will propose a top rate of no more than 35 percent, and possibly lower. And we will propose reducing corporate rates, while maintaining incentives for capital formation.
To encourage opportunity and jobs rather than dependency and welfare, we will propose that individuals living at or near the poverty line be totally exempt from Federal income tax. To restore fairness to families, we will propose increasing significantly the personal exemption.
And tonight, I am instructing Treasury Secretary James Baker—I have to get used to saying that—to begin working with congressional authors and committees for bipartisan legislation conforming to these principles. We will call upon the American people for support and upon every man and woman in this Chamber. Together, we can pass, this year, a tax bill for fairness, simplicity, and growth, making this economy the engine of our dreams and America the investment capital of the world. So let us begin.
Tax simplification will be a giant step toward unleashing the tremendous pent-up power of our economy. But a second American revolution must carry the promise of opportunity for all. It is time to liberate the spirit of enterprise in the most distressed areas of our country.
This government will meet its responsibility to help those in need. But policies that increase dependency, break up families, and destroy self-respect are not progressive; they're reactionary. Despite our strides in civil rights, blacks, Hispanics, and all minorities will not have full and equal power until they have full economic power.
We have repeatedly sought passage of enterprise zones to help those in the abandoned corners of our land find jobs, learn skills, and build better lives. This legislation is supported by a majority of you.
Mr. Speaker, I know we agree that there must be no forgotten Americans. Let us place new dreams in a million hearts and create a new generation of entrepreneurs by passing enterprise zones this year. And, Tip, you could make that a birthday present.
Nor must we lose the chance to pass our youth employment opportunity wage proposal. We can help teenagers, who have the highest unemployment rate, find summer jobs, so they can know the pride of work and have confidence in their futures.
We'll continue to support the Job Training Partnership Act, which has a nearly two-thirds job placement rate. Credits in education and health care vouchers will help working families shop for services that they need.
Our administration is already encouraging certain low-income public housing residents to own and manage their own dwellings. It's time that all public housing residents have that opportunity of ownership.
The Federal Government can help create a new atmosphere of freedom. But States and localities, many of which enjoy surpluses from the recovery, must not permit their tax and regulatory policies to stand as barriers to growth.
Let us resolve that we will stop spreading dependency and start spreading opportunity; that we will stop spreading bondage and start spreading freedom.
There are some who say that growth initiatives must await final action on deficit reductions. Well, the best way to reduce deficits is through economic growth. More businesses will be started, more investments made, more jobs created, and more people will be on payrolls paying taxes. The best way to reduce government spending is to reduce the need for spending by increasing prosperity. Each added percentage point per year of real GNP growth will lead to cumulative reduction in deficits of nearly $200 billion over 5 years.
To move steadily toward a balanced budget, we must also lighten government's claim on our total economy. We will not do this by raising taxes. We must make sure that our economy grows faster than the growth in spending by the Federal Government. In our fiscal year 1986 budget, overall government program spending will be frozen at the current level. It must not be one dime higher than fiscal year 1985, and three points are key.
First, the social safety net for the elderly, the needy, the disabled, and unemployed will be left intact. Growth of our major health care programs, Medicare and Medicaid, will be slowed, but protections for the elderly and needy will be preserved.
Second, we must not relax our efforts to restore military strength just as we near our goal of a fully equipped, trained, and ready professional corps. National security is government's first responsibility; so in past years defense spending took about half the Federal budget. Today it takes less than a third. We've already reduced our planned defense expenditures by nearly a hundred billion dollars over the past 4 years and reduced projected spending again this year.
You know, we only have a military-industrial complex until a time of danger, and then it becomes the arsenal of democracy. Spending for defense is investing in things that are priceless—peace and freedom.
Third, we must reduce or eliminate costly government subsidies. For example, deregulation of the airline industry has led to cheaper airfares, but on Amtrak taxpayers pay about $35 per passenger every time an Amtrak train leaves the station. It's time we ended this huge Federal subsidy.
Our farm program costs have quadrupled in recent years. Yet I know from visiting farmers, many in great financial distress, that we need an orderly transition to a market-oriented farm economy. We can help farmers best not by expanding Federal payments but by making fundamental reforms, keeping interest rates heading down, and knocking down foreign trade barriers to American farm exports.
We're moving ahead with Grace commission reforms to eliminate waste and improve government's management practices. In the long run, we must protect the taxpayers from government. And I ask again that you pass, as 32 States have now called for, an amendment mandating the Federal Government spend no more than it takes in. And I ask for the authority, used responsibly by 43 Governors, to veto individual items in appropriation bills. Senator Mattingly has introduced a bill permitting a 2-year trial run of the line-item veto. I hope you'll pass and send that legislation to my desk.
Nearly 50 years of government living beyond its means has brought us to a time of reckoning. Ours is but a moment in history. But one moment of courage, idealism, and bipartisan unity can change American history forever.
Sound monetary policy is key to long-running economic strength and stability. We will continue to cooperate with the Federal Reserve Board, seeking a steady policy that ensures price stability without keeping interest rates artificially high or needlessly holding down growth.
Reducing unneeded red tape and regulations, and deregulating the energy, transportation, and financial industries have unleashed new competition, giving consumers more choices, better services, and lower prices. In just one set of grant programs we have reduced 905 pages of regulations to 31. We seek to fully deregulate natural gas to bring on new supplies and bring us closer to energy independence. Consistent with safety standards, we will continue removing restraints on the bus and railroad industries, we will soon end up legislation—or send up legislation, I should say—to return Conrail to the private sector where it belongs, and we will support further deregulation of the trucking industry.
Every dollar the Federal Government does not take from us, every decision it does not make for us will make our economy stronger, our lives more abundant, our future more free.
Our second American revolution will push on to new possibilities not only on Earth but in the next frontier of space. Despite budget restraints, we will seek record funding for research and development.
We've seen the success of the space shuttle. Now we're going to develop a permanently manned space station and new opportunities for free enterprise, because in the next decade Americans and our friends around the world will be living and working together in space.
In the zero gravity of space, we could manufacture in 30 days lifesaving medicines it would take 30 years to make on Earth. We can make crystals of exceptional purity to produce super computers, creating jobs, technologies, and medical breakthroughs beyond anything we ever dreamed possible.
As we do all this, we'll continue to protect our natural resources. We will seek reauthorization and expanded funding for the Superfund program to continue cleaning up hazardous waste sites which threaten human health and the environment.
Now, there's another great heritage to speak of this evening. Of all the changes that have swept America the past 4 years, none brings greater promise than our rediscovery of the values of faith, freedom, family, work, and neighborhood.
We see signs of renewal in increased attendance in places of worship; renewed optimism and faith in our future; love of country rediscovered by our young, who are leading the way. We've rediscovered that work is good in and of itself, that it ennobles us to create and contribute no matter how seemingly humble our jobs. We've seen a powerful new current from an old and honorable tradition—American generosity.
From thousands answering Peace Corps appeals to help boost food production in Africa, to millions volunteering time, corporations adopting schools, and communities pulling together to help the neediest among us at home, we have refound our values. Private sector initiatives are crucial to our future.
I thank the Congress for passing equal access legislation giving religious groups the same right to use classrooms after school that other groups enjoy. But no citizen need tremble, nor the world shudder, if a child stands in a classroom and breathes a prayer. We ask you again, give children back a right they had for a century and a half or more in this country.
The question of abortion grips our nation. Abortion is either the taking of a human life or it isn't. And if it is—and medical technology is increasingly showing it is—it must be stopped. It is a terrible irony that while some turn to abortion, so many others who cannot become parents cry out for children to adopt. We have room for these children. We can fill the cradles of those who want a child to love. And tonight I ask you in the Congress to move this year on legislation to protect the unborn.
In the area of education, we're returning to excellence, and again, the heroes are our people, not government. We're stressing basics of discipline, rigorous testing, and homework, while helping children become computer-smart as well. For 20 years scholastic aptitude test scores of our high school students went down, but now they have gone up 2 of the last 3 years. We must go forward in our commitment to the new basics, giving parents greater authority and making sure good teachers are rewarded for hard work and achievement through merit pay.
Of all the changes in the past 20 years, none has more threatened our sense of national well-being than the explosion of violent crime. One does not have to be attacked to be a victim. The woman who must run to her car after shopping at night is a victim. The couple draping their door with locks and chains are victims; as is the tired, decent cleaning woman who can't ride a subway home without being afraid.
We do not seek to violate the rights of defendants. But shouldn't we feel more compassion for the victims of crime than for those who commit crime? For the first time in 20 years, the crime index has fallen 2 years in a row. We've convicted over 7,400 drug offenders and put them, as well as leaders of organized crime, behind bars in record numbers.
But we must do more. I urge the House to follow the Senate and enact proposals permitting use of all reliable evidence that police officers acquire in good faith. These proposals would also reform the habeas corpus laws and allow, in keeping with the will of the overwhelming majority of Americans, the use of the death penalty where necessary.
There can be no economic revival in ghettos when the most violent among us are allowed to roam free. It's time we restored domestic tranquility. And we mean to do just that.
Just as we're positioned as never before to secure justice in our economy, we're poised as never before to create a safer, freer, more peaceful world. Our alliances are stronger than ever. Our economy is stronger than ever. We have resumed our historic role as a leader of the free world. And all of these together are a great force for peace.
Since 1981 we've been committed to seeking fair and verifiable arms agreements that would lower the risk of war and reduce the size of nuclear arsenals. Now our determination to maintain a strong defense has influenced the Soviet Union to return to the bargaining table. Our negotiators must be able to go to that table with the united support of the American people. All of us have no greater dream than to see the day when nuclear weapons are banned from this Earth forever.
Each Member of the Congress has a role to play in modernizing our defenses, thus supporting our chances for a meaningful arms agreement. Your vote this spring on the Peacekeeper missile will be a critical test of our resolve to maintain the strength we need and move toward mutual and verifiable arms reductions.
For the past 20 years we've believed that no war will be launched as long as each side knows it can retaliate with a deadly counterstrike. Well, I believe there's a better way of eliminating the threat of nuclear war. It is a Strategic Defense Initiative aimed ultimately at finding a nonnuclear defense against ballistic missiles. It's the most hopeful possibility of the nuclear age. But it's not very well understood.
Some say it will bring war to the heavens, but its purpose is to deter war in the heavens and on Earth. Now, some say the research would be expensive. Perhaps, but it could save millions of lives, indeed humanity itself. And some say if we build such a system, the Soviets will build a defense system of their own. Well, they already have strategic defenses that surpass ours; a civil defense system, where we have almost none; and a research program covering roughly the same areas of technology that we're now exploring. And finally some say the research will take a long time. Well, the answer to that is: Let's get started.
Harry Truman once said that, ultimately, our security and the world's hopes for peace and human progress "lie not in measures of defense or in the control of weapons, but in the growth and expansion of freedom and self-government."
And tonight, we declare anew to our fellow citizens of the world: Freedom is not the sole prerogative of a chosen few; it is the universal right of all God's children. Look to where peace and prosperity flourish today. It is in homes that freedom built. Victories against poverty are greatest and peace most secure where people live by laws that ensure free press, free speech, and freedom to worship, vote, and create wealth.
Our mission is to nourish and defend freedom and democracy, and to communicate these ideals everywhere we can. America's economic success is freedom's success; it can be repeated a hundred times in a hundred different nations. Many countries in east Asia and the Pacific have few resources other than the enterprise of their own people. But through low tax rates and free markets they've soared ahead of centralized economies. And now China is opening up its economy to meet its needs.
We need a stronger and simpler approach to the process of making and implementing trade policy, and we'll be studying potential changes in that process in the next few weeks. We've seen the benefits of free trade and lived through the disasters of protectionism. Tonight I ask all our trading partners, developed and developing alike, to join us in a new round of trade negotiations to expand trade and competition and strengthen the global economy—and to begin it in this next year.
There are more than 3 billion human beings living in Third World countries with an average per capita income of $650 a year. Many are victims of dictatorships that impoverished them with taxation and corruption. Let us ask our allies to join us in a practical program of trade and assistance that fosters economic development through personal incentives to help these people climb from poverty on their own.
We cannot play innocents abroad in a world that's not innocent; nor can we be passive when freedom is under siege. Without resources, diplomacy cannot succeed. Our security assistance programs help friendly governments defend themselves and give them confidence to work for peace. And I hope that you in the Congress will understand that, dollar for dollar, security assistance contributes as much to global security as our own defense budget.
We must stand by all our democratic allies. And we must not break faith with those who are risking their lives—on every continent, from Afghanistan to Nicaragua—to defy Soviet-supported aggression and secure rights which have been ours from birth.
The Sandinista dictatorship of Nicaragua, with full Cuban-Soviet bloc support, not only persecutes its people, the church, and denies a free press, but arms and provides bases for Communist terrorists attacking neighboring states. Support for freedom fighters is self-defense and totally consistent with the OAS and U.N. Charters. It is essential that the Congress continue all facets of our assistance to Central America. I want to work with you to support the democratic forces whose struggle is tied to our own security.
And tonight, I've spoken of great plans and great dreams. They're dreams we can make come true. Two hundred years of American history should have taught us that nothing is impossible.
Ten years ago a young girl left Vietnam with her family, part of the exodus that followed the fall of Saigon. They came to the United States with no possessions and not knowing a word of English. Ten years ago—the young girl studied hard, learned English, and finished high school in the top of her class. And this May, May 22d to be exact, is a big date on her calendar. Just 10 years from the time she left Vietnam, she will graduate from the United States Military Academy at West Point. I thought you might like to meet an American hero named Jean Nguyen.
Now, there's someone else here tonight, born 79 years ago. She lives in the inner city, where she cares for infants born of mothers who are heroin addicts. The children, born in withdrawal, are sometimes even dropped on her doorstep. She helps them with love. Go to her house some night, and maybe you'll see her silhouette against the window as she walks the floor talking softly, soothing a child in her arms-Mother Hale of Harlem, and she, too, is an American hero.
Jean, Mother Hale, your lives tell us that the oldest American saying is new again: Anything is possible in America if we have the faith, the will, and the heart. History is asking us once again to be a force for good in the world. Let us begin in unity, with justice, and love.
Thank you, and God bless you.
"""
import phrasemachine
phrases = phrasemachine.get_phrases(text)
print "%s phrase types" % len(phrases['counts'])
print "%s phrase hits" % sum(phrases['counts'].values())
print "Top phrases:"
print phrases['counts'].most_common(10)
print "From crappy tokenization:"
crappy_tokens = text.split()
print phrasemachine.get_phrases(tokens=crappy_tokens)['counts'].most_common(10)
print "Phrase spans"
phrases = phrasemachine.get_phrases(text, output=['token_spans','tokens'])
print "%s phrase hits" % len(phrases['token_spans'])
print phrases['token_spans'][:20]
print phrases['token_spans'][-20:]
print "First several phrase hits"
print [(s,e, phrases['tokens'][s:e]) for (s,e) in phrases['token_spans'][:10]]
print "From crappy tokenization"
xx = phrasemachine.get_phrases(tokens=crappy_tokens, output='token_spans')['token_spans']
print [(s,e, crappy_tokens[s:e]) for (s,e) in xx[:10]]
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module HUAWEI-SEP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-SEP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:36:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
MibScalar, MibTable, MibTableRow, MibTableColumn, ifName, InterfaceIndex, Integer32, ObjectIdentity, ModuleIdentity, Unsigned32 = mibBuilder.importSymbols("IF-MIB", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ifName", "InterfaceIndex", "Integer32", "ObjectIdentity", "ModuleIdentity", "Unsigned32")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
VlanId, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanId")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier, iso, Counter64, Counter32, Bits, IpAddress, Integer32, Gauge32, ModuleIdentity, ObjectIdentity, TimeTicks, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier", "iso", "Counter64", "Counter32", "Bits", "IpAddress", "Integer32", "Gauge32", "ModuleIdentity", "ObjectIdentity", "TimeTicks", "Unsigned32")
RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString")
hwSepMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223))
if mibBuilder.loadTexts: hwSepMIB.setLastUpdated('200911171530Z')
if mibBuilder.loadTexts: hwSepMIB.setOrganization('Huawei Technologies Co.,Ltd.')
hwSepObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1))
hwSepResetPktCnt = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("clear", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwSepResetPktCnt.setStatus('current')
hwSepSegmentTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2), )
if mibBuilder.loadTexts: hwSepSegmentTable.setStatus('current')
hwSepSegmentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1), ).setIndexNames((0, "HUAWEI-SEP-MIB", "hwSepSegmentId"))
if mibBuilder.loadTexts: hwSepSegmentEntry.setStatus('current')
hwSepSegmentId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: hwSepSegmentId.setStatus('current')
hwSepControlVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4094), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepControlVlanId.setStatus('current')
hwSepPreemptManual = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPreemptManual.setStatus('current')
hwSepPreemptDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(15, 600), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPreemptDelay.setStatus('current')
hwSepBlockPortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("optimal", 1), ("middle", 2), ("hop", 3), ("name", 4), ("null", 5)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortMode.setStatus('current')
hwSepBlockPortHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 512), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortHop.setStatus('current')
hwSepBlockPortSysname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortSysname.setStatus('current')
hwSepBlockPortIfname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepBlockPortIfname.setStatus('current')
hwSepTcNotifySep = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 129))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifySep.setStatus('current')
hwSepTcNotifyRrpp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 10), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyRrpp.setStatus('current')
hwSepTcNotifyStp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 11), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyStp.setStatus('current')
hwSepTcNotifyVpls = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 12), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyVpls.setStatus('current')
hwSepTcNotifyVll = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 13), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifyVll.setStatus('current')
hwSepTcNotifySmartLinkCtrlVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4094), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcNotifySmartLinkCtrlVlan.setStatus('current')
hwSepDealSmartLinkFlush = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 15), EnabledStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepDealSmartLinkFlush.setStatus('current')
hwSepProtectedInstanceList = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 16), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 512))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepProtectedInstanceList.setStatus('current')
hwSepTcProtectionInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepTcProtectionInterval.setStatus('current')
hwSepSegmentRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 2, 1, 128), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepSegmentRowStatus.setStatus('current')
hwSepTopologyTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3), )
if mibBuilder.loadTexts: hwSepTopologyTable.setStatus('current')
hwSepTopologyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1), ).setIndexNames((0, "HUAWEI-SEP-MIB", "hwSepSegmentId"), (0, "HUAWEI-SEP-MIB", "hwSepHop"))
if mibBuilder.loadTexts: hwSepTopologyEntry.setStatus('current')
hwSepHop = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 512)))
if mibBuilder.loadTexts: hwSepHop.setStatus('current')
hwSepPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortId.setStatus('current')
hwSepTopoSysname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoSysname.setStatus('current')
hwSepTopoPortname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortname.setStatus('current')
hwSepTopoPortConfigPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortConfigPriority.setStatus('current')
hwSepTopoPortActivePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortActivePriority.setStatus('current')
hwSepTopoConfigPortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoConfigPortRole.setStatus('current')
hwSepTopoActivePortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoActivePortRole.setStatus('current')
hwSepTopoPortNbrState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("down", 1), ("init", 2), ("preup", 3), ("up", 4), ("conflict", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortNbrState.setStatus('current')
hwSepTopoBrotherPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoBrotherPortId.setStatus('current')
hwSepTopoNbrPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoNbrPortId.setStatus('current')
hwSepTopoPortLinkState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("down", 1), ("up", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortLinkState.setStatus('current')
hwSepTopoPortFwdState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("discarding", 1), ("forwarding", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTopoPortFwdState.setStatus('current')
hwSepPortTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4), )
if mibBuilder.loadTexts: hwSepPortTable.setStatus('current')
hwSepPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1), ).setIndexNames((0, "HUAWEI-SEP-MIB", "hwSepSegmentId"), (0, "HUAWEI-SEP-MIB", "hwSepPortType"), (0, "HUAWEI-SEP-MIB", "hwSepPortId1"), (0, "HUAWEI-SEP-MIB", "hwSepPortId2"), (0, "HUAWEI-SEP-MIB", "hwSepPortId3"), (0, "HUAWEI-SEP-MIB", "hwSepPortId4"))
if mibBuilder.loadTexts: hwSepPortEntry.setStatus('current')
hwSepPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 1)))
if mibBuilder.loadTexts: hwSepPortType.setStatus('current')
hwSepPortId1 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId1.setStatus('current')
hwSepPortId2 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId2.setStatus('current')
hwSepPortId3 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId3.setStatus('current')
hwSepPortId4 = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: hwSepPortId4.setStatus('current')
hwSepSysname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepSysname.setStatus('current')
hwSepPortname = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortname.setStatus('current')
hwSepPortConfigPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPortConfigPriority.setStatus('current')
hwSepPortActivePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortActivePriority.setStatus('current')
hwSepConfigPortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 10), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepConfigPortRole.setStatus('current')
hwSepActivePortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepActivePortRole.setStatus('current')
hwSepPortNbrState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("down", 1), ("init", 2), ("preup", 3), ("up", 4), ("conflict", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortNbrState.setStatus('current')
hwSepNbrPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepNbrPortId.setStatus('current')
hwSepPortFwdState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("discarding", 1), ("forwarding", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepPortFwdState.setStatus('current')
hwSepRxNbrPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxNbrPktCnt.setStatus('current')
hwSepTxNbrPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxNbrPktCnt.setStatus('current')
hwSepRxLsaInfoPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxLsaInfoPktCnt.setStatus('current')
hwSepTxLsaInfoPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxLsaInfoPktCnt.setStatus('current')
hwSepRxLsaAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxLsaAckPktCnt.setStatus('current')
hwSepTxLsaAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxLsaAckPktCnt.setStatus('current')
hwSepRxPreemptReqPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxPreemptReqPktCnt.setStatus('current')
hwSepTxPreemptReqPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxPreemptReqPktCnt.setStatus('current')
hwSepRxPreemptAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxPreemptAckPktCnt.setStatus('current')
hwSepTxPreemptAckPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxPreemptAckPktCnt.setStatus('current')
hwSepRxTcPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxTcPktCnt.setStatus('current')
hwSepTxTcPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxTcPktCnt.setStatus('current')
hwSepRxEpaPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 27), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepRxEpaPktCnt.setStatus('current')
hwSepTxEpaPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwSepTxEpaPktCnt.setStatus('current')
hwSepResetPortPktCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("clear", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwSepResetPortPktCnt.setStatus('current')
hwSepPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 1, 4, 1, 128), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSepPortRowStatus.setStatus('current')
hwSepGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2))
hwSepGlobalInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 1)).setObjects(("HUAWEI-SEP-MIB", "hwSepResetPktCnt"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepGlobalInfoGroup = hwSepGlobalInfoGroup.setStatus('current')
hwSepSegmentInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 2)).setObjects(("HUAWEI-SEP-MIB", "hwSepSegmentId"), ("HUAWEI-SEP-MIB", "hwSepControlVlanId"), ("HUAWEI-SEP-MIB", "hwSepPreemptManual"), ("HUAWEI-SEP-MIB", "hwSepPreemptDelay"), ("HUAWEI-SEP-MIB", "hwSepBlockPortMode"), ("HUAWEI-SEP-MIB", "hwSepBlockPortHop"), ("HUAWEI-SEP-MIB", "hwSepBlockPortSysname"), ("HUAWEI-SEP-MIB", "hwSepBlockPortIfname"), ("HUAWEI-SEP-MIB", "hwSepTcNotifySep"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyRrpp"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyStp"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyVpls"), ("HUAWEI-SEP-MIB", "hwSepTcNotifyVll"), ("HUAWEI-SEP-MIB", "hwSepTcNotifySmartLinkCtrlVlan"), ("HUAWEI-SEP-MIB", "hwSepDealSmartLinkFlush"), ("HUAWEI-SEP-MIB", "hwSepProtectedInstanceList"), ("HUAWEI-SEP-MIB", "hwSepTcProtectionInterval"), ("HUAWEI-SEP-MIB", "hwSepSegmentRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepSegmentInfoGroup = hwSepSegmentInfoGroup.setStatus('current')
hwSepPortInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 3)).setObjects(("HUAWEI-SEP-MIB", "hwSepPortType"), ("HUAWEI-SEP-MIB", "hwSepPortId1"), ("HUAWEI-SEP-MIB", "hwSepPortId2"), ("HUAWEI-SEP-MIB", "hwSepPortId3"), ("HUAWEI-SEP-MIB", "hwSepPortId4"), ("HUAWEI-SEP-MIB", "hwSepSysname"), ("HUAWEI-SEP-MIB", "hwSepPortname"), ("HUAWEI-SEP-MIB", "hwSepPortConfigPriority"), ("HUAWEI-SEP-MIB", "hwSepPortActivePriority"), ("HUAWEI-SEP-MIB", "hwSepConfigPortRole"), ("HUAWEI-SEP-MIB", "hwSepActivePortRole"), ("HUAWEI-SEP-MIB", "hwSepPortNbrState"), ("HUAWEI-SEP-MIB", "hwSepNbrPortId"), ("HUAWEI-SEP-MIB", "hwSepPortFwdState"), ("HUAWEI-SEP-MIB", "hwSepRxNbrPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxNbrPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxLsaInfoPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxLsaInfoPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxLsaAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxLsaAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxPreemptReqPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxPreemptReqPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxPreemptAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxPreemptAckPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxTcPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxTcPktCnt"), ("HUAWEI-SEP-MIB", "hwSepRxEpaPktCnt"), ("HUAWEI-SEP-MIB", "hwSepTxEpaPktCnt"), ("HUAWEI-SEP-MIB", "hwSepResetPortPktCnt"), ("HUAWEI-SEP-MIB", "hwSepPortRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepPortInfoGroup = hwSepPortInfoGroup.setStatus('current')
hwSepTopologyInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 223, 2, 4)).setObjects(("HUAWEI-SEP-MIB", "hwSepHop"), ("HUAWEI-SEP-MIB", "hwSepPortId"), ("HUAWEI-SEP-MIB", "hwSepTopoSysname"), ("HUAWEI-SEP-MIB", "hwSepTopoPortname"), ("HUAWEI-SEP-MIB", "hwSepTopoPortConfigPriority"), ("HUAWEI-SEP-MIB", "hwSepTopoPortActivePriority"), ("HUAWEI-SEP-MIB", "hwSepTopoConfigPortRole"), ("HUAWEI-SEP-MIB", "hwSepTopoActivePortRole"), ("HUAWEI-SEP-MIB", "hwSepTopoPortNbrState"), ("HUAWEI-SEP-MIB", "hwSepTopoNbrPortId"), ("HUAWEI-SEP-MIB", "hwSepTopoPortLinkState"), ("HUAWEI-SEP-MIB", "hwSepTopoPortFwdState"), ("HUAWEI-SEP-MIB", "hwSepTopoBrotherPortId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwSepTopologyInfoGroup = hwSepTopologyInfoGroup.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-SEP-MIB", hwSepTcNotifySep=hwSepTcNotifySep, hwSepPortname=hwSepPortname, hwSepSegmentTable=hwSepSegmentTable, hwSepMIB=hwSepMIB, hwSepTopoPortname=hwSepTopoPortname, hwSepTxTcPktCnt=hwSepTxTcPktCnt, hwSepGroups=hwSepGroups, hwSepPortId2=hwSepPortId2, hwSepTopoConfigPortRole=hwSepTopoConfigPortRole, hwSepRxLsaAckPktCnt=hwSepRxLsaAckPktCnt, hwSepActivePortRole=hwSepActivePortRole, hwSepPortRowStatus=hwSepPortRowStatus, hwSepTopoSysname=hwSepTopoSysname, hwSepTopoPortNbrState=hwSepTopoPortNbrState, hwSepPortId3=hwSepPortId3, hwSepPortNbrState=hwSepPortNbrState, hwSepPortType=hwSepPortType, hwSepTopologyTable=hwSepTopologyTable, hwSepSegmentId=hwSepSegmentId, hwSepRxTcPktCnt=hwSepRxTcPktCnt, hwSepBlockPortMode=hwSepBlockPortMode, hwSepBlockPortIfname=hwSepBlockPortIfname, hwSepPortTable=hwSepPortTable, hwSepConfigPortRole=hwSepConfigPortRole, hwSepHop=hwSepHop, hwSepRxPreemptReqPktCnt=hwSepRxPreemptReqPktCnt, hwSepSysname=hwSepSysname, hwSepTxPreemptAckPktCnt=hwSepTxPreemptAckPktCnt, hwSepProtectedInstanceList=hwSepProtectedInstanceList, hwSepTxEpaPktCnt=hwSepTxEpaPktCnt, hwSepTopologyEntry=hwSepTopologyEntry, hwSepTopoPortActivePriority=hwSepTopoPortActivePriority, hwSepTxLsaInfoPktCnt=hwSepTxLsaInfoPktCnt, hwSepGlobalInfoGroup=hwSepGlobalInfoGroup, hwSepRxEpaPktCnt=hwSepRxEpaPktCnt, hwSepPortEntry=hwSepPortEntry, hwSepTopoPortConfigPriority=hwSepTopoPortConfigPriority, hwSepSegmentInfoGroup=hwSepSegmentInfoGroup, hwSepTopoBrotherPortId=hwSepTopoBrotherPortId, hwSepPortFwdState=hwSepPortFwdState, hwSepTxNbrPktCnt=hwSepTxNbrPktCnt, hwSepResetPktCnt=hwSepResetPktCnt, hwSepSegmentEntry=hwSepSegmentEntry, hwSepDealSmartLinkFlush=hwSepDealSmartLinkFlush, hwSepTcProtectionInterval=hwSepTcProtectionInterval, hwSepNbrPortId=hwSepNbrPortId, hwSepRxPreemptAckPktCnt=hwSepRxPreemptAckPktCnt, hwSepResetPortPktCnt=hwSepResetPortPktCnt, hwSepPreemptManual=hwSepPreemptManual, hwSepPortId4=hwSepPortId4, hwSepTcNotifyVll=hwSepTcNotifyVll, hwSepTopoActivePortRole=hwSepTopoActivePortRole, hwSepPreemptDelay=hwSepPreemptDelay, hwSepPortActivePriority=hwSepPortActivePriority, hwSepTcNotifySmartLinkCtrlVlan=hwSepTcNotifySmartLinkCtrlVlan, hwSepTcNotifyVpls=hwSepTcNotifyVpls, hwSepBlockPortSysname=hwSepBlockPortSysname, hwSepPortConfigPriority=hwSepPortConfigPriority, hwSepPortInfoGroup=hwSepPortInfoGroup, hwSepTopologyInfoGroup=hwSepTopologyInfoGroup, hwSepControlVlanId=hwSepControlVlanId, hwSepObjects=hwSepObjects, hwSepTcNotifyStp=hwSepTcNotifyStp, hwSepPortId=hwSepPortId, hwSepTopoNbrPortId=hwSepTopoNbrPortId, hwSepTxLsaAckPktCnt=hwSepTxLsaAckPktCnt, hwSepTopoPortFwdState=hwSepTopoPortFwdState, hwSepRxLsaInfoPktCnt=hwSepRxLsaInfoPktCnt, hwSepPortId1=hwSepPortId1, hwSepRxNbrPktCnt=hwSepRxNbrPktCnt, PYSNMP_MODULE_ID=hwSepMIB, hwSepBlockPortHop=hwSepBlockPortHop, hwSepTcNotifyRrpp=hwSepTcNotifyRrpp, hwSepSegmentRowStatus=hwSepSegmentRowStatus, hwSepTopoPortLinkState=hwSepTopoPortLinkState, hwSepTxPreemptReqPktCnt=hwSepTxPreemptReqPktCnt)
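# (Sketch) One way such a pysmi-generated module is typically loaded with
# pysnmp's MibBuilder, which supplies the `mibBuilder` object this file uses;
# the search-path setup is an assumption, not part of the generated file:
# from pysnmp.smi import builder
# mib_builder = builder.MibBuilder()
# mib_builder.loadModules('HUAWEI-SEP-MIB')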
|
nilq/baby-python
|
python
|
import unittest
import unittest.mock
import denonavr.__main__ as avr
class TestDenonAVR(unittest.TestCase):
def test_valid_input_source(self):
self.assertTrue(avr._is_valid_input_source("DVD"))
self.assertTrue(avr._is_valid_input_source("BD"))
self.assertTrue(avr._is_valid_input_source("GAME"))
self.assertTrue(avr._is_valid_input_source("SAT/CBL"))
self.assertFalse(avr._is_valid_input_source("VHS"))
def test_convert_input_source(self):
self.assertEqual(avr._convert_input_source("satcbl"), "SAT/CBL")
self.assertEqual(avr._convert_input_source("vhs"), "VHS")
def test_execute(self):
with unittest.mock.patch("telnetlib.Telnet") as telnet_mock:
telnet_mock.return_value.read_until.return_value = "b'Test\\r'"
self.assertEqual(avr.execute("?Test", avr.CONFIG), "Test")
telnet_mock.return_value.write.assert_called_once_with(b'?Test\r')
telnet_mock.return_value.close.assert_called_once_with()
def test_execute_error(self):
with unittest.mock.patch("telnetlib.Telnet") as telnet_mock:
telnet_mock.return_value.write.side_effect = OSError
self.assertEqual(avr.execute("CMD", avr.CONFIG), "ERROR")
telnet_mock.return_value = unittest.mock.MagicMock(spec=["write", "read_until", "close"])
telnet_mock.side_effect = None
telnet_mock.return_value.write.side_effect = OSError
self.assertEqual(avr.execute("CMD", avr.CONFIG), "ERROR")
telnet_mock.return_value.close.assert_called_with()
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from ...hek.defs.obje import *
def get():
return obje_def
# replace the model animations dependency with an open sauce one
obje_attrs = dict(obje_attrs)
obje_attrs[8] = dependency('animation_graph', valid_model_animations_yelo)
obje_body = Struct('tagdata',
obje_attrs
)
obje_def = TagDef("obje",
blam_header('obje'),
obje_body,
ext=".object", endian=">", tag_cls=ObjeTag
)
|
nilq/baby-python
|
python
|
class Coordenadas():
def __init__(self, coordenadaX , coordenadaY):
self.coordenadaX = coordenadaX
self.coordenadaY = coordenadaY
def valores(self):
print("Los valores ingresados fueron:","(" , self.coordenadaX,",", self.coordenadaY ,")")
def cuadrante(self):
if(self.coordenadaX > 0 and self.coordenadaY>0 ):
print("Pertenece al primer cuadrante")
elif(self.coordenadaX <0 and self.coordenadaY >0 ):
print("Pertenece al segundo cuadrante")
elif (self.coordenadaX <0 and self.coordenadaY< 0):
print("Pertence al tercer cuadrante")
elif(self.coordenadaX>0 and self.coordenadaY<0):
print("Pertenece al cuarto cuadrante")
def vector_resultante(self):
otroX= int(input("Ingrese nueva coordenada X: "))
otroY= int(input("Ingrese nueva coordenada Y: "))
self.VRX = otroX - self.coordenadaX
self.VRY = otroY -self.coordenadaY
print("El vector resultante es:","(" , self.VRX,",", self.VRY ,")")
def distancia(self):
d = ((self.VRX)**2 + (self.VRY)**2)**0.5
print("La distancia entre sus puntos es: ", d)
    def __resultante(self):
        self.vector_resultante()
coor = Coordenadas(coordenadaX = -2, coordenadaY=3)
coor.valores()
coor.cuadrante()
coor.vector_resultante()
coor.distancia()
|
nilq/baby-python
|
python
|
import sys
import unittest
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, os.path.abspath(
os.path.join(script_dir, os.path.join('..', '..'))))
import pake
class SubpakeTest(unittest.TestCase):
def file_helper_test_stub(self, ctx, silent):
fp = pake.FileHelper(ctx)
self.assertEqual(fp.printer, ctx)
# FileHelper.makedirs
# =============================
fp.makedirs('test_data/filehelper/sub', silent=silent)
try:
fp.makedirs('test_data/filehelper/sub', silent=silent)
except Exception:
self.fail('pake.FileHelper.makedirs threw creating an existing directory tree. '
'It should not do this when exist_ok=True, which is default.')
with self.assertRaises(OSError):
fp.makedirs('test_data/filehelper/sub', exist_ok=False, silent=silent)
with self.assertRaises(OSError):
fp.makedirs('test_data/filehelper', exist_ok=False, silent=silent)
with self.assertRaises(OSError):
fp.makedirs('test_data', exist_ok=False, silent=silent)
self.assertTrue(os.path.isdir('test_data/filehelper'))
self.assertTrue(os.path.isdir('test_data/filehelper/sub'))
for i in range(0, 3):
fp.makedirs('test_data/filehelper/delete_me_{}/sub'.format(i), silent=silent)
self.assertTrue(os.path.isdir('test_data/filehelper/delete_me_{}/sub'.format(i)))
touch_file = 'test_data/filehelper/delete_me_{idx}/sub/file{idx}.txt'.format(idx=i)
fp.touch(touch_file, silent=silent)
self.assertTrue(os.path.isfile(touch_file))
# FileHelper.copytree
# =============================
fp.copytree('test_data/filehelper', 'test_data/filehelper/copy_test', silent=silent)
self.assertTrue(os.path.isdir('test_data/filehelper/copy_test'))
for i in range(0, 3):
touch_file = 'test_data/filehelper/copy_test/delete_me_{idx}/sub/file{idx}.txt'.format(idx=i)
self.assertTrue(os.path.isfile(touch_file))
with self.assertRaises(FileExistsError):
fp.copytree('test_data/filehelper', 'test_data/filehelper/copy_test', silent=silent)
# FileHelper.move
# =============================
fp.makedirs('test_data/filehelper/movedir', silent=silent)
fp.touch('test_data/filehelper/move.txt', silent=silent)
fp.move('test_data/filehelper/move.txt', 'test_data/filehelper/movedir', silent=silent)
self.assertTrue(os.path.isfile('test_data/filehelper/movedir/move.txt'))
fp.move('test_data/filehelper/movedir', 'test_data/filehelper/copy_test', silent=silent)
self.assertTrue(os.path.isfile('test_data/filehelper/copy_test/movedir/move.txt'))
# FileHelper.remove
# =============================
fp.remove('test_data/filehelper/copy_test/movedir/move.txt', silent=silent)
self.assertFalse(os.path.isfile('test_data/filehelper/copy_test/movedir/move.txt'))
        try:
            fp.remove('test_data/filehelper/copy_test/movedir/move.txt', silent=silent)
        except Exception:
            self.fail(
                'pake.FileHelper.remove threw removing a non existing file. It should not do this when must_exist=False, which is default.')
with self.assertRaises(FileNotFoundError):
fp.remove('test_data/filehelper/copy_test/movedir/move.txt', must_exist=True, silent=silent)
# Cannot use remove to remove directories, must use rmtree
with self.assertRaises(OSError):
fp.remove('test_data/filehelper/copy_test/movedir', must_exist=True, silent=silent)
# FileHelper.touch
# =============================
try:
fp.touch('test_data/filehelper/delete_me_0/sub/file0.txt', silent=silent)
except Exception:
self.fail(
'pake.FileHelper.touch threw touching an existing file. It should not do this when exist_ok=True, which is default.')
with self.assertRaises(FileExistsError):
fp.touch('test_data/filehelper/delete_me_0/sub/file0.txt', silent=silent, exist_ok=False)
# FileHelper.glob_remove
# =============================
fp.glob_remove('test_data/filehelper/delete_me_**/sub/file*.txt', silent=silent)
for i in range(0, 3):
self.assertFalse(os.path.isfile('test_data/filehelper/delete_me_{idx}/sub/file{idx}.txt'.format(idx=i)))
# FileHelper.copy
# =============================
fp.copy('test_data/in1', 'test_data/filehelper', silent=silent)
self.assertTrue(os.path.isfile('test_data/filehelper/in1'))
try:
fp.copy('test_data/in1', 'test_data/filehelper', silent=silent)
except Exception:
self.fail(
'pake.FileHelper.copy threw overwriting an existing file. It should not do this.')
# Just to hit the second path, there is not really a way to portably test copying the metadata,
# it is up to the shutil module to do it anyway.
fp.copy('test_data/in2', 'test_data/filehelper', silent=silent, copy_metadata=True)
self.assertTrue(os.path.isfile('test_data/filehelper/in2'))
try:
fp.copy('test_data/in2', 'test_data/filehelper', silent=silent, copy_metadata=True)
except Exception:
self.fail(
'pake.FileHelper.copy with metadata threw overwriting an existing file. It should not do this.')
# FileHelper.glob_remove_dirs
# =============================
# remove the sub folders under the folders starting with delete_me_*
fp.glob_remove_dirs('test_data/filehelper/delete_me_**/su*', silent=silent)
for i in range(0, 3):
# delete_me_* should remain intact, the sub folders should not
self.assertTrue(os.path.isdir('test_data/filehelper/delete_me_{}'.format(i)))
self.assertFalse(os.path.isdir('test_data/filehelper/delete_me_{}/sub'.format(i)))
fp.glob_remove_dirs('test_data/filehelper/delete_me_*', silent=silent)
for i in range(0, 3):
# now they should be gone
self.assertFalse(os.path.isdir('test_data/filehelper/delete_me_{}'.format(i)))
# FileHelper.rmtree
# =============================
fp.rmtree('test_data/filehelper', silent=silent)
try:
fp.rmtree('test_data/filehelper', silent=silent)
except Exception:
self.fail(
'pake.FileHelper.rmtree threw removing a non existent directory. It should not do this when must_exist=False, which is default.')
with self.assertRaises(FileNotFoundError):
fp.rmtree('test_data/filehelper', silent=silent, must_exist=True)
def test_filehelper(self):
fh = pake.FileHelper()
self.assertEqual(fh.printer, None)
class SilentTestCtx:
def print(*args):
nonlocal self
self.fail('SilentTestCtx printed from file helper function set to be silent.')
class TestCtx:
def print(*args):
pass
class ErrCtx:
# I don't implement print
pass
with self.assertRaises(ValueError):
# Error because no print function is defined.
_ = pake.FileHelper(ErrCtx())
past_cwd = os.getcwd()
os.chdir(script_dir)
pake.FileHelper().rmtree('test_data/filehelper')
self.file_helper_test_stub(SilentTestCtx(), silent=True)
self.file_helper_test_stub(TestCtx(), silent=False)
self.file_helper_test_stub(None, silent=True)
self.file_helper_test_stub(None, silent=False)
os.chdir(past_cwd)
|
nilq/baby-python
|
python
|
import sys
import re
def main():
file = sys.stdin
if file.isatty():
filename = input('Input file name: ')
file = open(filename)
rules = file.readlines()
file.close()
bags = parseRules(rules)
shinyGoldBag = bags['shiny gold']
shinyGoldContainers = shinyGoldBag.findContainers()
print(f'Shiny gold bags can be carried in {len(shinyGoldContainers)} other bags')
print(f'Shiny gold bags must contain {shinyGoldBag.countContainedBags()} other bags')
def parseRules(rules):
bags = {bag.color: bag for bag in [Bag(rule) for rule in rules]}
ruleRegex = re.compile(r'(\d+) ([a-z]+ [a-z]+) bags?')
for bag in bags.values():
for rule in bag.containsRule.split(', '):
match = ruleRegex.match(rule)
if match and match.group(2) in bags:
cBag = bags[match.group(2)]
bag.contains[cBag.color] = ContainedBag(cBag, int(match.group(1)))
cBag.containedIn[bag.color] = bag
return bags
class Bag:
def __init__(self, rule):
(color, restOfRule) = rule.split(' bags contain ')
self.color = color
self.containsRule = restOfRule.strip('. \r\n')
self.contains = {}
self.containedIn = {}
    def findContainers(self, containers=None):
        # Use None instead of a mutable default argument: a shared default
        # set would leak containers between separate calls.
        if containers is None:
            containers = set()
        for container in self.containedIn.values():
            if container not in containers:
                containers.add(container)
                container.findContainers(containers)
        return containers
def countContainedBags(self):
count = 0
for contained in self.contains.values():
count += contained.count + contained.count * contained.bag.countContainedBags()
return count
class ContainedBag:
def __init__(self, bag, count):
self.bag = bag
self.count = count
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import copy
import logging
import os
import typing
import yaml
from nbcollection.ci.constants import ENCODING, SCANNER_ARTIFACT_DEST_DIR
from nbcollection.ci.generate_ci_environment.constants import NBCOLLECTION_BUILDER, CONFIG_TEMPLATE, JOB_TEMPLATE, \
PULL_REQUEST_TEMPLATE, NBCOLLECTION_WORKFLOW_NAME, PUBLISH_JOB_NAME_TEMPLATE
from nbcollection.ci.commands.datatypes import CIEnvironment
from nbcollection.ci.datatypes import BuildJob
from nbcollection.ci.renderer import render_template
logger = logging.getLogger(__name__)
def gen_ci_env(jobs: typing.List[BuildJob], ci_env: CIEnvironment, project_path: str, enable_website_publication: bool,
enable_nightly: bool = False) -> None:
if ci_env is not CIEnvironment.CircleCI:
raise NotImplementedError(f'CIEnvironment "{ci_env}" not supported')
formatted_collections = []
formatted_job_names = []
config = copy.deepcopy(CONFIG_TEMPLATE)
logger.info(f'Using {NBCOLLECTION_BUILDER} for CircleCI Image Executor')
for build_job in jobs:
formatted_cat_name = ' '.join(build_job.category.name.split('_'))
formatted_cat_name = formatted_cat_name.title()
formatted_col_name = ' '.join(build_job.collection.name.split('_'))
formatted_col_name = formatted_col_name.title()
job_name = '-'.join([formatted_col_name, formatted_cat_name])
logger.info(f'Generating job for "{job_name}"')
job = copy.deepcopy(JOB_TEMPLATE)
job['steps'][2]['run']['command'] = ' '.join([
'nbcollection-ci build-notebooks',
f'--collection-names {build_job.collection.name}',
f'--category-names {build_job.category.name}',
])
job['steps'][2]['run']['name'] = f'Build {job_name} notebooks'
job['steps'][3]['store_artifacts']['path'] = SCANNER_ARTIFACT_DEST_DIR
config['jobs'][job_name] = job
config['workflows'][NBCOLLECTION_WORKFLOW_NAME]['jobs'].append(job_name)
        if build_job.collection.name not in formatted_collections:
formatted_collections.append(build_job.collection.name)
formatted_job_names.append(job_name)
formatted_collections = ','.join(formatted_collections)
# Pull Request
pr_job_name = 'Pull Request'
config['jobs'][pr_job_name] = copy.deepcopy(PULL_REQUEST_TEMPLATE)
config['workflows'][NBCOLLECTION_WORKFLOW_NAME]['jobs'].append(pr_job_name)
# Publish Website
if enable_website_publication:
publish_job_name = 'Publish Website'
config['jobs'][publish_job_name] = copy.deepcopy(PUBLISH_JOB_NAME_TEMPLATE)
config['jobs'][publish_job_name]['steps'][2]['run']['command'] = f'nbcollection-ci merge-artifacts -c {formatted_collections} -o $CIRCLE_PROJECT_USERNAME -r $CIRCLE_PROJECT_REPONAME'
config['jobs'][publish_job_name]['steps'][2]['run']['name'] = 'Publish Website'
config['workflows'][NBCOLLECTION_WORKFLOW_NAME]['jobs'].append({
publish_job_name: {
'requires': formatted_job_names
}
})
if enable_nightly:
schedule_key = f'{NBCOLLECTION_WORKFLOW_NAME}-periodic'
config['workflows'][schedule_key] = copy.deepcopy(config['workflows'][NBCOLLECTION_WORKFLOW_NAME])
config['workflows'][schedule_key]['triggers'] = [
{
'schedule': {
'cron': '30 8 * * *',
'filters': {
'branches': {
'only': ['main']
}
}
}
}
]
config_path = os.path.join(project_path, '.circleci/config.yml')
config_dirpath = os.path.dirname(config_path)
if not os.path.exists(config_dirpath):
os.makedirs(config_dirpath)
logger.info(f'Writing config-file to "{config_path}"')
with open(config_path, 'wb') as stream:
stream.write(yaml.dump(config).encode(ENCODING))
setup_script_filepath = os.path.join(project_path, '.circleci/setup-env.sh')
logger.info(f"Rendering Setup Script: {setup_script_filepath}")
with open(setup_script_filepath, 'wb') as stream:
rendered_script = render_template('setup-env.sh', {})
stream.write(rendered_script.encode(ENCODING))
build_pull_request_filepath = os.path.join(project_path, '.circleci/build-pull-request.sh')
logger.info(f'Rendering Pull Request: {build_pull_request_filepath}')
with open(build_pull_request_filepath, 'wb') as stream:
rendered_pr_script = render_template('build-pull-request.sh', {})
stream.write(rendered_pr_script.encode(ENCODING))
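# A hypothetical invocation sketch (the jobs list, path, and flags below are
# assumptions for illustration; BuildJob instances normally come from the
# repository scanner elsewhere in nbcollection):
# gen_ci_env(jobs, CIEnvironment.CircleCI, project_path='.',
#            enable_website_publication=True, enable_nightly=False)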
|
nilq/baby-python
|
python
|
"""
protonate.py: Wrapper method for the reduce program: protonate (i.e., add hydrogens) a pdb using reduce
and save to an output file.
Pablo Gainza - LPDI STI EPFL 2019
Released under an Apache License 2.0
"""
from subprocess import Popen, PIPE
def protonate(in_pdb_file, out_pdb_file):
# protonate (i.e., add hydrogens) a pdb using reduce and save to an output file.
# in_pdb_file: file to protonate.
# out_pdb_file: output file where to save the protonated pdb file.
# Remove protons first, in case the structure is already protonated
args = ["reduce", "-Trim", in_pdb_file]
    p2 = Popen(args, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p2.communicate()
    # communicate() returns bytes; decode before writing to a text-mode file.
    outfile = open(out_pdb_file, "w")
    outfile.write(stdout.decode("utf-8").rstrip())
    outfile.close()
    # Now add them again.
    args = ["reduce", "-HIS", out_pdb_file]
    p2 = Popen(args, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p2.communicate()
    outfile = open(out_pdb_file, "w")
    outfile.write(stdout.decode("utf-8"))
    outfile.close()
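if __name__ == "__main__":
    # Minimal usage sketch (the file names are hypothetical and the `reduce`
    # binary must be on PATH; this guard is not part of the original script):
    protonate("input.pdb", "input_protonated.pdb")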
|
nilq/baby-python
|
python
|
import collections.abc
import logging
from arekit.common.data.input.providers.opinions import OpinionProvider
logger = logging.getLogger(__name__)
class BaseRowProvider(object):
""" Base provider for rows that suppose to be filled into BaseRowsStorage.
"""
# region protected methods
def _provide_rows(self, parsed_news, text_opinion_linkage, idle_mode):
raise NotImplementedError()
# endregion
def iter_by_rows(self, opinion_provider, doc_ids_iter, idle_mode):
assert(isinstance(opinion_provider, OpinionProvider))
        assert(isinstance(doc_ids_iter, collections.abc.Iterable))
for parsed_news, linkage in opinion_provider.iter_linked_opinions(doc_ids_iter):
rows_it = self._provide_rows(parsed_news=parsed_news,
text_opinion_linkage=linkage,
idle_mode=idle_mode)
for row in rows_it:
yield row
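# A hypothetical subclass sketch (illustrative names, not part of the
# library): concrete providers override _provide_rows to yield one or more
# storage rows per linked text opinion.
class EchoRowProvider(BaseRowProvider):
    def _provide_rows(self, parsed_news, text_opinion_linkage, idle_mode):
        # One row per linkage; a real provider would serialize columns here.
        if not idle_mode:
            yield {"news": parsed_news, "linkage": text_opinion_linkage}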
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'pan-python',
]
test_requirements = [
'pan-python',
'mock',
'pytest',
]
setup(
name='pandevice',
version='0.6.3',
description='Framework for interacting with Palo Alto Networks devices via API',
long_description='The Palo Alto Networks Device Framework is a way to interact with Palo Alto Networks devices (including Next-generation Firewalls and Panorama) using the device API that is object oriented and conceptually similar to interaction with the device via the GUI or CLI.',
author='Palo Alto Networks',
author_email='techpartners@paloaltonetworks.com',
url='https://github.com/PaloAltoNetworks/pandevice',
packages=[
'pandevice',
],
package_dir={'pandevice':
'pandevice'},
include_package_data=True,
install_requires=requirements,
license="ISC",
zip_safe=False,
keywords='pandevice',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
"Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
tests_require=test_requirements,
setup_requires=['pytest-runner', ],
)
|
nilq/baby-python
|
python
|
class Activation(object):
def __init__(self):
pass
from deepend.activations.Softmax import Softmax
from deepend.activations.LeakyReLU import LeakyReLU
from deepend.activations.Linear import Linear
from deepend.activations.TanH import TanH
from deepend.activations.Sigmoid import Sigmoid
from deepend.activations.ReLU import ReLU
activation_mapping = {
"softmax": Softmax,
"leaky_relu": LeakyReLU,
"linear": Linear,
"tanh": TanH,
"sigmoid": Sigmoid,
"relu": ReLU,
}
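# A small lookup sketch (a hypothetical helper, not part of deepend; it
# assumes the activation classes take no constructor arguments):
def get_activation(name):
    try:
        return activation_mapping[name]()
    except KeyError:
        raise ValueError("Unknown activation: %s" % name)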
|
nilq/baby-python
|
python
|
"""Exemplo melhorias de Contraste."""
from PIL import Image, ImageEnhance
im = Image.open('beijo.jpg')
contrast = ImageEnhance.Contrast(im)
contrast.enhance(1.2)
contrast.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
############################
# Librairies import
############################
import os
import uuid
import checkUserInput
############################
# Functions
############################
def confirmation_address(address, what):
confirmation = False
if checkUserInput.question_and_verification("Confirmez-vous l'adresse " + address + " pour " + what + "\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
print("L'adresse " + address + " pour " + what + " a ete confirmee avec succes!")
confirmation = True
else:
print("L'adresse n'a pas ete confirmee! Veuillez la changer!")
return confirmation
def check_array_input(array):
for byte in array:
if len(byte) > 3 or byte == "":
print("L'adresse encodee n'est pas conforme!")
correctInput = False
break
else:
try:
int(byte)
correctInput = True
except:
print("L'adresse encodee n'est pas conforme!")
correctInput = False
break
return correctInput
def check_input(type, what):
while True:
try:
userInput = input("Encodez l'adresse" + type + "pour " + what + ": ")
arrayInput = userInput.split(".")
except:
print("Une erreur s'est produite!")
arrayInput = []
if len(arrayInput) == 4:
correctInput = check_array_input(arrayInput)
else:
print("L'adresse encodee n'est pas conforme!")
correctInput = False
if correctInput:
confirmation = confirmation_address(userInput, what)
if confirmation:
return arrayInput
def confirm_dhcp_address(dhcpAddress, place, goodAddress):
confirmation = False
if goodAddress:
confirmation = confirmation_address('.'.join(dhcpAddress), "la " + place + " address du DHCP ")
if goodAddress == False or confirmation == False:
print("Veuillez encodez manuellement la " + place + " addresse du range DHCP!")
        dhcpAddress = check_input("", place + " client du range DHCP")
return dhcpAddress
def calculate_dhcp(ipAddress, netmask):
    networkAddress = []
    broadcastAddress = []
    goodAddress = True
    for index, byte in enumerate(netmask):
        if int(byte) == 255:
            networkAddress.append(str(ipAddress[index]))
            broadcastAddress.append(str(ipAddress[index]))
        elif int(byte) == 0:
            networkAddress.append(str(0))
            broadcastAddress.append(str(255))
        else:
            # Partial octet: scan sub-network blocks of size rangeAddress + 1
            # until the block containing this address byte is found.
            rangeAddress = 255 - int(byte)
            trySubNetwork = 0
            nbSubNetwork = 256 // (rangeAddress + 1)
            while trySubNetwork < nbSubNetwork:
                networkAddressByte = trySubNetwork * (rangeAddress + 1)
                broadcastAddressByte = networkAddressByte + rangeAddress
                if networkAddressByte <= int(ipAddress[index]) <= broadcastAddressByte:
                    networkAddress.append(str(networkAddressByte))
                    broadcastAddress.append(str(broadcastAddressByte))
                    break
                trySubNetwork += 1
firstDhcpAddress = networkAddress
lastDhcpAddress = broadcastAddress
try:
firstDhcpAddress[3] = str(int(firstDhcpAddress[3]) + 1)
lastDhcpAddress[3] = str(int(lastDhcpAddress[3]) - 1)
except:
goodAddress = False
firstDhcpAddress = confirm_dhcp_address(firstDhcpAddress, "premiere", goodAddress)
lastDhcpAddress = confirm_dhcp_address(lastDhcpAddress, "derniere", goodAddress)
return firstDhcpAddress, lastDhcpAddress
def configure_dhcp(ipAddress, netmask):
try:
print("Enable dnsmasq")
p = os.system("systemctl enable dnsmasq")
print("Start dnsmasq")
p = os.system("systemctl start dnsmasq")
except:
print("L'activation du service DHCP a echoue!")
firstDhcpAddress, lastDhcpAddress = calculate_dhcp(ipAddress, netmask)
# Prepare DHCP file
line = "dhcp-range=" + '.'.join(firstDhcpAddress) + "," + '.'.join(lastDhcpAddress) + "," + '.'.join(netmask) + ",12h"
try:
path = "/home/dev/Configuration-Folder/51-dhcp-range.conf"
with open(path, "w") as file:
file.write(line)
except:
print("Impossible d'enregistrer le fichier de configuration!")
def configure_ipv6():
if checkUserInput.question_and_verification("Voulez-vous utiliser le prefixe IPv6 par defaut d'EVESA: \"FD05:A40B:6F6::/48\" ?\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
ipv6Prefix = "FD05:A40B:6F6::/48"
else:
while True:
answer = input("Encodez le prefixe IPv6 sous la forme xxxx:xxxx:xxxx:xxxx/yy\nPrefixe: ")
if confirmation_address(answer, "le prefixe IPv6"):
ipv6Prefix = answer
break
try:
commandLine = "sudo netmgr -i iotr network_prefix set " + ipv6Prefix
print("Set IPv6 prefixe")
p = os.system(commandLine)
except:
print("Le prefixe IPv6 n'a pas pu etre encode!")
def search_network_informations(ipAddress, netmask, searchPath, filename):
try:
for root, dir, files in os.walk(searchPath):
if filename in files:
with open(os.path.join(root, filename), "r") as file:
previousLine = ""
ipv6Address = ""
for line in file:
if "iface eth0 inet6 static" in previousLine and "address" in line:
ipv6Address = line
previousLine = line
macAddress = (':'.join(['{:02x}'.format((uuid.getnode() >> element) & 0xff) for element in range(0,8*6,8)][::-1]))
line = "# Wired adapter #1\nauto eth0\niface eth0 inet static\n"
line += "\taddress " + '.'.join(ipAddress) + "\n"
line += "\tnetmask " + '.'.join(netmask) + "\n"
line += "\thwaddress ether " + macAddress + "\n"
if ipv6Address != "" :
line += "\tiface eth0 inet6 static\n"
line += ipv6Address
line += "\tnetmask 64\n\n"
line += "# Local loopback/n"
line += "auto lo\n"
line += "\tiface lo inet loopback"
path = "/home/dev/Configuration-Folder/interfaces_static"
with open(path, "w") as file:
file.write(line)
break
except:
print("Un probleme est survenu lors de la configuration des parametres du reseau!")
############################
# Main program
############################
def main():
ipAddress = check_input(" ip ", "le routeur")
netmask = check_input(" ", "le masque reseau")
if checkUserInput.question_and_verification("Voulez-vous utiliser le service DHCP du routeur?\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
configure_dhcp(ipAddress, netmask)
else:
try:
print("Stop dnsmasq")
p = os.system("systemctl stop dnsmasq")
print("Disable dnsmasq")
p = os.system("systemctl disable dnsmasq")
except:
print("La desactivation du service DHCP a echoue!")
if checkUserInput.question_and_verification("Voulez-vous utilisez les adresses IPv6?\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
configure_ipv6()
    search_network_informations(ipAddress, netmask, "/var/snap/ssnmode", "interfaces_static")
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# -*- python -*-
import logging
import unittest
import electionfraud.testdata as eftd
import electionfraud.redist as redist
logging.basicConfig(level=logging.DEBUG)
class RedistributorTest(unittest.TestCase):
pass
class TestIdentity(RedistributorTest):
def setUp(self):
self.rd = redist.Identity(eftd.FOOD_STV_20)
def test_identity(self):
redistributed = [x for x in self.rd]
self.assertEqual(redistributed, eftd.FOOD_STV_20)
class TestNthSubset(RedistributorTest):
def setUp(self):
self.n = 7
def test_divisor(self):
self.assertRaises(ValueError, redist.NthSubset, eftd.FOOD_STV_20, 1)
self.assertRaises(ValueError, redist.NthSubset, eftd.FOOD_STV_20, 2)
self.assertRaises(ValueError, redist.NthSubset, eftd.FOOD_STV_20, 4)
def test_depth(self):
        rd = redist.NthSubset(range(20), self.n)
redistributed = [x for x in rd]
self.assertEqual(redistributed, [6, 13, 0, 7, 14, 1, 8, 15, 2, 9, 16, 3, 10, 17, 4, 11, 18, 5, 12, 19])
def test_food_distribution(self):
rd = redist.NthSubset(eftd.FOOD_STV_20, self.n)
redistributed = [x for x in rd]
self.assertEqual(len(eftd.FOOD_STV_20), len(redistributed))
self.assertNotEqual(eftd.FOOD_STV_20, redistributed)
class TestCincinnati(RedistributorTest):
def test_cincy_divisor(self):
self.assertRaises(ValueError, redist.Cincinnati, range(11))
def test_cincy_depth(self):
rd = redist.Cincinnati(range(20))
redistributed = [x for x in rd]
self.assertEqual(redistributed, [10, 1, 12, 3, 14, 5, 16, 7, 18, 9, 0, 11, 2, 13, 4, 15, 6, 17, 8, 19])
def test_cincy_food(self):
rd = redist.Cincinnati(eftd.FOOD_STV_20)
redistributed = [x for x in rd]
self.assertEqual(len(eftd.FOOD_STV_20), len(redistributed))
self.assertNotEqual(eftd.FOOD_STV_20, redistributed)
class TestHareRandom(RedistributorTest):
def setUp(self):
pass
def test_hare_depth(self):
tedium = 10
shuffles = list()
for i in range(tedium):
rd = redist.HareRandom(eftd.FOOD_STV_20)
shuffle = [x for x in rd]
self.assertEqual(len(eftd.FOOD_STV_20), len(shuffle))
self.assertNotEqual(eftd.FOOD_STV_20, shuffle)
shuffles.append(shuffle)
for i in range(tedium - 1):
self.assertNotEqual(shuffles[i], shuffles[i + 1])
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
#%% cell
"""
# Solving a New Keynesian model with Python
This file is part of a computational appendix that accompanies the paper.
> MATLAB, Python, Julia: What to Choose in Economics?
>
> Coleman, Lyon, Maliar, and Maliar (2017)
In order to run the codes in this file you will need to install and
configure a few Python packages. We recommend following the instructions
on
[quantecon.org](https://lectures.quantecon.org/jl/getting_started.html)
for getting a base python installation set up. Then to acquire
additional packages used in this file, uncomment the lines in the
cell below (delete the `#` and space at the beginning of the line) and
then run the cell:
For some details regarding the model solved in this file, please see
the [companion notebook that describes the model](http://bookshelf.quantecon.org/submission/59fa1b45145fc3772b0cef82).
"""
#%% cell
# !pip install git+https://github.com/EconForge/interpolation.py.git
# !pip install git+https://github.com/naught101/sobol_seq.git
# !pip install requests
#%% cell
"""
## Python Code
The Python version of our algorithm is implemented as a few methods defined on
a core class named `Model`. This class is itself composed of instances of three
different classes that hold the model parameters, steady state, and grids
needed to describe the numerical model. Before we get to the classes, we need
to bring in some dependencies:
"""
#%% cell
import os
import sys
import math
from math import sqrt
import time as time
from collections import namedtuple
import requests
if "table" not in sys.argv:
import matplotlib.pyplot as plt
# turn on interactive mode so plots display automatically
plt.ion()
import numpy as np
from numpy import exp
from scipy.io import loadmat
from interpolation.complete_poly import (_complete_poly_impl_vec,
_complete_poly_impl,
complete_polynomial)
import sobol_seq
# get path to this folder
DIR = os.path.abspath("")
# set seed on random number generator to make results reproducible
np.random.seed(42)
#%% cell
"""
We will also need the following two functions, which use monomial rules to
compute quadrature nodes and weights:
"""
#%% cell
def qnwmonomial1(vcv):
n = vcv.shape[0]
n_nodes = 2*n
z1 = np.zeros((n_nodes, n))
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i in range(n):
z1[2*i:2*(i+1), i] = [1, -1]
sqrt_vcv = np.linalg.cholesky(vcv)
R = np.sqrt(n)*sqrt_vcv
ϵj = z1 @ R
ωj = np.ones(n_nodes) / n_nodes
return ϵj, ωj
def qnwmonomial2(vcv):
n = vcv.shape[0]
assert n == vcv.shape[1], "Variance covariance matrix must be square"
z0 = np.zeros((1, n))
z1 = np.zeros((2*n, n))
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i in range(n):
z1[2*i:2*(i+1), i] = [1, -1]
z2 = np.zeros((2*n*(n-1), n))
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p in range(n-1):
for q in range(p+1, n):
z2[4*i:4*(i+1), p] = [1, -1, 1, -1]
z2[4*i:4*(i+1), q] = [1, 1, -1, -1]
i += 1
sqrt_vcv = np.linalg.cholesky(vcv)
R = np.sqrt(n+2)*sqrt_vcv
S = np.sqrt((n+2)/2)*sqrt_vcv
ϵj = np.row_stack([z0, z1 @ R, z2 @ S])
ωj = np.concatenate([2/(n+2) * np.ones(z0.shape[0]),
(4-n)/(2*(n+2)**2) * np.ones(z1.shape[0]),
1/(n+2)**2 * np.ones(z2.shape[0])])
return ϵj, ωj
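#%% cell
"""
As a quick sanity check (not part of the original appendix), both rules can be
exercised on a small identity covariance matrix: `qnwmonomial1` returns 2N
nodes and `qnwmonomial2` returns 2N**2+1 nodes, and each set of weights sums
to one.
"""
#%% cell
vcv_demo = np.eye(2)  # illustrative 2x2 covariance matrix, not a model object
eps_demo1, w_demo1 = qnwmonomial1(vcv_demo)
eps_demo2, w_demo2 = qnwmonomial2(vcv_demo)
assert eps_demo1.shape == (4, 2) and abs(w_demo1.sum() - 1) < 1e-12
assert eps_demo2.shape == (9, 2) and abs(w_demo2.sum() - 1) < 1e-12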
#%% cell
"""
## Classes
First we have the `Params` class, which holds all the model parameters as well
as the parameters that drive the algorithm.
"""
#%% cell
SteadyState = namedtuple("SteadyState",
["Yn", "Y", "π", "δ", "L", "C", "F", "S", "R", "w"])
class Params(object):
def __init__(self, zlb=True, γ=1, β=0.99, ϑ=2.09, ϵ=4.45, ϕ_y=0.07,
ϕ_π=2.21, μ=0.82, Θ=0.83, πstar=1, gbar=0.23,
ρηR=0.0, ρηa=0.95, ρηL=0.25, ρηu=0.92, ρηB=0.0, ρηG=0.95,
σηR=0.0028, σηa=0.0045, σηL=0.0500, σηu=0.0054, σηB=0.0010,
σηG=0.0038, degree=2):
self.zlb = zlb # whether or not the zlb should be imposed
self.γ = γ # Utility-function parameter
self.β = β # Discount factor
self.ϑ = ϑ # Utility-function parameter
self.ϵ = ϵ # Parameter in the Dixit-Stiglitz aggregator
self.ϕ_y = ϕ_y # Parameter of the Taylor rule
self.ϕ_π = ϕ_π # Parameter of the Taylor rule
self.μ = μ # Parameter of the Taylor rule
self.Θ = Θ # Share of non-reoptimizing firms (Calvo's pricing)
self.πstar = πstar # Target (gross) inflation rate
self.gbar = gbar # Steady-state share of gov. spending in output
# autocorrelation coefficients
self.ρηR = ρηR # See process (28) in MM (2015)
self.ρηa = ρηa # See process (22) in MM (2015)
self.ρηL = ρηL # See process (16) in MM (2015)
self.ρηu = ρηu # See process (15) in MM (2015)
self.ρηB = ρηB # See process (17) in MM (2015)
self.ρηG = ρηG # See process (26) in MM (2015)
# standard deviations
self.σηR = σηR # See process (28) in MM (2015)
self.σηa = σηa # See process (22) in MM (2015)
self.σηL = σηL # See process (16) in MM (2015)
self.σηu = σηu # See process (15) in MM (2015)
self.σηB = σηB # See process (17) in MM (2015)
self.σηG = σηG # See process (26) in MM (2015)
self.degree = degree
@property
def vcov(self):
return np.diag([self.σηR**2, self.σηa**2, self.σηL**2,
self.σηu**2, self.σηB**2, self.σηG**2])
@property
def steady_state(self):
Yn_ss = exp(self.gbar)**(self.γ/(self.ϑ+self.γ))
Y_ss = Yn_ss
π_ss = 1.0
δ_ss = 1.0
L_ss = Y_ss/δ_ss
C_ss = (1-self.gbar)*Y_ss
F_ss = C_ss**(-self.γ)*Y_ss/(1-self.β*self.Θ*π_ss**(self.ϵ-1))
S_ss = L_ss**self.ϑ*Y_ss/(1-self.β*self.Θ*π_ss**self.ϵ)
R_ss = π_ss/self.β
w_ss = (L_ss**self.ϑ)*(C_ss**self.γ)
return SteadyState(
Yn_ss, Y_ss, π_ss, δ_ss, L_ss, C_ss, F_ss, S_ss, R_ss, w_ss
)
@property
def grid_size(self):
"Grid size pinned down by degree of polynomials"
return {1: 20, 2: 100, 3: 300, 4: 1000, 5: 2000}[self.degree]
#%% cell
"""
Notice that we have a namedtuple to hold the steady state of the model. Using
the namedtuple infrastructure allows us to have convenient "dot-style" access
to the steady state, without defining a full class.
Given an instance of `Params` class, we can construct the grid on which we will
solve the model.
The `Grids` class holds this grid as well as matrices used to compute
expectations.
To match the Julia and Matlab versions of the code, the `__init__` method for
`Grids` below loads pre-generated grids from a `.mat` file for both Sobol and
random grids. This ensures that the exact same code is run in each language. If
you would like to generate the grids in pure Python, you can set the
`grid_source` keyword argument to `"python"`
"""
#%% cell
class Grids(object):
def __init__(self, p, kind="random", grid_source="mat"):
m = p.grid_size
σ = np.array([p.σηR, p.σηa, p.σηL, p.σηu, p.σηB, p.σηG])
ρ = np.array([p.ρηR, p.ρηa, p.ρηL, p.ρηu, p.ρηB, p.ρηG])
if kind == "sobol":
if grid_source == "mat":
_path = os.path.join(DIR, "Sobol_grids.mat")
s = loadmat(_path)["Sobol_grids"][:m, :]
else:
s = sobol_seq.i4_sobol_generate(8, m)
sη = s[:, :6]
η = (-2*σ + 4*(sη.max(0)-sη) / (sη.max(0)-sη.min(0))*σ)/np.sqrt(1-ρ**2)
R = 1+0.05*(np.max(s[:, 6])-s[:, 6])/(np.max(s[:, 6])-np.min(s[:, 6]))
δ = 0.95+0.05*(np.max(s[:, 7])-s[:, 7])/(np.max(s[:, 7])-np.min(s[:, 7]))
else:
# Values of exogenous state variables are distributed uniformly
# in the interval +/- std/sqrt(1-rho_nu**2)
if grid_source == "mat":
_path = os.path.join(DIR, "random_grids.mat")
s = loadmat(_path)["random_grids"][:m, :]
else:
s = np.random.rand(m, 8)
sη = s[:, :6]
η = (-2*σ + 4*σ*sη) / np.sqrt(1-ρ**2)
# Values of endogenous state variables are distributed uniformly
# in the intervals [1 1.05] and [0.95 1], respectively
R = 1 + 0.05 * s[:, 6]
δ = 0.95 + 0.05 * s[:, 7]
ηR = η[:, 0]
ηa = η[:, 1]
ηL = η[:, 2]
ηu = η[:, 3]
ηB = η[:, 4]
ηG = η[:, 5]
self.ηR = ηR
self.ηa = ηa
self.ηL = ηL
self.ηu = ηu
self.ηB = ηB
self.ηG = ηG
self.R = R
self.δ = δ
# shape (8, m)
self.X = np.vstack([np.log(R), np.log(δ), η.T])
# shape (n_complete(8, p.Degree), m)
self.X0_G = {
1: complete_polynomial(self.X, 1),
p.degree: complete_polynomial(self.X, p.degree)
}
# shape (2*n=12, n=6)
self.ϵ_nodes, self.ω_nodes = qnwmonomial1(p.vcov)
# all shape (len(ϵ_nodes), m)
self.ηR1 = p.ρηR * ηR[None, :] + self.ϵ_nodes[:, None, 0]
self.ηa1 = p.ρηa * ηa[None, :] + self.ϵ_nodes[:, None, 1]
self.ηL1 = p.ρηL * ηL[None, :] + self.ϵ_nodes[:, None, 2]
self.ηu1 = p.ρηu * ηu[None, :] + self.ϵ_nodes[:, None, 3]
self.ηB1 = p.ρηB * ηB[None, :] + self.ϵ_nodes[:, None, 4]
self.ηG1 = p.ρηG * ηG[None, :] + self.ϵ_nodes[:, None, 5]
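#%% cell
"""
A quick illustration (not part of the original appendix) of the two classes
above: steady-state values support "dot-style" access, and a grid can be built
without the `.mat` files by passing `grid_source="python"`.
"""
#%% cell
p_demo = Params(degree=2)
print(p_demo.steady_state.R)  # steady-state gross interest rate, 1/β
g_demo = Grids(p_demo, kind="random", grid_source="python")
print(g_demo.X.shape)  # (8, 100): 8 state variables on a degree-2 grid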
#%% cell
"""
Finally, we construct the Model class, which has an instance of Params,
SteadyState and Grids as its three attributes.
This block of code will be longer than the others because we also include
routines to solve and simulate the model as methods on the Model class. These
methods will be clearly marked and commented.
"""
#%% cell
class Model(object):
def __init__(self, p=Params(), g=None):
if g is None:
g = Grids(p)
self.p = p
self.g = g
self.s = self.p.steady_state
def init_coefs(self, degree):
"Iniital guess for coefs. We evaluate interpoland as coefs @ basis_mat"
npol = self.g.X0_G[degree].shape[0]
coefs = np.full((3, npol), 1e-5)
coefs[:, 0] = [self.s.S, self.s.F, self.s.C**(-self.p.γ)]
return coefs
def step(self, S, F, C, δ0, R0, ηG, ηa, ηL, ηR):
# simplify notation
Θ, ϵ, gbar, ϑ, γ = self.p.Θ, self.p.ϵ, self.p.gbar, self.p.ϑ, self.p.γ
β, μ, ϕ_π, ϕ_y = self.p.β, self.p.μ, self.p.ϕ_π, self.p.ϕ_y
πstar = self.p.πstar
        # Compute π(t) from condition (35) in MM (2015)
π0 = ((1-(1-Θ)*(S/F)**(1-ϵ))/Θ)**(1/(ϵ-1))
# Compute delta(t) from condition (36) in MM (2015)
δ1 = ((1-Θ)*((1-Θ*π0**(ϵ-1))/(1-Θ))**(ϵ/(ϵ-1))+Θ*π0**ϵ/δ0)**(-1)
# Compute Y(t) from condition (38) in MM (2015)
Y0 = C/(1-gbar/exp(ηG))
# Compute L(t) from condition (37) in MM (2015)
L0 = Y0/exp(ηa)/δ1
# Compute Yn(t) from condition (31) in MM (2015)
Yn0 = (exp(ηa)**(1+ϑ)*(1-gbar/exp(ηG))**(-γ)/exp(ηL))**(1/(ϑ+γ))
# Compute R(t) from conditions (27), (39) in MM (2015) -- Taylor rule
R1 = πstar/β*(R0*β/πstar)**μ*((π0/πstar)**ϕ_π * (Y0/Yn0)**ϕ_y)**(1-μ)*exp(ηR)
return π0, δ1, Y0, L0, Yn0, R1
def solve(self, damp=0.1, tol=1e-7, verbose=False):
# rename self to m to make code below readable
m = self
n = len(m.g.ηR)
n_nodes = len(m.g.ω_nodes)
## allocate memory
# euler equations
e = np.zeros((3, n))
# previous iteration S, F, C
S0_old_G = np.ones(n)
F0_old_G = np.ones(n)
C0_old_G = np.ones(n)
# current iteration S, F, C
S0_new_G = np.ones(n)
F0_new_G = np.ones(n)
C0_new_G = np.ones(n)
# future S, F, C
S1 = np.zeros((n_nodes, n))
F1 = np.zeros((n_nodes, n))
C1 = np.zeros((n_nodes, n))
degs = [self.p.degree] if self.p.degree == 1 else [1, self.p.degree]
for deg in degs:
# housekeeping
err = 1.0
it = 0
X0_G = m.g.X0_G[deg]
start_time = time.time()
if deg <= 2:
coefs = self.init_coefs(deg)
else:
coefs = np.linalg.lstsq(X0_G.T, e.T)[0].T
# old_coefs = coefs.copy()
# coefs = self.init_coefs(deg)
# coefs[:, :old_coefs.shape[1]] = old_coefs
while err > tol:
it += 1
# Current choices (at t)
# ------------------------------
SFC0 = coefs @ X0_G
S0 = SFC0[0, :] # Compute S(t) using coefs
F0 = SFC0[1, :] # Compute F(t) using coefs
C0 = (SFC0[2, :])**(-1/m.p.γ) # Compute C(t) using coefs
π0, δ1, Y0, L0, Yn0, R1 = self.step(
S0, F0, C0, m.g.δ, m.g.R, m.g.ηG, m.g.ηa, m.g.ηL, m.g.ηR
)
if self.p.zlb:
R1 = np.maximum(R1, 1.0)
for u in range(n_nodes):
# Form complete polynomial of degree "Degree" (at t+1 states)
grid1 = [np.log(R1), np.log(δ1), m.g.ηR1[u, :], m.g.ηa1[u, :],
m.g.ηL1[u, :], m.g.ηu1[u, :], m.g.ηB1[u, :], m.g.ηG1[u, :]]
X1 = complete_polynomial(grid1, deg)
S1[u, :] = coefs[0, :] @ X1 # Compute S(t+1)
F1[u, :] = coefs[1, :] @ X1 # Compute F(t+1)
C1[u, :] = (coefs[2, :] @ X1)**(-1/m.p.γ) # Compute C(t+1)
# Compute next-period π using condition
# (35) in MM (2015)
π1 = ((1-(1-m.p.Θ)*(S1/F1)**(1-m.p.ϵ))/m.p.Θ)**(1/(m.p.ϵ-1))
# Evaluate conditional expectations in the Euler equations
#---------------------------------------------------------
e[0, :] = exp(m.g.ηu)*exp(m.g.ηL)*L0**m.p.ϑ*Y0/exp(m.g.ηa) + m.g.ω_nodes @ (m.p.β*m.p.Θ*π1**m.p.ϵ*S1)
e[1, :] = exp(m.g.ηu)*C0**(-m.p.γ)*Y0 + m.g.ω_nodes @ (m.p.β*m.p.Θ*π1**(m.p.ϵ-1)*F1)
e[2, :] = m.p.β*exp(m.g.ηB)/exp(m.g.ηu)*R1 * (m.g.ω_nodes @ ((exp(m.g.ηu1)*C1**(-m.p.γ)/π1)))
# Variables of the current iteration
#-----------------------------------
np.copyto(S0_new_G, S0)
np.copyto(F0_new_G, F0)
np.copyto(C0_new_G, C0)
# Compute and update the coefficients of the decision functions
# -------------------------------------------------------------
coefs_hat = np.linalg.lstsq(X0_G.T, e.T)[0].T
# Update the coefficients using damping
coefs = damp*coefs_hat + (1-damp)*coefs
# Evaluate the percentage (unit-free) difference between the values
# on the grid from the previous and current iterations
# -----------------------------------------------------------------
# The convergence criterion is adjusted to the damping parameters
err = (np.mean(np.abs(1-S0_new_G/S0_old_G)) +
np.mean(np.abs(1-F0_new_G/F0_old_G)) +
np.mean(np.abs(1-C0_new_G/C0_old_G)))
# Store the obtained values for S(t), F(t), C(t) on the grid to
# be used on the subsequent iteration in Section 10.2.6
#-----------------------------------------------------------------------
np.copyto(S0_old_G, S0_new_G)
np.copyto(F0_old_G, F0_new_G)
np.copyto(C0_old_G, C0_new_G)
if it % 20 == 0 and verbose:
print("On iteration {:d} err is {:6.7e}".format(it, err))
elapsed = time.time() - start_time
return coefs, elapsed
def simulate(self, coefs=None, capT=10201):
if coefs is None:
coefs, elapsed = self.solve()
# rename self to m to make code below readable
m = self
# create namedtuple to hold simulation results in an organized container
Simulation = namedtuple(
"Simulation",
["nuR", "nua", "nuL", "nuu", "nuB", "nuG",
"δ", "R", "S", "F", "C", "π", "Y", "L", "Yn", "w"]
)
        # 11. Simulating a time-series solution
#---------------------------------------
# Initialize the values of 6 exogenous shocks and draw innovations
#-----------------------------------------------------------------
nuR = np.zeros(capT)
nua = np.zeros(capT)
nuL = np.zeros(capT)
nuu = np.zeros(capT)
nuB = np.zeros(capT)
nuG = np.zeros(capT)
# Generate the series for shocks
#-------------------------------
_path = os.path.join(DIR, "epsi_test_NK.mat")
rands = (loadmat(_path)["epsi_test_NK"])
capT = rands.shape[0]
# rands = np.random.randn(capT-1, 6)
for t in range(capT-1):
nuR[t+1] = self.p.ρηR*nuR[t] + self.p.σηR*rands[t, 0]
nua[t+1] = self.p.ρηa*nua[t] + self.p.σηa*rands[t, 1]
nuL[t+1] = self.p.ρηL*nuL[t] + self.p.σηL*rands[t, 2]
nuu[t+1] = self.p.ρηu*nuu[t] + self.p.σηu*rands[t, 3]
nuB[t+1] = self.p.ρηB*nuB[t] + self.p.σηB*rands[t, 4]
nuG[t+1] = self.p.ρηG*nuG[t] + self.p.σηG*rands[t, 5]
# Allocate memory for time series of ...
δ = np.ones(capT+1) # ... delta(t)
R = np.ones(capT+1) # ... R(t)
S = np.ones(capT) # ... S(t)
F = np.ones(capT) # ... F(t)
C = np.ones(capT) # ... C(t)
π = np.ones(capT) # ... π(t)
Y = np.ones(capT) # ... Y(t)
L = np.ones(capT) # ... L(t)
Yn = np.ones(capT) # ... Yn(t)
w = np.ones(capT) # ... w(t)
pol_bases = np.empty(coefs.shape[1])
states = np.empty(8)
for t in range(capT):
states[0] = math.log(R[t])
states[1] = math.log(δ[t])
states[2] = nuR[t]
states[3] = nua[t]
states[4] = nuL[t]
states[5] = nuu[t]
states[6] = nuB[t]
states[7] = nuG[t]
_complete_poly_impl_vec(states, self.p.degree, pol_bases)
vals = coefs @ pol_bases
S[t] = vals[0]
F[t] = vals[1]
C[t] = (vals[2])**(-1/m.p.γ)
π[t], δ[t+1], Y[t], L[t], Yn[t], R[t+1] = self.step(
S[t], F[t], C[t], δ[t], R[t], nuG[t], nua[t], nuL[t], nuR[t]
)
# Compute real wage
w[t] = exp(nuL[t])*(L[t]**m.p.ϑ)*(C[t]**m.p.γ)
# If ZLB is imposed, set R(t)=1 if ZLB binds
if self.p.zlb:
R[t+1] = max(R[t+1], 1.0)
return Simulation(nuR, nua, nuL, nuu, nuB, nuG, δ, R, S, F, C, π, Y, L, Yn, w)
def residuals(self, coefs, sim, burn=200):
m = self # rename self to m so the rest of this code is more readable
capT = len(sim.w)
resids = np.zeros((capT, 9))
# Integration method for evaluating accuracy
# ------------------------------------------
# Monomial integration rule with 2N**2+1 nodes
ϵ_nodes, ω_nodes = qnwmonomial2(m.p.vcov)
n_nodes = len(ω_nodes)
# Allocate for arrays needed in the loop
basis_mat = np.empty((8, n_nodes))
X1 = np.empty((coefs.shape[1], n_nodes))
nuR1 = np.empty(n_nodes)
nua1 = np.empty(n_nodes)
nuL1 = np.empty(n_nodes)
nuu1 = np.empty(n_nodes)
nuB1 = np.empty(n_nodes)
nuG1 = np.empty(n_nodes)
for t in range(capT): # For each given point,
# Take the corresponding value for shocks at t
#---------------------------------------------
nuR0 = sim.nuR[t] # nuR(t)
nua0 = sim.nua[t] # nua(t)
nuL0 = sim.nuL[t] # nuL(t)
nuu0 = sim.nuu[t] # nuu(t)
nuB0 = sim.nuB[t] # nuB(t)
nuG0 = sim.nuG[t] # nuG(t)
            # Extract time t values for all other variables (and t+1 for R, δ)
#------------------------------------------------------------------
R0 = sim.R[t] # R(t-1)
δ0 = sim.δ[t] # δ(t-1)
R1 = sim.R[t+1] # R(t)
δ1 = sim.δ[t+1] # δ(t)
L0 = sim.L[t] # L(t)
Y0 = sim.Y[t] # Y(t)
Yn0 = sim.Yn[t] # Yn(t)
π0 = sim.π[t] # π(t)
S0 = sim.S[t] # S(t)
F0 = sim.F[t] # F(t)
C0 = sim.C[t] # C(t)
# Fill basis matrix with R1, δ1 and shocks
#-----------------------------------------
# Note that we do not premultiply by standard deviations as ϵ_nodes
# already include them. All these variables are vectors of length n_nodes
nuR1[:] = nuR0*m.p.ρηR + ϵ_nodes[:, 0]
nua1[:] = nua0*m.p.ρηa + ϵ_nodes[:, 1]
nuL1[:] = nuL0*m.p.ρηL + ϵ_nodes[:, 2]
nuu1[:] = nuu0*m.p.ρηu + ϵ_nodes[:, 3]
nuB1[:] = nuB0*m.p.ρηB + ϵ_nodes[:, 4]
nuG1[:] = nuG0*m.p.ρηG + ϵ_nodes[:, 5]
basis_mat[0, :] = np.log(R1)
basis_mat[1, :] = np.log(δ1)
basis_mat[2, :] = nuR1
basis_mat[3, :] = nua1
basis_mat[4, :] = nuL1
basis_mat[5, :] = nuu1
basis_mat[6, :] = nuB1
basis_mat[7, :] = nuG1
# Future choices at t+1
#----------------------
# Form a complete polynomial of degree "Degree" (at t+1) on future state
# variables; n_nodes-by-npol
_complete_poly_impl(basis_mat, self.p.degree, X1)
# Compute S(t+1), F(t+1) and C(t+1) in all nodes using coefs
S1 = coefs[0, :] @ X1
F1 = coefs[1, :] @ X1
C1 = (coefs[2, :] @ X1)**(-1/m.p.γ)
# Compute π(t+1) using condition (35) in MM (2015)
π1 = ((1-(1-m.p.Θ)*(S1/F1)**(1-m.p.ϵ))/m.p.Θ)**(1/(m.p.ϵ-1))
# Compute residuals for each of the 9 equilibrium conditions
#-----------------------------------------------------------
resids[t, 0] = 1 - (ω_nodes @
(exp(nuu0)*exp(nuL0)*L0**m.p.ϑ*Y0/exp(nua0) +
m.p.β*m.p.Θ*π1**m.p.ϵ*S1)/S0
)
resids[t, 1] = 1 - (ω_nodes @
(exp(nuu0)*C0**(-m.p.γ)*Y0 + m.p.β*m.p.Θ*π1**(m.p.ϵ-1)*F1)/F0
)
resids[t, 2] = 1 - (ω_nodes @
(m.p.β*exp(nuB0)/exp(nuu0)*R1*exp(nuu1)*C1**(-m.p.γ)/π1)/C0**(-m.p.γ)
)
resids[t, 3] = 1 - ((1-m.p.Θ*π0**(m.p.ϵ-1))/(1-m.p.Θ))**(1/(1-m.p.ϵ))*F0/S0
resids[t, 4] = 1 - ((1-m.p.Θ)*((1-m.p.Θ*π0**(m.p.ϵ-1))/(1-m.p.Θ))**(m.p.ϵ/(m.p.ϵ-1)) + m.p.Θ*π0**m.p.ϵ/δ0)**(-1)/δ1
resids[t, 5] = 1 - exp(nua0)*L0*δ1/Y0
resids[t, 6] = 1 - (1-m.p.gbar/exp(nuG0))*Y0/C0
resids[t, 7] = 1 - (exp(nua0)**(1+m.p.ϑ)*(1-m.p.gbar/exp(nuG0))**(-m.p.γ)/exp(nuL0))**(1/(m.p.ϑ+m.p.γ))/Yn0
resids[t, 8] = 1 - m.s.π/m.p.β*(R0*m.p.β/m.s.π)**m.p.μ*((π0/m.s.π)**m.p.ϕ_π * (Y0/Yn0)**m.p.ϕ_y)**(1-m.p.μ)*exp(nuR0)/R1 # Taylor rule
# If the ZLB is imposed and R>1, the residuals in the Taylor rule (the
# 9th equation) are zero
if m.p.zlb and R1 <= 1:
resids[t, 8] = 0.0
return resids[burn:, :]
#%% cell
"""
## Running the code
Now that we've done all the hard work to define the model, its solution and
simulation, and accuracy checks, let's put things together and run the code!
"""
#%% cell
def ensurefile(url, localpath):
if not os.path.isfile(localpath):
print("Downloading {} to {}".format(url, localpath))
with open(localpath, "wb") as f:
res = requests.get(url)
f.write(res.content)
def main(m=None, file=None, plot=True, verbose=False):
ensurefile("https://github.com/sglyon/CLMMJuliaPythonMatlab/raw/master/NewKeynesian/Sobol_grids.mat", "Sobol_grids.mat")
ensurefile("https://github.com/sglyon/CLMMJuliaPythonMatlab/raw/master/NewKeynesian/epsi_test_NK.mat", "epsi_test_NK.mat")
ensurefile("https://github.com/sglyon/CLMMJuliaPythonMatlab/raw/master/NewKeynesian/random_grids.mat", "random_grids.mat")
if m is None:
m = Model()
if file is None:
mprint = print
else:
def mprint(*x):
print(*x, file=file)
# solve the model
coefs, solve_time = m.solve(verbose=verbose)
# simulate the model
t1 = time.time()
sim = m.simulate(coefs)
sim_time = time.time() - t1
# check accuracy
t1 = time.time()
resids = m.residuals(coefs, sim)
resids_time = time.time() - t1
tot_time = solve_time + sim_time + resids_time
max_err = np.log10(np.abs(resids).max() + 1e-16)
max_err_eqn = np.log10(np.abs(resids).max(0) + 1e-16)
l1 = np.log10(np.abs(resids).mean() + 1e-16)
mprint("Solver time (in seconds): ", solve_time)
mprint("Simulation time (in seconds): ", sim_time)
mprint("Residuals time (in seconds): ", resids_time)
mprint("Total time (in seconds): ", tot_time)
mprint("\nAPPROXIMATION ERRORS (log10):")
mprint("\ta) mean error in the model equations: {:0.3f}".format(l1))
mprint("\tb) max error in the model equations: {:0.3f}".format(max_err))
mprint("\tc) max error by equation: ", max_err_eqn)
mprint("tex row: {:.2f} & {:.2f} & {:.2f}".format(l1, max_err, solve_time))
# plot simulated path of variables
if plot:
fig, ax = plt.subplots(2, 2, figsize=(10, 8))
t = np.arange(1, 101)
ax[0, 0].plot(t, sim.S[t], label="S")
ax[0, 0].plot(t, sim.F[t], label="F")
ax[0, 0].set_title("Figure 1a. S and F")
ax[0, 0].legend()
ax[0, 1].plot(t, sim.Y[t], label="Y")
ax[0, 1].plot(t, sim.Yn[t], label="Yn")
ax[0, 1].set_title("Figure 1b. Output and natural output")
ax[0, 1].legend()
ax[1, 0].plot(t, sim.C[t], label="C")
ax[1, 0].plot(t, sim.L[t], label="L")
ax[1, 0].set_title("Figure 1c. Consumption and labor")
ax[1, 0].legend()
ax[1, 1].plot(t, sim.δ[t], label="δ")
ax[1, 1].plot(t, sim.R[t], label="R")
ax[1, 1].plot(t, sim.π[t], label="π")
ax[1, 1].set_title("Figure 1d. Distortion, interest rate and inflation")
ax[1, 1].legend()
return fig, solve_time, sim_time, resids_time, coefs, sim, resids, l1, max_err
else:
return None, solve_time, sim_time, resids_time, coefs, sim, resids, l1, max_err
#%% cell
def build_paper_table():
msg = "starting πstar={πstar} and degree={degree} zlb={zlb}"
with open(os.path.join(DIR, "output.csv"), "w") as f_csv:
f_csv.write("pi_star,zlb,degree,solve_time,l_1,l_inf\n")
with open(os.path.join(DIR, "output.log"), "w") as f:
for params in (dict(πstar=1.0, zlb=False),
dict(πstar=1, zlb=True),
dict(πstar=1 + 0.0598/4, zlb=False),):
for degree in range(1, 6):
print(msg.format(degree=degree, **params))
p = Params(degree=degree, **params)
g = Grids(p, kind="sobol")
m = Model(p, g)
print(msg.format(degree=degree, **params), file=f)
outputs = main(m, f, plot=False)
_stuff = [params["πstar"], params["zlb"]*1, degree,
outputs[1], outputs[7], outputs[8]]
f_csv.write(",".join(map(str, _stuff)) + "\n")
print("\n"*5, file=f)
# flush io streams so we can see output in real time
f_csv.flush()
f.flush()
#%% cell
if "table" in sys.argv:
build_paper_table()
else:
results = main()
|
nilq/baby-python
|
python
|
arr = [1, 34, 3, 98, 9, 76, 45, 4]
l = len(arr)
digits = []
# Converts a number into its digits, appending them to the shared `digits` list.
def digitconverter(num):
    while num != 0:
        digits.append(num % 10)
        num = num // 10
# Alternative string-based version:
# def digitconverter(num):
#     number = str(num)
#     for i in range(len(number)):
#         digits.append(int(number[i]))
for i in range(l):
    if arr[i] >= 10:
digitconverter(arr[i])
else:
digits.append(arr[i])
# print(digits)
digits.sort()
digits = digits[::-1] #descending order sort
# print(digits) # array of max no. possible
n = "".join(map(str,digits)) # converting every digit to str
print(n) # required max no. possible
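# Equivalent one-liner for reference (same idea, not in the original snippet):
# print("".join(sorted((d for x in arr for d in str(x)), reverse=True)))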
|
nilq/baby-python
|
python
|
from rest_framework.serializers import ModelSerializer
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from .models import IdentifiedBaseStation, Operator
class IdentifiedBaseStationSerializer(GeoFeatureModelSerializer):
class Meta:
model = IdentifiedBaseStation
geo_field = 'point'
fields = ('id', 'cgi', 'radio', 'average_signal')
class OperatorSerializer(ModelSerializer):
class Meta:
model = Operator
fields = ('id', 'friendly_name', 'name', 'cnpj')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pathlib
import pickle
import yaml
import numpy as np
from librosa import load
from tools import yaml_loader
__author__ = 'Konstantinos Drossos -- Tampere University'
__docformat__ = 'reStructuredText'
__all__ = [
'dump_pickle_file', 'load_pickle_file', 'read_txt_file',
'load_audio_file', 'dump_numpy_object', 'load_numpy_object',
'load_yaml_file', 'load_settings_file'
]
def dump_numpy_object(np_obj, file_name, ext='.npy', replace_ext=True):
"""Dumps a numpy object to HDD.
:param np_obj: Numpy object.
:type np_obj: numpy.ndarray
:param file_name: File name to be used.
:type file_name: pathlib.Path
:param ext: Extension for the dumped object.
:type ext: str
:param replace_ext: Replace extension?
:type replace_ext: bool
"""
f_name = file_name.with_suffix(ext) if replace_ext else file_name
np.save(str(f_name), np_obj)
def dump_pickle_file(obj, file_name, protocol=2):
"""Dumps an object to pickle file.
:param obj: Object to dump.
:type obj: object | list | dict | numpy.ndarray
:param file_name: Resulting file name.
:type file_name: pathlib.Path
:param protocol: Protocol to be used.
:type protocol: int
"""
with file_name.open('wb') as f:
pickle.dump(obj, f, protocol=protocol)
def load_audio_file(audio_file, sr, mono, offset=0.0, duration=None):
"""Loads the data of an audio file.
:param audio_file: Path of the audio file.
:type audio_file: pathlib.Path
:param sr: Sampling frequency to be used.
:type sr: int
:param mono: Turn to mono?
:type mono: bool
:param offset: Offset to be used (in seconds).
:type offset: float
:param duration: Duration of signal to load (in seconds).
:type duration: float|None
:return: Audio data.
:rtype: numpy.ndarray
"""
return load(path=str(audio_file), sr=sr, mono=mono,
offset=offset, duration=duration)[0]
def load_numpy_object(f_name):
"""Loads and returns a numpy object.
:param f_name: Path of the object.
:type f_name: pathlib.Path
:return: Numpy object.
:rtype: numpy.ndarray
"""
return np.load(str(f_name), allow_pickle=True)
def load_pickle_file(file_name, encoding='latin1'):
"""Loads a pickle file.
:param file_name: File name (extension included).
:type file_name: pathlib.Path
:param encoding: Encoding of the file.
:type encoding: str
:return: Loaded object.
:rtype: object | list | dict | numpy.ndarray
"""
with file_name.open('rb') as f:
return pickle.load(f, encoding=encoding)
def load_settings_file(file_name, settings_dir=pathlib.Path('settings')):
"""Reads and returns the contents of a YAML settings file.
:param file_name: Name of the settings file.
:type file_name: pathlib.Path
:param settings_dir: Directory with the settings files.
:type settings_dir: pathlib.Path
:return: Contents of the YAML settings file.
:rtype: dict
"""
settings_file_path = settings_dir.joinpath(file_name.with_suffix('.yaml'))
return load_yaml_file(settings_file_path)
def load_yaml_file(file_path):
"""Reads and returns the contents of a YAML file.
:param file_path: Path to the YAML file.
:type file_path: pathlib.Path
:return: Contents of the YAML file.
:rtype: dict
"""
with file_path.open('r') as f:
return yaml.load(f, Loader=yaml_loader.YAMLLoader)
def read_txt_file(file_name):
"""Reads a text (.txt) file and returns the contents.
:param file_name: File name of the txt file.
:type file_name: pathlib.Path
:return: Contents of the file.
:rtype: list[str]
"""
with file_name.open() as f:
return f.readlines()
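# Minimal usage sketch (not part of the original module); the file name below
# is illustrative.
if __name__ == '__main__':
    _tmp = pathlib.Path('example.pkl')
    dump_pickle_file({'a': 1}, _tmp)
    assert load_pickle_file(_tmp) == {'a': 1}
    _tmp.unlink()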
# EOF
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.7 on 2021-09-17 12:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Course', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LearnGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=32, verbose_name='Название')),
('is_studies', models.BooleanField(default=False, verbose_name='Идут занятия')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Создана')),
],
options={
'verbose_name': 'Группа',
'verbose_name_plural': 'Группы',
},
),
migrations.AlterModelOptions(
name='student',
options={'verbose_name': 'Ученик', 'verbose_name_plural': 'Ученики'},
),
migrations.AlterField(
model_name='student',
name='contact',
field=models.CharField(max_length=128, verbose_name='Контакты'),
),
migrations.AlterField(
model_name='student',
name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Зарегистрирован'),
),
migrations.AlterField(
model_name='student',
name='email',
field=models.EmailField(max_length=64, unique=True, verbose_name='Почта'),
),
migrations.AlterField(
model_name='student',
name='is_learned',
field=models.BooleanField(default=False, verbose_name='Учащийся'),
),
migrations.AlterField(
model_name='student',
name='name',
field=models.CharField(max_length=32, verbose_name='Имя'),
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32, verbose_name='Имя')),
('contact', models.CharField(max_length=128, verbose_name='Контакты')),
('email', models.EmailField(max_length=64, unique=True, verbose_name='Почта')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Зарегистрирован')),
('groups', models.ManyToManyField(to='Course.LearnGroup')),
],
options={
                'verbose_name': 'Учитель',
                'verbose_name_plural': 'Учителя',
},
),
]
|
nilq/baby-python
|
python
|
import gym
import copy
env = gym.make('LunarLander-v2')
env.seed(111) # we can fix the background for now
env.action_space.np_random.seed(123) #fix random actions for now
env.reset()
for step in range(60):
#input()
env.render()
#save info before action
if step == 55:
save_state = copy.copy(info)
print("save pos", save_state['posx'], save_state['posy'])
input("Let's restart here")
elif step > 55:
print(step, info['posx'], info['posy'])
obs, r, done, info = env.step(0)#env.action_space.sample()) # take a random action
if step == 55:
#save after state
after_55 = copy.copy(info)
obs_after_55 = obs
# print(obs)
# print(r)
# print(done)
# print(obs)
# print(info['posx'], info['posy'])
# print()
#print('SAVED lander', save_state['lander'])
print("recover pos", save_state['leg_posx'], save_state['leg_posy'])
obs, r, done, info = env.reset(game_state = save_state, action = 0)
#print('SAVED lander', save_state['lander'])
print("obs after 55")
print(obs_after_55)
print("obs after reset")
print(obs)
env.render()
print("recovered pos", after_55['leg_posx'])
print("reset state", info['leg_posx'])
input()
for step in range(56,60):
env.render()
print(step, info['posx'], info['posy'])
obs, r, done, info = env.step(0)#env.action_space.sample()) # take a random action
# print(obs)
# print(r)
# print(done)
#print(obs)
# print()
input()
env.close()
#Question: how do we reset the env with a particular state?
#didn't work. I think I need to rewrite all the leg and body code with and without previous state
#hmm I tried, but still seems to not be working. Why is it different? Need to run debugger and find out.
#okay so I reset the leg linearVelocity and angular velocity
#TODO: doesn't work!! Need to debug. The posx, posy and angle for the legs are wrong for some reason... I thought I had it working...
#Need to run two debuggers and see when it changes
#check None action!
#TODO test with non-zero actions and make sure it works and see if I can get the particles to also work.
#TODO test with dispersion noise
|
nilq/baby-python
|
python
|
import asyncio
import inspect
import warnings
import functools
from typing import Callable, Generic, TypeVar, Type, Optional
def deprecated(reason, stacklevel=2) -> Callable:
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
Source: https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
"""
if isinstance(reason, str):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func):
if inspect.isclass(func):
msg = "Call to deprecated class {name} ({reason})."
else:
msg = "Call to deprecated function {name} ({reason})."
@functools.wraps(func)
def wrapper(*args, **kwargs):
warn_deprecated(msg.format(name=func.__name__, reason=reason), stacklevel=stacklevel)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return wrapper
return decorator
if inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func1 = reason
if inspect.isclass(func1):
msg1 = "Call to deprecated class {name}."
else:
msg1 = "Call to deprecated function {name}."
@functools.wraps(func1)
def wrapper1(*args, **kwargs):
warn_deprecated(msg1.format(name=func1.__name__), stacklevel=stacklevel)
return func1(*args, **kwargs)
return wrapper1
raise TypeError(repr(type(reason)))
def warn_deprecated(message, warning=DeprecationWarning, stacklevel=2):
warnings.simplefilter('always', warning)
warnings.warn(message, category=warning, stacklevel=stacklevel)
warnings.simplefilter('default', warning)
def renamed_argument(old_name: str, new_name: str, until_version: str, stacklevel: int = 3):
"""
A meta-decorator to mark an argument as deprecated.
.. code-block:: python3
@renamed_argument("chat", "chat_id", "3.0") # stacklevel=3 by default
@renamed_argument("user", "user_id", "3.0", stacklevel=4)
def some_function(user_id, chat_id=None):
print(f"user_id={user_id}, chat_id={chat_id}")
some_function(user=123) # prints 'user_id=123, chat_id=None' with warning
some_function(123) # prints 'user_id=123, chat_id=None' without warning
some_function(user_id=123) # prints 'user_id=123, chat_id=None' without warning
:param old_name:
:param new_name:
:param until_version: the version in which the argument is scheduled to be removed
:param stacklevel: leave it to default if it's the first decorator used.
Increment with any new decorator used.
:return: decorator
"""
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
def _handling(kwargs):
"""
Returns updated version of kwargs.
"""
routine_type = 'coroutine' if is_coroutine else 'function'
if old_name in kwargs:
warn_deprecated(f"In {routine_type} '{func.__name__}' argument '{old_name}' "
f"is renamed to '{new_name}' "
f"and will be removed in aiogram {until_version}",
stacklevel=stacklevel)
kwargs = kwargs.copy()
kwargs.update({new_name: kwargs.pop(old_name)})
return kwargs
if is_coroutine:
@functools.wraps(func)
async def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return await func(*args, **kwargs)
else:
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return func(*args, **kwargs)
return wrapped
return decorator
def removed_argument(name: str, until_version: str, stacklevel: int = 3):
"""
A meta-decorator to mark an argument as removed.
.. code-block:: python3
@removed_argument("until_date", "3.0") # stacklevel=3 by default
def some_function(user_id, chat_id=None):
print(f"user_id={user_id}, chat_id={chat_id}")
:param name:
:param until_version: the version in which the argument is scheduled to be removed
:param stacklevel: leave it to default if it's the first decorator used.
Increment with any new decorator used.
:return: decorator
"""
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
def _handling(kwargs):
"""
Returns updated version of kwargs.
"""
routine_type = 'coroutine' if is_coroutine else 'function'
if name in kwargs:
warn_deprecated(
f"In {routine_type} {func.__name__!r} argument {name!r} "
f"is planned to be removed in aiogram {until_version}",
stacklevel=stacklevel,
)
kwargs = kwargs.copy()
del kwargs[name]
return kwargs
if is_coroutine:
@functools.wraps(func)
async def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return await func(*args, **kwargs)
else:
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return func(*args, **kwargs)
return wrapped
return decorator
_VT = TypeVar("_VT")
_OwnerCls = TypeVar("_OwnerCls")
class DeprecatedReadOnlyClassVar(Generic[_OwnerCls, _VT]):
"""
DeprecatedReadOnlyClassVar[Owner, ValueType]
:param warning_message: Warning message when getter gets called
:param new_value_getter: Any callable with (owner_class: Type[Owner]) -> ValueType
signature that will be executed
Usage example:
>>> class MyClass:
... some_attribute: DeprecatedReadOnlyClassVar[MyClass, int] = \
... DeprecatedReadOnlyClassVar(
... "Warning message.", lambda owner: 15)
...
>>> MyClass.some_attribute # does warning.warn with `Warning message` and returns 15 in the current case
"""
__slots__ = "_new_value_getter", "_warning_message"
def __init__(self, warning_message: str, new_value_getter: Callable[[_OwnerCls], _VT]):
self._warning_message = warning_message
self._new_value_getter = new_value_getter
def __get__(self, instance: Optional[_OwnerCls], owner: Type[_OwnerCls]):
warn_deprecated(self._warning_message, stacklevel=3)
return self._new_value_getter(owner)
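# Minimal sketch (not part of the original module) showing @deprecated in use;
# `old_sum` is an illustrative name.
if __name__ == '__main__':
    @deprecated("use a new function instead")
    def old_sum(x, y):
        return x + y
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        assert old_sum(1, 2) == 3
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)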
|
nilq/baby-python
|
python
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class HashTest3(Package):
"""Used to test package hashing
"""
homepage = "http://www.hashtest3.org"
url = "http://www.hashtest1.org/downloads/hashtest3-1.1.tar.bz2"
version('1.2', 'b' * 32)
version('1.3', 'c' * 32)
version('1.5', 'd' * 32)
version('1.6', 'e' * 32)
version('1.7', 'f' * 32)
variant('variantx', default=False, description='Test variant X')
variant('varianty', default=False, description='Test variant Y')
def setup_dependent_build_environment(self, env, dependent_spec):
pass
@when('@:1.4')
def install(self, spec, prefix):
print("install 1")
os.listdir(os.getcwd())
# sanity_check_prefix requires something in the install directory
mkdirp(prefix.bin)
@when('@1.5:')
def install(self, spec, prefix):
os.listdir(os.getcwd())
# sanity_check_prefix requires something in the install directory
mkdirp(prefix.bin)
for _version_constraint in ['@1.5', '@1.6']:
@when(_version_constraint)
def extra_phase(self, spec, prefix):
pass
|
nilq/baby-python
|
python
|
"""
Yandex Transport Webdriver API. Continuous Monitoring tests.
NOTE: These are designed to run indefinitely and check current YandexTransportAPI status.
Tests are working with Live Data, with several random delays between them.
They take a lot of time as a result.
NOTE: Tests require running YandexTransportProxy server
UPD: These are kinda questionable, they made sense in the era of YandexTransportMonitor,
but now they kinda... don't.
"""
import pytest
import random
import time
import json
from yandex_transport_webdriver_api import YandexTransportProxy
# Working server settings
SERVER_HOST = '172.17.0.1'
SERVER_PORT = 25555
# Station URLs used in tests.
# Template: {"": ""}
mini_set = \
{"Москва/Метро Сокол": "https://yandex.ru/maps/213/moscow/?ll=37.511152%2C55.804204&masstransit%5BstopId%5D=stop__9647423&mode=stop&z=17",
"Москва/Улица Станиславского": "https://yandex.ru/maps/213/moscow/?ll=37.664542%2C55.744704&masstransit%5BstopId%5D=stop__9647379&mode=stop&z=17"
}
# These are working stations, they should return getStopInfo and getLayerRegions.
station_urls = \
{"Москва/Метро Сокол": "https://yandex.ru/maps/213/moscow/?ll=37.511152%2C55.804204&masstransit%5BstopId%5D=stop__9647423&mode=stop&z=17",
"Москва/Улица Станиславского": "https://yandex.ru/maps/213/moscow/?ll=37.664542%2C55.744704&masstransit%5BstopId%5D=stop__9647379&mode=stop&z=17",
"Москва/Платформа Тестовская": "https://yandex.ru/maps/213/moscow/?ll=37.535037%2C55.752682&masstransit%5BstopId%5D=stop__9649559&mode=stop&z=17",
"Москва/Тишинская площадь": "https://yandex.ru/maps/213/moscow/?ll=37.587580%2C55.770117&masstransit%5BstopId%5D=stop__9648355&mode=stop&z=17",
"Москва/Метро Китай-город": "https://yandex.ru/maps/213/moscow/?ll=37.634151%2C55.754175&masstransit%5BstopId%5D=stop__10187976&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D37.633884%252C55.754364%26spn%3D0.001000%252C0.001000%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259C%25D0%25BE%25D1%2581%25D0%25BA%25D0%25B2%25D0%25B0%252C%2520%25D0%25A2%25D0%25B0%25D0%25B3%25D0%25B0%25D0%25BD%25D1%2581%25D0%25BA%25D0%25BE-%25D0%259A%25D1%2580%25D0%25B0%25D1%2581%25D0%25BD%25D0%25BE%25D0%25BF%25D1%2580%25D0%25B5%25D1%2581%25D0%25BD%25D0%25B5%25D0%25BD%25D1%2581%25D0%25BA%25D0%25B0%25D1%258F%2520%25D0%25BB%25D0%25B8%25D0%25BD%25D0%25B8%25D1%258F%252C%2520%25D0%25BC%25D0%25B5%25D1%2582%25D1%2580%25D0%25BE%2520%25D0%259A%25D0%25B8%25D1%2582%25D0%25B0%25D0%25B9-%25D0%25B3%25D0%25BE%25D1%2580%25D0%25BE%25D0%25B4%2520&z=19",
"Петропавловск-Камчатский/Советская улица": "https://yandex.ru/maps/78/petropavlovsk/?ll=158.650965%2C53.015840&masstransit%5BstopId%5D=1543338149&mode=stop&z=17",
"Магадан/Телевышка": "https://yandex.ru/maps/79/magadan/?ll=150.800171%2C59.560040&masstransit%5BstopId%5D=1941449091&mode=stop&z=16",
"Владивосток/Центр": "https://yandex.ru/maps/75/vladivostok/?ll=131.886671%2C43.115497&masstransit%5BstopId%5D=stop__9980150&mode=stop&sll=37.540794%2C55.925019&sspn=0.145741%2C0.050022&z=17",
"Якутск/Крестьянский рынок": "https://yandex.ru/maps/74/yakutsk/?ll=129.728396%2C62.035988&masstransit%5BstopId%5D=2040377980&mode=stop&z=16",
"Иркутск/Железнодорожный вокзал": "https://yandex.ru/maps/63/irkutsk/?ll=104.259650%2C52.282821&masstransit%5BstopId%5D=stop__9795272&mode=stop&sctx=ZAAAAAgBEAAaKAoSCWnCm9o%2BElpAEVnd6jlpJUpAEhIJE7%2Ft%2F5%2Bnwj8RVFOjSVz4qz8iBAABAgQoCjAAOKqiz7joupHNA0DVzQZIAFXNzMw%2BWABqAnJ1cACdAc3MzD2gAQCoAQA%3D&sll=104.259650%2C52.282821&sspn=0.004554%2C0.001708&text=%D0%98%D1%80%D0%BA%D1%83%D1%82%D1%81%D0%BA%20cnfywbz&z=18",
"Красноярск/Железнодорожный вокзал": "https://yandex.ru/maps/62/krasnoyarsk/?ll=92.832626%2C56.006039&masstransit%5BstopId%5D=stop__9901229&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D92.852577%252C56.010567%26spn%3D0.541885%252C0.222061%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259A%25D1%2580%25D0%25B0%25D1%2581%25D0%25BD%25D0%25BE%25D1%258F%25D1%2580%25D1%2581%25D0%25BA%2520&z=17",
"Омск/Железнодорожный вокзал": "https://yandex.ru/maps/66/omsk/?ll=73.386035%2C54.939776&masstransit%5BstopId%5D=stop__9727412&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D73.368217%252C54.989346%26spn%3D0.563622%252C0.594631%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259E%25D0%25BC%25D1%2581%25D0%25BA%2520&z=17",
"Екатеринбург/1-й километр": "https://yandex.ru/maps/54/yekaterinburg/?ll=60.611944%2C56.863058&masstransit%5BstopId%5D=stop__9810370&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D60.597473%252C56.838013%26spn%3D0.679832%252C0.389126%26&z=18",
"Самара/Некрасовская улица": "https://yandex.ru/maps/51/samara/?ll=50.102397%2C53.189701&masstransit%5BstopId%5D=stop__10097748&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D50.101788%252C53.195541%26spn%3D0.659111%252C0.459122%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%25A1%25D0%25B0%25D0%25BC%25D0%25B0%25D1%2580%25D0%25B0%2520&z=17",
"Санкт-Петербург/Станция метро Невский проспект": "https://yandex.ru/maps/2/saint-petersburg/?ll=30.326364%2C59.935241&masstransit%5BstopId%5D=stop__10075220&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D30.315639%252C59.938953%26spn%3D1.334415%252C0.611099%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%25A1%25D0%25B0%25D0%25BD%25D0%25BA%25D1%2582-%25D0%259F%25D0%25B5%25D1%2582%25D0%25B5%25D1%2580%25D0%25B1%25D1%2583%25D1%2580%25D0%25B3%2520&z=18",
"Калининград/Гостиница Калининград": "https://yandex.ru/maps/22/kaliningrad/?ll=20.509223%2C54.712040&masstransit%5BstopId%5D=3313917805&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D20.507313%252C54.707394%26spn%3D0.359865%252C0.148655%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259A%25D0%25B0%25D0%25BB%25D0%25B8%25D0%25BD%25D0%25B8%25D0%25BD%25D0%25B3%25D1%2580%25D0%25B0%25D0%25B4%2520&z=18",
"Москва/Метро Марьино (южная)": "https://yandex.ru/maps/213/moscow/?ll=37.744035%2C55.649321&masstransit%5BstopId%5D=stop__9647488&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D37.743473%252C55.650028%26spn%3D0.001000%252C0.001000%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259C%25D0%25BE%25D1%2581%25D0%25BA%25D0%25B2%25D0%25B0%252C%2520%25D0%25BC%25D0%25B5%25D1%2582%25D1%2580%25D0%25BE%2520%25D0%259C%25D0%25B0%25D1%2580%25D1%258C%25D0%25B8%25D0%25BD%25D0%25BE%2520&z=17"
}
# This is an empty station, it should return nothing.
# There was a small period when it returned getLayerRegions.
station_empty = {"Якутск/Школа №7": "https://yandex.ru/maps/74/yakutsk/?ll=129.725800%2C62.037399&mode=poi&poi%5Bpoint%5D=129.728085%2C62.036624&poi%5Buri%5D=ymapsbm1%3A%2F%2Forg%3Foid%3D179807288972&sll=37.586616%2C55.802258&sspn=0.036435%2C0.012545&text=%D1%8F%D0%BA%D1%83%D1%82%D1%81%D0%BA&z=16"}
# These are working routes, they should return getRouteInfo, getVehiclesInfo, getVehiclesInfoWithRegion, getLayerRegions
routes_urls = {"Москва/Автобус 105": "https://yandex.ru/maps/213/moscow/?ll=37.517402%2C55.804455&masstransit%5BlineId%5D=213_105_bus_mosgortrans&masstransit%5BstopId%5D=stop__9647423&masstransit%5BthreadId%5D=213A_105_bus_mosgortrans&mode=stop&z=14",
"Москва/Троллейбус 53": "https://yandex.ru/maps/213/moscow/?ll=37.746753%2C55.737977&masstransit%5BlineId%5D=2036926340&masstransit%5BstopId%5D=stop__9647379&masstransit%5BthreadId%5D=213A_53_trolleybus_mosgortrans&mode=stop&z=13",
"Москва/Автобус 12": "https://yandex.ru/maps/213/moscow/?ll=37.546941%2C55.755232&masstransit%5BlineId%5D=213_12_bus_mosgortrans&masstransit%5BstopId%5D=stop__9649559&masstransit%5BthreadId%5D=213A_12_bus_mosgortrans&mode=stop&z=15",
"Москва/Троллейбус 54": "https://yandex.ru/maps/213/moscow/?ll=37.587580%2C55.770117&masstransit%5BlineId%5D=213_54_trolleybus_mosgortrans&masstransit%5BstopId%5D=stop__9648355&masstransit%5BthreadId%5D=2036927249&mode=stop&z=17",
"Москва/Автобус Н1": "https://yandex.ru/maps/213/moscow/?ll=37.634151%2C55.754175&masstransit%5BlineId%5D=N1_bus_default&masstransit%5BstopId%5D=stop__10187976&masstransit%5BthreadId%5D=2036926069&mode=stop&z=19",
"Петропавловск-Камчатский/Автобус 1": "https://yandex.ru/maps/78/petropavlovsk/?ll=158.650258%2C53.016359&masstransit%5BlineId%5D=1704841626&masstransit%5BthreadId%5D=2163257102&mode=stop&z=17",
"Магадан/Автобус 1": "https://yandex.ru/maps/79/magadan/?ll=150.800171%2C59.560040&masstransit%5BlineId%5D=1704917872&masstransit%5BthreadId%5D=1952775971&mode=stop&z=16",
"Владивосток/Маршрутка 24": "https://yandex.ru/maps/75/vladivostok/?ll=131.886671%2C43.115497&masstransit%5BlineId%5D=2468209792&masstransit%5BthreadId%5D=2468209966&mode=stop&z=17",
"Якутск/Автобус 104": "https://yandex.ru/maps/74/yakutsk/?ll=129.723504%2C62.037152&masstransit%5BlineId%5D=1704844454&masstransit%5BthreadId%5D=3442738945&mode=stop&z=16",
"Иркутск/Трамвай 4А": "https://yandex.ru/maps/63/irkutsk/?ll=104.259354%2C52.282396&masstransit%5BlineId%5D=1962955244&masstransit%5BthreadId%5D=1962955369&mode=stop&z=18",
"Красноярск/Маршрутка 130": "https://yandex.ru/maps/62/krasnoyarsk/?ll=92.831247%2C56.005319&masstransit%5BlineId%5D=2611970500&masstransit%5BthreadId%5D=2611970606&mode=stop&z=17",
"Омск/Троллейбус 12": "https://yandex.ru/maps/66/omsk/?ll=73.386035%2C54.939776&masstransit%5BlineId%5D=2012848234&masstransit%5BthreadId%5D=2012848632&mode=stop&z=17",
"Екатеринбург/Трамвай 5": "https://yandex.ru/maps/54/yekaterinburg/?ll=60.614978%2C56.863073&masstransit%5BlineId%5D=2107048890&masstransit%5BthreadId%5D=2107049173&mode=stop&z=16",
"Самара/Трамвай 5": "https://yandex.ru/maps/51/samara/?ll=50.099858%2C53.188705&masstransit%5BlineId%5D=2193179444&masstransit%5BthreadId%5D=2193179903&mode=stop&z=17",
"Санкт-Петербург/Троллейбус 22": "https://yandex.ru/maps/2/saint-petersburg/?ll=30.324825%2C59.935390&masstransit%5BlineId%5D=22_trolleybus_discus&masstransit%5BthreadId%5D=22B_trolleybus_discus&mode=stop&z=18",
"Калининград/Автобус 593": "https://yandex.ru/maps/22/kaliningrad/?ll=20.508255%2C54.712590&masstransit%5BlineId%5D=3181656187&masstransit%5BthreadId%5D=3181656277&mode=stop&z=18",
"Москва/Маршрутка 937к": "https://yandex.ru/maps/213/moscow/?ll=37.465495%2C55.878790&masstransit%5BlineId%5D=937_minibus_default&masstransit%5BthreadId%5D=937A_minibus_default&mode=stop&z=13",
"Москва/Трамвай А": "https://yandex.ru/maps/213/moscow/?ll=37.638675%2C55.764634&masstransit%5BlineId%5D=213_A_tramway_mosgortrans&masstransit%5BthreadId%5D=2036927519&mode=stop&z=18"
}
# Accumulated results. Good idea is to actually SAVE accumulated data results.
query_results = []
json_data = []
# NOTE: It seems sometimes getting ALL services (get_stop_info, getLayerRegions etc) might fail.
# It may be a browser issue, or the problem may be on Yandex's side.
# In tests this sometimes appears near the end of a long run of get_all_info queries while checking stops.
# For now we're increasing the random period from 15-45 to 40-90 seconds.
def wait_random_time():
value = random.randint(40, 90)
print("Waiting " + str(value) + " seconds.")
time.sleep(value)
# ----- DATA COLLECTION ----- #
do_data_collection = True
do_stations_collection = True
do_routes_collection = True
data_collection_passed = False
def perform_data_collection():
"""
    Data collection test; every single request should return a valid JSON object.
This test can be switched off, and data can be loaded from files instead during development.
This takes a huge amount of time to process, by the way, due to wait times between queries
(We don't want Yandex to get angry due to frequent queries, so we're playing safe here).
Expect about 40-60 minutes of data collection.
"""
global query_results
global do_data_collection
global do_stations_collection
global do_routes_collection
global data_collection_passed
if not do_data_collection:
return
if data_collection_passed:
return
print()
proxy = YandexTransportProxy(SERVER_HOST, SERVER_PORT)
if do_stations_collection:
for station, url in station_urls.items():
print("Collecting station: " + station + "... ", end='')
            result = ''
try:
result = proxy.get_all_info(url)
for entry in result:
query_results.append({"success": True,
"station": station,
"url": url,
"method": entry['method'],
"data": entry['data']})
print(entry['method'], end=' ')
print("[OK]")
except Exception as e:
query_results.append({"success": False,
"station": station,
"url": url,
"method": "getAllInfo (failed)",
"data": ""
}
)
print("[FAILED]")
print("Exception (station): ",str(e))
f = open('tests/testdata/output/station_' + station.replace('/', '-') + '.json.pretty', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False, indent=4, separators=(',', ': ')))
f.close()
f = open('tests/testdata/output/station_' + station.replace('/', '-') + '.json', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False))
f.close()
wait_random_time()
if do_routes_collection:
for route, url in routes_urls.items():
print("Collecting route: " + route + "... ", end='')
result = ''
try:
result = proxy.get_all_info(url)
for entry in result:
query_results.append({"success": True,
"route": route,
"url": url,
"method": entry['method'],
"data": entry['data']})
print(entry['method'], end=' ')
print("[OK]")
except Exception as e:
query_results.append({"success": False,
"route": route,
"url": url,
"method": "getAllInfo (failed)",
"data": ""
})
print("[FAILED]")
print("Exception (route): ", str(e))
f = open('tests/testdata/output/route_' + route.replace('/', '-') + '.json.pretty', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False, indent=4, separators=(',', ': ')))
f.close()
f = open('tests/testdata/output/route_' + route.replace('/', '-') + '.json', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False))
f.close()
wait_random_time()
# Saving data to files
f = open('test_data.json', 'w', encoding='utf-8')
f.write(json.dumps(query_results, ensure_ascii=False))
f.close()
# Setting "data collection passed" flag.
data_collection_passed = True
# Basically, always succeeds
assert True == True
def load_data_from_file():
global json_data
print()
f = open('test_data.json', 'r', encoding='utf-8')
data = f.readline()
f.close()
json_data = json.loads(data)
for entry in json_data:
if 'station' in entry:
print('Station : ', entry["station"], ",", entry["success"], ",", end=' ')
if 'method' in entry:
print(entry["method"])
else:
print("")
if 'route' in entry:
print('Route : ', entry["route"], ",", entry["success"], ",", end=' ')
if 'method' in entry:
print(entry["method"])
else:
print("")
@pytest.fixture(scope="session", autouse=True)
def prepare_data():
# Collect data from Yandex Maps, save it to a file
perform_data_collection()
# Load data from file for tests.
load_data_from_file()
# ----- TESTS ----- #
@pytest.mark.timeout(3600)
def test_data_load_stage():
"""Needed to call perform_data_collection and load_data_from_file functions"""
print()
assert True == True
@pytest.mark.timeout(120)
def test_initial():
"""Most basic test.py to ensure pytest DEFINITELY works"""
assert True == True
# ----- CONTINUOUS TESTS ----- #
# With "getting the JSON" approach, checking if JSON contains the required data (stopInfo, routeInfo etc)
# is too difficult. Instead, continuous tests just check the integrity of the obtained JSONs and which methods were called.
# Testing of actual data should be performed for particular API functions instead, if implemented,
# like getVehiclesInfo should count vehicles on several routes, or getStopCoordinates should get stop coordinated,
# accordingly.
# -------------------------------------------------------------------------------------------------------------------- #
def test_check_if_failure():
""" Test if there was no failures"""
failure_found = False
for entry in json_data:
if not entry["success"]:
failure_found = True
if 'station' in entry:
print("Data for stop ", entry['station'], "collection failed.")
if 'route' in entry:
print("Data for route ", entry['route'], "collection failed.")
assert not failure_found
def test_check_if_json():
""" Test is every record is JSON"""
failure_found = False
for entry in json_data:
try:
record = json.dumps(entry['data'])
except Exception as e:
print("Exception:", str(e))
failure_found = True
if 'station' in entry:
print("Data for stop ", entry['station'], "is not JSON.")
if 'route' in entry:
print("Data for route ", entry['route'], "is not JSON.")
assert not failure_found
def test_check_if_no_error():
""" Check that there is no 'error' field in json"""
failure_found = False
for entry in json_data:
if 'error' in entry['data']:
failure_found = True
if 'station' in entry:
print("Data for stop ", entry['station'], "has an 'error'.")
if 'route' in entry:
print("Data for route ", entry['route'], "has an 'error'.")
assert not failure_found
def test_encountered_methods():
"""
Test that each method was encountered at least once.
Pretty "forgiving" test, "strict" would be expect every valid stop return getStopInfo and getLayerRegions,
ane every route return getLayerRegions, getVehiclesInfo, getVehiclesInfoWithRegion and getRouteInfo.
This may not happen due to network conditions.
"""
print()
print("Counting methods:")
result = {'getStopInfo': 0,
'getRouteInfo': 0,
'getLine': 0,
'getVehiclesInfo': 0,
'getVehiclesInfoWithRegion': 0,
'getLayerRegions': 0,
'getAllInfo (failed)': 0}
for entry in json_data:
result[entry['method']] += 1
for key, value in result.items():
print(key, ':', value)
assert result['getStopInfo'] > 0
assert result['getVehiclesInfo'] > 0
assert result['getLayerRegions'] > 0
assert result['getLine'] > 0
assert result['getVehiclesInfoWithRegion'] > 0
assert result['getAllInfo (failed)'] == 0
def test_no_data_returned():
"""
Test if there is a stop/route with no data returned
"""
print()
data_stations = {}
data_routes = {}
failure_found = False
for entry in json_data:
if 'station' in entry:
data_stations[entry['station']] = 1
if 'route' in entry:
data_routes[entry['route']] = 1
for key, value in station_urls.items():
if key not in data_stations:
failure_found = True
print("No data for station", key)
for key, value in routes_urls.items():
if key not in data_routes:
failure_found = True
print("No data for route", key)
assert not failure_found
# -------------------------------------------------------------------------------------------------------------------- #
|
nilq/baby-python
|
python
|
from __future__ import (
absolute_import,
unicode_literals,
)
import importlib
import os
import pytest
@pytest.fixture(scope='module')
def server_settings(server_class):
"""
Load the server_settings used by this service.
"""
if server_class.use_django:
from django.conf import settings
else:
settings_module = os.environ.get('PYSOA_SETTINGS_MODULE', None)
assert settings_module, 'PYSOA_SETTINGS_MODULE environment variable must be set to run tests.'
try:
settings = importlib.import_module(settings_module)
except ImportError:
raise AssertionError('Could not import PYSOA_SETTINGS_MODULE: {}'.format(settings_module))
try:
soa_settings = settings.SOA_SERVER_SETTINGS
except AttributeError:
try:
soa_settings = settings.settings
except AttributeError:
raise AssertionError('Could not access settings.SOA_SERVER_SETTINGS or settings.settings')
return soa_settings
@pytest.fixture(scope='module')
def service_client_settings(server_class, server_settings):
"""Config passed to the service client on instantiation"""
return {
server_class.service_name: {
'transport': {
'path': 'pysoa.common.transport.local:LocalClientTransport',
'kwargs': {
'server_class': server_class,
'server_settings': server_settings,
},
},
},
}
@pytest.fixture(scope='module')
def service_client_class(server_class):
"""
Override the service client being used to test to automatically inject the service name for
your testing convenience.
"""
from pysoa.client import Client # inline so as not to mess up coverage
class _TestClient(Client):
def call_action(self, action, body=None, service_name=None, **kwargs):
service_name = service_name or server_class.service_name
return super(_TestClient, self).call_action(service_name, action, body=body, **kwargs)
return _TestClient
@pytest.fixture(scope='module')
def service_client(service_client_class, service_client_settings):
"""
Instantiate the service client class with the requisite config. Service doing the testing should define
the server_class fixture.
"""
return service_client_class(service_client_settings)
@pytest.fixture
def action_stubber():
"""
Equivalent of the pytest `mocker` fixture for stub_action, with similar motivations and behavior.
Allows a test to stub actions without having to manually clean up after the test.
See https://github.com/pytest-dev/pytest-mock for more info
"""
from pysoa.test.stub_service import stub_action # inline so as not to mess up coverage
stubbies = []
def _do_stub(*args, **kwargs):
stubby = stub_action(*args, **kwargs)
stubbies.append(stubby)
return stubby.__enter__()
yield _do_stub
for stub in stubbies[::-1]:
stub.__exit__()
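# A hedged usage sketch (hypothetical service/action names, not part of these
# fixtures); the service doing the testing must still define the server_class
# fixture, and exact stub attributes may differ by pysoa version:
#
# def test_get_user(service_client, action_stubber):
#     action_stubber('user', 'get_user', body={'id': 1, 'name': 'Jane'})
#     response = service_client.call_action('get_user', {'id': 1}, service_name='user')
#     assert response.body == {'id': 1, 'name': 'Jane'}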
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
from setuptools import find_packages
from setuptools import setup
def parse_requirements(filename):
"""Given a filename, strip empty lines and those beginning with #."""
with open(filename) as rfd:
output = []
for line in rfd:
line = line.strip()
if line != '' and not line.startswith('#'):
output.append(line)
return output
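# For example (hedged): a requirements.txt containing a "# pinned deps" comment
# line, a blank line, and the entries "requests>=2.0" and "tweepy" yields
# ['requests>=2.0', 'tweepy'].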
setup(
name='FollowerAnalyzer',
version='0.1',
author='Sravan Bhamidipati',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
description="Application to analyze a Twitter user's followers.",
long_description='\n' + open('README.md').read(),
)
|
nilq/baby-python
|
python
|
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import (
LearningRateScheduler,
CombinedLearningRateScheduler,
PolynomialDecay,
)
from allennlp.training.optimizers import Optimizer
class TestCombinedLRScheduler(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.model = torch.nn.Sequential(torch.nn.Linear(10, 10))
self.optimizer = Optimizer.from_params(
model_parameters=self.model.named_parameters(),
params=Params({"type": "sgd", "lr": 1.0}),
)
def get_scheduler(self) -> LearningRateScheduler:
return LearningRateScheduler.from_params(
Params(
{
"type": "combined",
"schedulers": [
[
2,
{
"type": "polynomial_decay",
"warmup_steps": 10,
"end_learning_rate": 0.5,
},
],
[
5,
{
"type": "polynomial_decay",
"warmup_steps": 0,
"end_learning_rate": 0.1,
},
],
],
}
),
optimizer=self.optimizer,
num_steps_per_epoch=10,
)
def test_partial_schedule(self):
scheduler = self.get_scheduler()
assert isinstance(scheduler, CombinedLearningRateScheduler)
assert isinstance(scheduler._current_scheduler, PolynomialDecay)
# This should be 0 because the PolynomialDecay scheduler initializes the LR to 0.
assert self.optimizer.param_groups[0]["lr"] == 0.0
epoch_end_lrs = []
for epoch in range(10):
if epoch > 6:
assert scheduler._current_scheduler is None
elif epoch >= 2:
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 50
assert scheduler._current_scheduler.base_values[0] == 0.5
else:
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 20
assert scheduler._current_scheduler.base_values[0] == 1.0
for step in range(10):
scheduler.step_batch()
scheduler.step()
epoch_end_lrs.append(self.optimizer.param_groups[0]["lr"])
assert epoch_end_lrs[0] == 1.0
assert epoch_end_lrs[1] == 0.5
assert epoch_end_lrs[6] == 0.1
        assert epoch_end_lrs[7] == 0.1
def test_load_from_checkpoint(self):
scheduler = self.get_scheduler()
for epoch in range(3):
for step in range(10):
scheduler.step_batch()
scheduler.step()
assert scheduler.last_epoch == 2
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 50
assert scheduler._current_scheduler.base_values[0] == 0.5
state_dict = scheduler.state_dict()
new_scheduler = self.get_scheduler()
new_scheduler.load_state_dict(state_dict)
assert new_scheduler.last_epoch == 2
assert new_scheduler._current_scheduler is not None
assert new_scheduler._current_scheduler.total_steps == 50
assert new_scheduler._current_scheduler.base_values[0] == 0.5, state_dict
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# Print the Latin alphabet from z to a, alternating lowercase and uppercase
# (chr(i - 32) maps a lowercase code point to its uppercase counterpart).
for i in range(122, 96, -1):
    if i % 2 == 0:
        print("{:s}".format(chr(i)), end="")
    else:
        print("{:s}".format(chr(i - 32)), end="")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
## @package palette.main
#
# Main function.
# @author tody
# @date 2015/08/20
from palette.datasets.google_image import createDatasets
from palette.results.single_image import signleImageResults
from palette.results.multi_images import multiImagesResults
if __name__ == '__main__':
data_names = ["tulip", "flower"]
num_images = 5
data_ids = range(3)
createDatasets(data_names, num_images, update=False)
signleImageResults(data_names, data_ids)
multiImagesResults(data_names, data_ids)
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class RNN(nn.Module):
def __init__(self, config):
"""
type_rnn: RNN, GRU, LSTM 可选
"""
super(RNN, self).__init__()
# self.xxx = config.xxx
self.input_size = config.input_size
self.hidden_size = config.hidden_size // 2 if config.bidirectional else config.hidden_size
self.num_layers = config.num_layers
self.dropout = config.dropout
self.bidirectional = config.bidirectional
self.last_layer_hn = config.last_layer_hn
self.type_rnn = config.type_rnn
        rnn = getattr(nn, self.type_rnn)  # resolve the requested recurrent layer class by name
self.rnn = rnn(input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
dropout=self.dropout,
bidirectional=self.bidirectional,
bias=True,
batch_first=True)
def forward(self, x, x_len):
"""
Args:
torch.Tensor [batch_size, seq_max_length, input_size], [B, L, H_in] 一般是经过embedding后的值
x_len: torch.Tensor [L] 已经排好序的句长值
Returns:
output: torch.Tensor [B, L, H_out] 序列标注的使用结果
hn: torch.Tensor [B, N, H_out] / [B, H_out] 分类的结果,当 last_layer_hn 时只有最后一层结果
"""
B, L, _ = x.size()
H, N = self.hidden_size, self.num_layers
x_len = x_len.cpu()
x = pack_padded_sequence(x, x_len, batch_first=True, enforce_sorted=True)
output, hn = self.rnn(x)
output, _ = pad_packed_sequence(output, batch_first=True, total_length=L)
if self.type_rnn == 'LSTM':
hn = hn[0]
if self.bidirectional:
hn = hn.view(N, 2, B, H).transpose(1, 2).contiguous().view(N, B, 2 * H).transpose(0, 1)
else:
hn = hn.transpose(0, 1)
if self.last_layer_hn:
hn = hn[:, -1, :]
return output, hn
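# A minimal usage sketch (hypothetical config values, not part of the original
# module); inputs must be pre-sorted by decreasing length because the module
# packs with enforce_sorted=True:
if __name__ == '__main__':
    from types import SimpleNamespace

    config = SimpleNamespace(input_size=8, hidden_size=16, num_layers=2,
                             dropout=0.1, bidirectional=True,
                             last_layer_hn=False, type_rnn='LSTM')
    model = RNN(config)
    x = torch.randn(4, 7, 8)            # [B, L, H_in]
    x_len = torch.tensor([7, 6, 4, 2])  # sorted in descending order
    output, hn = model(x, x_len)
    print(output.shape, hn.shape)       # expect [4, 7, 16] and [4, 2, 16]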
|
nilq/baby-python
|
python
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or '28#oN^^VfhcxV7x8H32yGOGIk2wLY%OFi!!V'
### email configs,https://pythonhosted.org/Flask-Mail/
###
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '25'))
MAIL_USE_SSL = False
    # enable Transport Layer Security (TLS)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in ['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_SUBJECT_PREFIX_JH = '[HJiahu]'
    MAIL_SENDER_JH = os.environ.get('MAIL_SENDER_JH')  # the sender must use the same username as the connected SMTP server
    # The administrator's mailbox; the original flasky used the variable name FLASKY_ADMIN.
    MAIL_ADMIN_EMAIL_JH = os.environ.get('MAIL_ADMIN_EMAIL_JH', 'hjiahu@outlook.com')
SQLALCHEMY_TRACK_MODIFICATIONS = False
FLASKY_POSTS_PER_PAGE = 20
FLASKY_FOLLOWERS_PER_PAGE = 50
FLASKY_COMMENTS_PER_PAGE = 30
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
MAIL_SERVER = 'smtp.163.com'
MAIL_USERNAME = 'jiahuhenan'
MAIL_PASSWORD = 'jiahu123'
MAIL_SENDER_JH = 'jiahuhenan@163.com'
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite://'
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
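# A minimal usage sketch (hypothetical app factory, not part of this module):
#
#   from flask import Flask
#   app = Flask(__name__)
#   cfg = config[os.environ.get('FLASK_CONFIG', 'default')]
#   app.config.from_object(cfg)
#   cfg.init_app(app)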
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Push notifications tests."""
__author__ = 'afshar@google.com (Ali Afshar)'
import unittest
from apiclient import push
from apiclient import model
from apiclient import http
from test_discovery import assertUrisEqual
class ClientTokenGeneratorTest(unittest.TestCase):
def test_next(self):
t = push.new_token()
self.assertTrue(t)
class ChannelTest(unittest.TestCase):
def test_creation_noargs(self):
c = push.Channel(channel_type='my_channel_type', channel_args={})
self.assertEqual('my_channel_type', c.channel_type)
self.assertEqual({}, c.channel_args)
def test_creation_args(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b'})
self.assertEqual('my_channel_type', c.channel_type)
self.assertEqual({'a':'b'}, c.channel_args)
def test_as_header_value_noargs(self):
c = push.Channel(channel_type='my_channel_type', channel_args={})
self.assertEqual('my_channel_type?', c.as_header_value())
def test_as_header_value_args(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b'})
self.assertEqual('my_channel_type?a=b', c.as_header_value())
def test_as_header_value_args_space(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b c'})
self.assertEqual('my_channel_type?a=b+c', c.as_header_value())
def test_as_header_value_args_escape(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b%c'})
self.assertEqual('my_channel_type?a=b%25c', c.as_header_value())
def test_write_header_noargs(self):
c = push.Channel(channel_type='my_channel_type', channel_args={})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?', headers['X-GOOG-SUBSCRIBE'])
def test_write_header_args(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b'})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?a=b', headers['X-GOOG-SUBSCRIBE'])
def test_write_header_args_space(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b c'})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?a=b+c', headers['X-GOOG-SUBSCRIBE'])
def test_write_header_args_escape(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b%c'})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?a=b%25c', headers['X-GOOG-SUBSCRIBE'])
class WebhookChannelTest(unittest.TestCase):
def test_creation_no_appengine(self):
c = push.WebhookChannel('http://example.org')
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=false',
c.as_header_value())
def test_creation_appengine(self):
c = push.WebhookChannel('http://example.org', app_engine=True)
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=true',
c.as_header_value())
class HeadersTest(unittest.TestCase):
def test_creation(self):
h = push.Headers()
self.assertEqual('', h[push.SUBSCRIBE])
def test_items(self):
h = push.Headers()
h[push.SUBSCRIBE] = 'my_channel_type'
self.assertEqual([(push.SUBSCRIBE, 'my_channel_type')], list(h.items()))
def test_items_non_whitelisted(self):
h = push.Headers()
def set_bad_header(h=h):
h['X-Banana'] = 'my_channel_type'
self.assertRaises(ValueError, set_bad_header)
def test_read(self):
h = push.Headers()
h.read({'x-goog-subscribe': 'my_channel_type'})
self.assertEqual([(push.SUBSCRIBE, 'my_channel_type')], list(h.items()))
def test_read_non_whitelisted(self):
h = push.Headers()
h.read({'X-Banana': 'my_channel_type'})
self.assertEqual([], list(h.items()))
def test_write(self):
h = push.Headers()
h[push.SUBSCRIBE] = 'my_channel_type'
headers = {}
h.write(headers)
self.assertEqual({'x-goog-subscribe': 'my_channel_type'}, headers)
class SubscriptionTest(unittest.TestCase):
def test_create(self):
s = push.Subscription()
self.assertEqual('', s.client_token)
    def test_create_for_channel(self):
c = push.WebhookChannel('http://example.org')
s = push.Subscription.for_channel(c)
self.assertTrue(s.client_token)
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=false',
s.subscribe)
def test_create_for_channel_client_token(self):
c = push.WebhookChannel('http://example.org')
s = push.Subscription.for_channel(c, client_token='my_token')
self.assertEqual('my_token', s.client_token)
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=false',
s.subscribe)
def test_subscribe(self):
s = push.Subscription()
s.headers[push.SUBSCRIBE] = 'my_header'
self.assertEqual('my_header', s.subscribe)
def test_subscription_id(self):
s = push.Subscription()
s.headers[push.SUBSCRIPTION_ID] = 'my_header'
self.assertEqual('my_header', s.subscription_id)
def test_subscription_id_set(self):
c = push.WebhookChannel('http://example.org')
s = push.Subscription.for_channel(c)
self.assertTrue(s.subscription_id)
def test_topic_id(self):
s = push.Subscription()
s.headers[push.TOPIC_ID] = 'my_header'
self.assertEqual('my_header', s.topic_id)
def test_topic_uri(self):
s = push.Subscription()
s.headers[push.TOPIC_URI] = 'my_header'
self.assertEqual('my_header', s.topic_uri)
def test_client_token(self):
s = push.Subscription()
s.headers[push.CLIENT_TOKEN] = 'my_header'
self.assertEqual('my_header', s.client_token)
def test_event_type(self):
s = push.Subscription()
s.headers[push.EVENT_TYPE] = 'my_header'
self.assertEqual('my_header', s.event_type)
def test_unsubscribe(self):
s = push.Subscription()
s.headers[push.UNSUBSCRIBE] = 'my_header'
self.assertEqual('my_header', s.unsubscribe)
def test_do_subscribe(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='GET',
body='{}',
headers={'content-type': 'application/json'})
h = http.HttpMockSequence([
({'status': 200,
'X-Goog-Subscription-ID': 'my_subscription'},
'{}')])
c = push.Channel('my_channel', {})
s = push.Subscription.for_request(request, c)
request.execute(http=h)
self.assertEqual('my_subscription', s.subscription_id)
def test_subscribe_with_token(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='GET',
body='{}',
headers={'content-type': 'application/json'})
h = http.HttpMockSequence([
({'status': 200,
'X-Goog-Subscription-ID': 'my_subscription'},
'{}')])
c = push.Channel('my_channel', {})
s = push.Subscription.for_request(request, c, client_token='my_token')
request.execute(http=h)
self.assertEqual('my_subscription', s.subscription_id)
self.assertEqual('my_token', s.client_token)
def test_verify_good_token(self):
s = push.Subscription()
s.headers['X-Goog-Client-Token'] = '123'
notification_headers = {'x-goog-client-token': '123'}
self.assertTrue(s.verify(notification_headers))
def test_verify_bad_token(self):
s = push.Subscription()
s.headers['X-Goog-Client-Token'] = '321'
notification_headers = {'x-goog-client-token': '123'}
self.assertFalse(s.verify(notification_headers))
def test_request_is_post(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='GET',
body='{}',
headers={'content-type': 'application/json'})
c = push.Channel('my_channel', {})
push.Subscription.for_request(request, c)
self.assertEqual('POST', request.method)
def test_non_get_error(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='POST',
body='{}',
headers={'content-type': 'application/json'})
c = push.Channel('my_channel', {})
self.assertRaises(push.InvalidSubscriptionRequestError,
push.Subscription.for_request, request, c)
|
nilq/baby-python
|
python
|
import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import *
indigo = Indigo()
indigo.setOption("ignore-stereochemistry-errors", True)
indigo.setOption("molfile-saving-skip-date", True)
def testSerializeIsotopes():
mol = indigo.loadMolecule(
"[H][12C]1=[8C]([2H])[10C]([3H])=C([2H])[14C]([3H])=[13C]1[1H]"
)
mol2 = indigo.unserialize(mol.serialize())
print(mol2.smiles())
if indigo.exactMatch(mol, mol2) is None:
print("NOT MATCHED!")
def testSerializeIsotopes2():
mol = indigo.loadMolecule("C")
for n in range(1, 300):
mol.getAtom(0).setIsotope(n)
try:
mol2 = indigo.unserialize(mol.serialize())
if indigo.exactMatch(mol, mol2) is None:
print("NOT MATCHED! " + n)
except IndigoException as e:
print("caught " + getIndigoExceptionText(e))
break
testSerializeIsotopes()
testSerializeIsotopes2()
|
nilq/baby-python
|
python
|
import arrow
import dateutil
import requests
COUNTRY_CODE = 'RO'
def fetch_RO():
url = 'http://www.transelectrica.ro/sen-filter'
data = {}
for item in requests.get(url).json():
        d = list(item.items())[0]
data[d[0]] = d[1]
obj = {
'countryCode': COUNTRY_CODE,
'datetime': arrow.get(data['row1_HARTASEN_DATA'], "YY/M/D H:mm:ss").replace(
tzinfo=dateutil.tz.gettz('Europe/Bucharest')).datetime
}
obj['consumption'] = {
'unknown': float(data['CONS'])
}
# According to http://www.transelectrica.ro/widget/web/tel/sen-harta/-/harta_WAR_SENOperareHartaportlet
    # BALT and UCRS (for Baltic and Ukraine South) are categorized under Bulgaria on the transelectrica website. We did the same here.
obj['exchange'] = {
'BG': float(data.get('VARN', 0)) + float(data.get('DOBR', 0)) + float(data.get('KOZL1', 0)) + float(data.get('KOZL2', 0)) + float(data.get('BALT', 0)) + float(data.get('UCRS', 0)),
'HU': float(data.get('SAND', 0)) + float(data.get('BEKE1', 0)) + float(data.get('BEKE2', 0)),
'MD': float(data.get('COSE', 0)) + float(data.get('UNGE', 0)) + float(data.get('CIOA', 0)) + float(data.get('GOTE', 0)),
'RS': float(data.get('DJER', 0)) + float(data.get('PAN1', 0)) + float(data.get('PAN2', 0)) + float(data.get('KUSJ', 0)) + float(data.get('SIP_', 0)) + float(data.get('KIKI', 0)),
'UA': float(data.get('VULC', 0)) + float(data.get('MUKA', 0)) + float(data.get('COD1', 0))
}
obj['production'] = {
'biomass': float(data['BMASA']),
'coal': float(data['CARB']),
'gas': float(data['GAZE']),
'hydro': float(data['APE']),
'nuclear': float(data['NUCL']),
'solar': float(data['FOTO']),
'wind': float(data['EOLIAN'])
}
return obj
if __name__ == '__main__':
    print(fetch_RO())
|
nilq/baby-python
|
python
|
from claf.config.factory.data_reader import DataReaderFactory
from claf.config.factory.data_loader import DataLoaderFactory
from claf.config.factory.model import ModelFactory
from claf.config.factory.optimizer import OptimizerFactory
from claf.config.factory.tokens import TokenMakersFactory
__all__ = [
"DataReaderFactory",
"DataLoaderFactory",
"ModelFactory",
"OptimizerFactory",
"TokenMakersFactory",
]
|
nilq/baby-python
|
python
|
import os
import shutil
from datetime import datetime
from os.path import dirname, join
import torch
class Logger():
def __init__(self, para):
self.para = para
now = datetime.now() if 'time' not in vars(para) else para.time
now = now.strftime("%Y_%m_%d_%H_%M_%S")
mark = para.model + '_' + para.dataset
file_path = join(para.save_dir, now + '_' + mark, 'log.txt')
self.save_dir = dirname(file_path)
self.check_dir(file_path)
self.logger = open(file_path, 'a+')
# variable register
self.register_dict = {}
# tensorboard
def record_para(self):
self('recording parameters ...')
for key, value in vars(self.para).items():
self('{}: {}'.format(key, value), timestamp=False)
    def check_dir(self, file_path):
        dir_path = dirname(file_path)
        os.makedirs(dir_path, exist_ok=True)
def __call__(self, *args, verbose=True, prefix='', timestamp=True):
if timestamp:
now = datetime.now()
now = now.strftime("%Y/%m/%d, %H:%M:%S - ")
else:
now = ''
info = prefix + now
for msg in args:
if not isinstance(msg, str):
msg = str(msg)
info += msg + '\n'
self.logger.write(info)
if verbose:
print(info, end='')
self.logger.flush()
def __del__(self):
self.logger.close()
# register values for each epoch, such as loss, PSNR etc.
def register(self, name, epoch, value):
if name in self.register_dict:
self.register_dict[name][epoch] = value
if value > self.register_dict[name]['max']:
self.register_dict[name]['max'] = value
if value < self.register_dict[name]['min']:
self.register_dict[name]['min'] = value
else:
self.register_dict[name] = {}
self.register_dict[name][epoch] = value
self.register_dict[name]['max'] = value
self.register_dict[name]['min'] = value
def report(self, items, state, epoch):
# items - [['MSE', 'min'], ['PSNR', 'max'] ... ]
msg = '[{}] '.format(state.lower())
state = '_' + state.lower()
for i in range(len(items)):
item, best = items[i]
msg += '{} : {:.4f} (best {:.4f})'.format(
item,
self.register_dict[item + state][epoch],
self.register_dict[item + state][best]
)
if i < len(items) - 1:
msg += ', '
self(msg, timestamp=False)
def is_best(self, epoch):
item = self.register_dict[self.para.loss + '_valid']
return item[epoch] == item['min']
def save(self, state, filename='checkpoint.pth.tar'):
path = join(self.save_dir, filename)
torch.save(state, path)
if self.is_best(state['epoch']):
copy_path = join(self.save_dir, 'model_best.pth.tar')
shutil.copy(path, copy_path)
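# A minimal usage sketch (hypothetical `para` values, not part of the original
# class); Logger only needs model, dataset, save_dir and loss attributes here:
if __name__ == '__main__':
    from types import SimpleNamespace

    para = SimpleNamespace(model='net', dataset='demo', save_dir='./logs',
                           loss='MSE')
    logger = Logger(para)
    logger.record_para()
    logger.register('MSE_valid', epoch=1, value=0.42)
    logger.register('PSNR_valid', epoch=1, value=30.1)
    logger.report([['MSE', 'min'], ['PSNR', 'max']], state='valid', epoch=1)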
|
nilq/baby-python
|
python
|
###############################################################################
# Exceptions
###############################################################################
class UnexpectedCharacter(Exception):
def __init__(self, char, idx, matcher):
super().__init__(
'Expected {} at position {} but got {}'.format(
getattr(matcher, '__name__', matcher), idx, char)
)
###############################################################################
# Constants
###############################################################################
PERIOD = b'.'
NEGATIVE_SIGN = b'-'
# Define the Parser.container_value_context_stack values.
ARRAY_VALUE_CONTEXT = 'ARRAY_VALUE_CONTEXT'
OBJECT_VALUE_CONTEXT = 'OBJECT_VALUE_CONTEXT'
###############################################################################
# Matchers
#
# Matchers are character strings or predicate functions that are used to both
# test whether a character is as expected and serve as an indicator as to which
# class a character belongs.
###############################################################################
class Matchers:
OBJECT_OPEN = b'{'
ARRAY_OPEN = b'['
STRING_START = b'"'
STRING_TERMINATOR = b'"'
NULL_START = b'n'
TRUE_START = b't'
FALSE_START = b'f'
IS_NUMBER_START = lambda c: c == NEGATIVE_SIGN or c.isdigit()
OBJECT_CLOSE = b'}'
ARRAY_CLOSE = b']'
KV_SEP = b':'
ITEM_SEP = b','
EOF = b''
# Set derived matchers.
# Create separate scalar / object / array matchers that use the same logic but
# exist as uniquely identifiable values.
def IS_OBJECT_KEY_START(c):
return c == Matchers.STRING_START
Matchers.IS_OBJECT_KEY_START = IS_OBJECT_KEY_START
def IS_VALUE_START(c):
return (
c == Matchers.OBJECT_OPEN
or c == Matchers.ARRAY_OPEN
or c == Matchers.STRING_START
or Matchers.IS_NUMBER_START(c)
or c == Matchers.NULL_START
or c == Matchers.TRUE_START
or c == Matchers.FALSE_START
)
Matchers.IS_VALUE_START = IS_VALUE_START
def IS_ARRAY_VALUE_START(c):
return IS_VALUE_START(c)
Matchers.IS_ARRAY_VALUE_START = IS_ARRAY_VALUE_START
def IS_OBJECT_VALUE_START(c):
return IS_VALUE_START(c)
Matchers.IS_OBJECT_VALUE_START = IS_OBJECT_VALUE_START
def IS_ARRAY_ITEM_SEP(c):
return c == Matchers.ITEM_SEP
Matchers.IS_ARRAY_ITEM_SEP = IS_ARRAY_ITEM_SEP
def IS_OBJECT_ITEM_SEP(c):
return c == Matchers.ITEM_SEP
Matchers.IS_OBJECT_ITEM_SEP = IS_OBJECT_ITEM_SEP
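# A small illustration of the matcher convention described above: matchers are
# either byte literals compared with ==, or predicates called on a single byte.
#
# >>> Matchers.IS_NUMBER_START(b'-'), Matchers.IS_NUMBER_START(b'7')
# (True, True)
# >>> Matchers.IS_VALUE_START(b'{'), Matchers.IS_VALUE_START(b'}')
# (True, False)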
###############################################################################
# Events
#
# Events represent things that we expect to encounter, and want to act in
# response to, while parsing a JSON string.
###############################################################################
class Events:
ARRAY_CLOSE = 'ARRAY_CLOSE'
ARRAY_ITEM_SEP = 'ARRAY_ITEM_SEP'
ARRAY_OPEN = 'ARRAY_OPEN'
ARRAY_VALUE_FALSE = 'ARRAY_VALUE_FALSE'
ARRAY_VALUE_NULL = 'ARRAY_VALUE_NULL'
ARRAY_VALUE_NUMBER = 'ARRAY_VALUE_NUMBER'
ARRAY_VALUE_STRING = 'ARRAY_VALUE_STRING'
ARRAY_VALUE_TRUE = 'ARRAY_VALUE_TRUE'
EOF = 'END_OF_FILE'
FALSE = 'FALSE'
KV_SEP = 'KV_SEP'
NULL = 'NULL'
NUMBER = 'NUMBER'
OBJECT_CLOSE = 'OBJECT_CLOSE'
OBJECT_ITEM_SEP = 'OBJECT_ITEM_SEP'
OBJECT_KEY = 'OBJECT_KEY'
OBJECT_OPEN = 'OBJECT_OPEN'
OBJECT_VALUE_FALSE = 'OBJECT_VALUE_FALSE'
OBJECT_VALUE_NULL = 'OBJECT_VALUE_NULL'
OBJECT_VALUE_NUMBER = 'OBJECT_VALUE_NUMBER'
OBJECT_VALUE_STRING = 'OBJECT_VALUE_STRING'
OBJECT_VALUE_TRUE = 'OBJECT_VALUE_TRUE'
STRING = 'STRING'
TRUE = 'TRUE'
###############################################################################
# Helpers
###############################################################################
is_digit = lambda c: c.isdigit()
###############################################################################
# Parser
###############################################################################
class Parser:
def __init__(self, stream, encoding='utf-8'):
self.stream = stream
self.encoding = encoding
# Store the current stream char number for reporting the position of
# unexpected characters.
self.char_num = 0
# Store a place to stuff a character that we read from the stream but
# need to put back for the next read. next_char() will pop this value
# before reading again from the stream, thus providing a sort of 1-byte
# lookahead mechanism.
self.stuffed_char = None
# Define a stack to store the Matcher that we expect to match the next
# character from next_nonspace_char(). A single matcher element is
        # considered to be mandatory and parsing will fail if the matcher
        # fails. A 2-element tuple can be provided with the first element as an
        # optional matcher and the second as a mandatory one:
        #   i.e. ( <optional-matcher>, <mandatory-matcher> )
self.expect_stack = [ Matchers.EOF, Matchers.IS_VALUE_START ]
# Define a stack for storing the context of the current container-type
# (i.e. object value or array value) that we're currently parsing. This
# is used in order to yield the appropriate event on array/object
# close.
self.container_value_context_stack = []
def next_char(self):
# If there's a stuffed nonspace char, return that and do not increment
# char_num.
if self.stuffed_char is not None:
c = self.stuffed_char
self.stuffed_char = None
return c
# Return the next byte from the stream and increment char_num.
c = self.stream.read(1)
self.char_num += 1
return c
def next_nonspace_char(self):
# Advance the stream past the next non-whitespace character and return
# the character, or Matchers.EOF if the stream has been exhausted.
while True:
c = self.next_char()
if c == Matchers.EOF:
return Matchers.EOF
if not c.isspace():
return c
def stuff_char(self, c):
# Assert that stuffed_char is empty and write the character to it.
if self.stuffed_char is not None:
raise AssertionError
self.stuffed_char = c
def expect(self, matcher):
        # Assert that the next non-whitespace character is as expected and
        # return both the character and the matcher that matched it.
c = self.next_nonspace_char()
# The expect_stack contains elements that are either a single matcher
        # or a tuple of matchers in the format:
# ( <optional-matcher>, <mandatory-matcher> )
# Iterate through all tuple-type matchers.
while isinstance(matcher, tuple):
optional, matcher = matcher
# If the matcher is callable, call it, otherwise test against the
# byte literal.
if (callable(optional) and optional(c)) or c == optional:
# An optional matcher matched, so push the mandatory one back
# onto the expect_stack.
self.expect_stack.append(matcher)
# Return the character and matched optional matcher.
return c, optional
# Either no optional matches were specified or none matched, so attempt
# to match against the mandatory matcher.
if (callable(matcher) and matcher(c)) or c == matcher:
# Return the character and matched mandatory matcher.
return c, matcher
# The mandatory matcher failed, so raise UnexpectedCharacter.
raise UnexpectedCharacter(c, self.char_num, matcher)
def yield_while(self, pred):
# Yield characters from the stream until testing them against the
# specified predicate function returns False.
while True:
# Read the next character from the stream.
c = self.next_char()
# Check whether the character satisfies the predicate.
if not pred(c):
# The predicate has not been satisfied so stuff the last-read
# character back and return.
self.stuff_char(c)
return
# Yield the character.
yield c
def parse_string(self):
# Yield characters from the stream up until the next string terminator
# (i.e. '"') character.
while True:
c = self.next_char()
if c == Matchers.STRING_TERMINATOR:
return
# Disallow control characters.
if c[0] <= 0x1f:
raise UnexpectedCharacter(c, self.char_num, 'NOT_CONTROL_CHAR')
yield c
def parse_number(self):
# Yield characters from the stream up until the next non-number char.
# Expect the first character to be a negative sign or digit.
yield self.expect(lambda c: c == NEGATIVE_SIGN or c.isdigit())[0]
# Expect one or more digits.
yield from self.yield_while(is_digit)
# Check to see if the next char is a decimal point.
c = self.next_char()
if c != PERIOD:
# Not a decimal point so stuff it back and return.
self.stuff_char(c)
return
# It is a decimal point.
yield c
# Expect the next character to be a digit.
yield self.expect(is_digit)[0]
# Yield any remaining digits.
yield from self.yield_while(is_digit)
def parse(self):
# Start parsing self.stream.
while True:
# Get the next event.
event, value_gen, expect = self.next_event()
# If event is EOF, we've reached the end of the stream.
if event is Events.EOF:
return
# Yield the event and any value generator.
yield event, value_gen
# If a value generator hasn't been fully consumed, drain it.
if value_gen is not None:
for _ in value_gen:
pass
# If next_event() returned something to expect next, push it.
if expect is not None:
self.expect_stack.append(expect)
def next_event(self):
"""Attempt to match the next stream character to what's on the top of
the expect stack and return a tuple in the format:
( <event>, <value-generator-or-None>, <expected-next-or-None> )
"""
# Call expect() with the next item from the expect_stack.
c, matcher = self.expect(self.expect_stack.pop())
if matcher == Matchers.EOF:
# Char is an empty string which indicates that the input stream has
# been exhausted.
return Events.EOF, None, None
if c == Matchers.ARRAY_OPEN:
# Char is an array initiator (i.e. '[').
# If the context is array or object, push the appropriate value
# onto the container_value_context_stack.
if matcher == Matchers.IS_ARRAY_VALUE_START:
self.container_value_context_stack.append(ARRAY_VALUE_CONTEXT)
elif matcher == Matchers.IS_OBJECT_VALUE_START:
self.container_value_context_stack.append(OBJECT_VALUE_CONTEXT)
# Expect an array terminator (which is already on the stack) to
# follow.
return (
Events.ARRAY_OPEN,
None,
(Matchers.IS_ARRAY_VALUE_START, Matchers.ARRAY_CLOSE)
)
if c == Matchers.OBJECT_OPEN:
# Char is an object initiator (i.e. '{')
# If the context is array or object, push the appropriate value
# onto the container_value_context_stack.
if matcher == Matchers.IS_ARRAY_VALUE_START:
self.container_value_context_stack.append(ARRAY_VALUE_CONTEXT)
elif matcher == Matchers.IS_OBJECT_VALUE_START:
self.container_value_context_stack.append(OBJECT_VALUE_CONTEXT)
# Expect an object key, item separator, or object terminator (which
# is already on the stack) to follow.
return (
Events.OBJECT_OPEN,
None,
(Matchers.IS_OBJECT_KEY_START, Matchers.OBJECT_CLOSE)
)
if matcher == Matchers.ARRAY_CLOSE:
# Char is an array terminator (i.e. ']')
# If container_value_context_stack is non-empty, pop the last
# context and expect whatever's appropriate to follow.
expect = None
if self.container_value_context_stack:
context = self.container_value_context_stack.pop()
item_sep_matcher = (
Matchers.IS_ARRAY_ITEM_SEP
if context == ARRAY_VALUE_CONTEXT
else Matchers.IS_OBJECT_ITEM_SEP
)
expect = (item_sep_matcher, self.expect_stack.pop())
return Events.ARRAY_CLOSE, None, expect
if matcher == Matchers.OBJECT_CLOSE:
# Char is an object terminator (i.e. '}').
# If container_value_context_stack is non-empty, pop the last
# context and expect whatever's appropriate to follow.
expect = None
if self.container_value_context_stack:
context = self.container_value_context_stack.pop()
item_sep_matcher = (
Matchers.IS_ARRAY_ITEM_SEP
if context == ARRAY_VALUE_CONTEXT
else Matchers.IS_OBJECT_ITEM_SEP
)
expect = (item_sep_matcher, self.expect_stack.pop())
return Events.OBJECT_CLOSE, None, expect
if matcher == Matchers.IS_OBJECT_KEY_START:
            # Char is the expected object key's opening double-quote.
# Expect a object key/value separator (i.e. ':') to follow.
return Events.OBJECT_KEY, self.parse_string(), Matchers.KV_SEP
if matcher == Matchers.KV_SEP:
# Char is an object key / value separator (i.e. ':')
# Expect an object value (e.g. string, number, null) to follow.
return Events.KV_SEP, None, Matchers.IS_OBJECT_VALUE_START
if matcher == Matchers.IS_OBJECT_ITEM_SEP:
# Char is an item separator (i.e. ',') in a post-object-value
# context. Expect an object key or object terminator (which is
# already on the stack) to follow.
return (
Events.OBJECT_ITEM_SEP,
None,
(Matchers.IS_OBJECT_KEY_START, self.expect_stack.pop())
)
if matcher == Matchers.IS_ARRAY_ITEM_SEP:
# Char is an item separator (i.e. ',') in a post-array-value
            # context. Expect an array value, item separator (thus accommodating
# unlimited trailing commas), or array terminator to follow.
return (
Events.ARRAY_ITEM_SEP,
None,
(Matchers.IS_ARRAY_VALUE_START, self.expect_stack.pop())
)
if c == Matchers.STRING_START:
# Char is a string initiator (i.e. '"')
# Return the event along with a string value parser/generator.
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_STRING
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_STRING
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.STRING
expect = None
return event, self.parse_string(), expect
if Matchers.IS_NUMBER_START(c):
# Char is a number initiator (i.e. '-' or a digit)
# Return the event along with a number value parser/generator.
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_NUMBER
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_NUMBER
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.NUMBER
expect = None
# parse_number() is going to need this first character, so stuff it
# back in.
self.stuff_char(c)
return event, self.parse_number(), expect
if c == Matchers.NULL_START:
# Char is a null initiator (i.e. 'n'), expect the remaining chars.
self.expect(b'u')
self.expect(b'l')
self.expect(b'l')
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_NULL
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_NULL
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.NULL
expect = None
return event, None, expect
if c == Matchers.TRUE_START:
# Char is a true initiator (i.e. 't'), expect the remaining chars.
self.expect(b'r')
self.expect(b'u')
self.expect(b'e')
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_TRUE
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_TRUE
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.TRUE
expect = None
return event, None, expect
if c == Matchers.FALSE_START:
# Char is a false initiator (i.e. 'f'), expect the remaining chars.
self.expect(b'a')
self.expect(b'l')
self.expect(b's')
self.expect(b'e')
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_FALSE
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_FALSE
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.FALSE
expect = None
return event, None, expect
        # No matcher handled this character; this point should be unreachable.
raise AssertionError(c, matcher)
def convert(self, event, value):
# Convert a parsed value to a Python type.
if (event == Events.ARRAY_VALUE_NULL
or event == Events.OBJECT_VALUE_NULL
or event == Events.NULL):
return None
if (event == Events.ARRAY_VALUE_TRUE
or event == Events.OBJECT_VALUE_TRUE
or event == Events.TRUE):
return True
if (event == Events.ARRAY_VALUE_FALSE
or event == Events.OBJECT_VALUE_FALSE
or event == Events.FALSE):
return False
if (event == Events.ARRAY_VALUE_STRING
or event == Events.OBJECT_VALUE_STRING
or event == Events.STRING
or event == Events.OBJECT_KEY):
return b''.join(value).decode(self.encoding)
if (event == Events.ARRAY_VALUE_NUMBER
or event == Events.OBJECT_VALUE_NUMBER
or event == Events.NUMBER):
s = b''.join(value)
# Cast to either float or int based on presence of a decimal place.
return float(s) if PERIOD in s else int(s)
raise NotImplementedError(event, value)
def yield_paths(self, paths):
# Yield ( <path>, <value-generator> ) tuples for all specified paths
# that exist in the data.
#
# paths must be an iterable of lists of byte strings and integers in
# the format:
# [ '<object-key>', <array-index>, ... ]
# Example:
# [ 'people', 0, 'first_name' ]
#
# Track the indexes of the paths in paths to be yielded so that we can
# abort as soon as all requested paths have been yielded.
unyielded_path_idxs = set(range(len(paths)))
# Define the current path stack.
path = []
parse_gen = self.parse()
for event, value in parse_gen:
if event == Events.OBJECT_OPEN:
# An object has opened.
# If the current path node is an array index, increment it.
if path and isinstance(path[-1], int):
path[-1] += 1
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
yielded = False
for i in unyielded_path_idxs:
if path == paths[i]:
                        # Hand the generator to load(), which consumes the
                        # rest of this object and yields it as a built value.
yield path, self.load(parse_gen)
unyielded_path_idxs.remove(i)
yielded = True
break
if not yielded:
# If this container was not already load()ed and yielded,
                    # append an empty object indicator to the current path, to
# be overwritten by the next parsed key.
path.append(PERIOD)
continue
elif event == Events.OBJECT_CLOSE:
# The object has closed.
# Pop it from the current path.
path.pop()
continue
elif event == Events.ARRAY_OPEN:
# An array has opened.
# If the current path node is an array index, increment it.
if path and isinstance(path[-1], int):
path[-1] += 1
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
yielded = False
for i in unyielded_path_idxs:
if path == paths[i]:
                        # Hand the generator to load(), which consumes the
                        # rest of this array and yields it as a built value.
yield path, self.load(parse_gen)
unyielded_path_idxs.remove(i)
yielded = True
break
if not yielded:
# If this container was not already load()ed and yielded,
                    # append an array index of -1 to the current path, to be
                    # incremented on the next parsed array value.
path.append(-1)
continue
elif event == Events.ARRAY_CLOSE:
# The array has closed.
# Pop it from the current path.
path.pop()
continue
elif event == Events.OBJECT_KEY:
# We parsed an object key.
# Overwrite the current path node with the key value.
path[-1] = self.convert(Events.OBJECT_KEY, value)
elif (event == Events.ARRAY_VALUE_STRING
or event == Events.ARRAY_VALUE_NUMBER
or event == Events.ARRAY_VALUE_NULL
or event == Events.ARRAY_VALUE_TRUE
or event == Events.ARRAY_VALUE_FALSE):
# We parsed an array value.
# Increment the current path node array index.
path[-1] += 1
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
for i in unyielded_path_idxs:
if path == paths[i]:
yield path, self.convert(event, value)
unyielded_path_idxs.remove(i)
break
elif (event == Events.OBJECT_VALUE_STRING
or event == Events.OBJECT_VALUE_NUMBER
or event == Events.OBJECT_VALUE_NULL
or event == Events.OBJECT_VALUE_TRUE
or event == Events.OBJECT_VALUE_FALSE):
# We parsed an object value.
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
for i in unyielded_path_idxs:
if path == paths[i]:
yield path, self.convert(event, value)
unyielded_path_idxs.remove(i)
break
# Abort if all of the requested paths have been yielded.
if len(unyielded_path_idxs) == 0:
return
def load(self, parse_gen=None):
# If parse_gen is specified, parse the single next value in the stream,
# otherwise parse the entire stream, and return a single Python object,
# similar to the built-in json.load() / json.loads() behavior.
if parse_gen is None:
parse_gen = self.parse()
# Initialize the value based on the first read.
event, value = next(parse_gen)
# If it's a single scalar value, convert and return it.
if (event == Events.STRING
or event == Events.NUMBER
or event == Events.NULL
or event == Events.TRUE
or event == Events.FALSE):
return self.convert(event, value)
# Create an initial, root object to represent the initial container.
if (event == Events.OBJECT_OPEN or event == Events.OBJECT_KEY):
root = {}
elif (event == Events.ARRAY_OPEN or event.startswith('ARRAY_VALUE_')):
root = []
else:
raise NotImplementedError(event)
# Create a stack to store the hierarchy of open container objects.
container_stack = []
# Define the current container object. Building the final object will
# entail in-place mutation of whatever object 'container' points to.
container = root
# Define a place to store the last-parsed object key.
key = None
def open_container(_container):
nonlocal container
# Attach the new container to the one that's currently open.
if type(container) is list:
container.append(_container)
else:
container[key] = _container
# Push the currently-open container onto the stack.
container_stack.append(container)
# Set the new container as the current.
container = _container
def close_container():
# Close the current container object and reopen the last one.
nonlocal container
container = container_stack.pop()
# If we're already in the context of an array or object item, use
# it to init the container state.
if event.startswith('ARRAY_VALUE_'):
container.append(self.convert(event, value))
elif event == Events.OBJECT_KEY:
key = self.convert(event, value)
# Start parsing.
for event, value in parse_gen:
if event == Events.ARRAY_OPEN:
# An array just opened so open a new list container.
open_container([])
elif event == Events.OBJECT_OPEN:
# An array just opened so open a new object container.
open_container({})
elif event == Events.ARRAY_CLOSE or event == Events.OBJECT_CLOSE:
# The current array or object container just closed.
# If there are no open containers, stop parsing.
if len(container_stack) == 0:
break
# Close the current container and reopen the last one.
close_container()
elif (event == Events.ARRAY_VALUE_STRING
or event == Events.ARRAY_VALUE_NUMBER
or event == Events.ARRAY_VALUE_NULL
or event == Events.ARRAY_VALUE_TRUE
or event == Events.ARRAY_VALUE_FALSE):
# We just parsed an array value.
# Append it to the current list container.
container.append(self.convert(event, value))
elif event == Events.OBJECT_KEY:
# We just parsed an object key. Record it.
key = self.convert(event, value)
elif (event == Events.OBJECT_VALUE_STRING
or event == Events.OBJECT_VALUE_NUMBER
or event == Events.OBJECT_VALUE_NULL
or event == Events.OBJECT_VALUE_TRUE
or event == Events.OBJECT_VALUE_FALSE):
# We just parsed an object value.
# Use the last-parsed object key to create an item in the
# current object container.
container[key] = self.convert(event, value)
# Return the mutated root object.
return root
###############################################################################
# CLI
###############################################################################
def convert_dot_path_to_yield_path(path):
# Convert the dot-delimited --path argument to a path list required by
# Parser.yield_paths().
final_path = []
i = 0
splits = [int(seg) if seg.isdigit() else seg for seg in path.split('.')]
splits_len = len(splits)
while i < splits_len:
seg = splits[i]
if seg != '':
final_path.append(seg)
else:
# An empty seg indicates the presence of a double-dot which is used
# to indicate an escaped segment value dot.
if final_path:
final_path[-1] += '.' + splits[i + 1]
else:
final_path.append('.' + splits[i + 1])
i += 1
i += 1
return final_path
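# For example: convert_dot_path_to_yield_path('people.0.first..name') returns
# ['people', 0, 'first.name'] (the double dot escapes a literal dot in a key).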
def convert_yielded_key_to_dot_path(key):
return '.'.join(str(seg) if isinstance(seg, int) else seg for seg in key)
if __name__ == '__main__':
import argparse
from io import BytesIO
from json import dumps
arg_parser = argparse.ArgumentParser()
g = arg_parser.add_mutually_exclusive_group()
g.add_argument('--file', type=argparse.FileType('rb'))
g.add_argument('--string', type=str)
arg_parser.add_argument('--action', choices=('load', 'parse'),
default="load")
arg_parser.add_argument('--path', type=str, action='append',
help='Dot-delimited path specifier with dots in '\
'keys escaped as a double-dot')
args = arg_parser.parse_args()
if args.string:
args.file = BytesIO(args.string.encode('utf-8'))
if args.path and args.action != 'load':
arg_parser.error('Please specify --action=load when using --path')
parser = Parser(args.file)
if args.action == 'load':
if not args.path:
# Load it all and pretty-print the result.
print(dumps(parser.load(), indent=2))
else:
# Load only the specified paths.
result = {}
            # Assert that no path is a prefix of another, which would request
            # both a container and one of its sub-objects; that won't work
            # because the container itself is read/consumed before the
            # sub-object ever has a chance.
num_paths = len(args.path)
for a in args.path:
for b in args.path:
if a == b:
continue
if b.startswith(a) and b[len(a)] == '.':
arg_parser.error(
'Specifying container sub-elements ({}) and the '\
'container itself ({}) is not supported.'
.format(b, a)
)
# Convert the dot-delimited paths to path segments lists as
# required by Parser.yield_paths().
paths = list(map(convert_dot_path_to_yield_path, args.path))
for key, value in parser.yield_paths(paths):
# Convert the yielded key back to a dot path.
key = convert_yielded_key_to_dot_path(key)
result[key] = value
# Print the result as JSON.
print(dumps(result, indent=2))
elif args.action == 'parse':
for event, value in parser.parse():
if value is not None:
value = parser.convert(event, value)
print(event, value)
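# Example invocations (hypothetical module filename; flags as defined above,
# paths using the double-dot escape described earlier):
#   python json_stream_parser.py --string '{"a": [1, 2, 3]}' --action load
#   python json_stream_parser.py --string '{"a": {"b.c": 1}}' --path 'a.b..c'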
|
nilq/baby-python
|
python
|
from ex112_1.utilidadescev import moeda, dados
preco = dados.leiadinheiro('Digite o preço: ')
moeda.resumo(preco, 90, 35)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Filters for masking stationary or near stationary data based on vessel speed
"""
import numpy as np


def mask_stationary(Sv, speed, threshold):
"""
Mask stationary or near stationary data based on vessel speed
Args:
Sv (float): 2D numpy array with Sv data to be masked (dB)
speed (float): 1D numpy array with vessel speed data (knots)
threshold (int): speed below which Sv data will be masked (knots)
Returns:
bool: 2D numpy array mask (stationary = True)
float: 2D numpy array with Sv data masked with NAN values
"""
    # TODO: distance and speed retrieval in PyEchoLab still needs to be
    # implemented/verified upstream.
    # A hedged sketch of the masking itself, assuming pings run along the
    # first axis of Sv with one speed value per ping:
    mask = np.broadcast_to((speed < threshold)[:, np.newaxis], Sv.shape)
    Sv_masked = np.where(mask, np.nan, Sv)
    return mask, Sv_masked
|
nilq/baby-python
|
python
|