content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import numpy as np
import pandas as pd
import xarray as xr
import cubepy
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.classes.evaluators.CubepyEvaluator import CubepyEvaluator
from pyplan_engine.classes.evaluators.IPythonEvaluator import IPythonEvaluator
from pyplan_engine.classes.evaluators.NumpyEvaluator import NumpyEvaluator
from pyplan_engine.classes.evaluators.PandasEvaluator import PandasEvaluator
from pyplan_engine.classes.evaluators.XArrayEvaluator import XArrayEvaluator
from pyplan_engine.classes.evaluators.BokehEvaluator import BokehEvaluator
from pyplan_engine.classes.evaluators.PlotlyEvaluator import PlotlyEvaluator
from pyplan_engine.classes.evaluators.MatplotlibEvaluator import MatplotlibEvaluator
from pyplan_engine.classes.XHelpers import XIndex
from bokeh.plotting import Figure
from bokeh.layouts import LayoutDOM
from plotly.graph_objs._figure import Figure as PlotlyFigue
import inspect
from matplotlib.artist import Artist as MatplotlibArtist
class Evaluator(object):
    """Factory that selects the evaluator matching the type of a node's result."""

    # IPython rich-display hook names; if the result exposes any of these,
    # IPythonEvaluator can render it.
    ipytonMethods = ["_repr_html_", "_repr_json_",
                     "_repr_jpeg_", "_repr_png_", "_repr_pretty_"]

    @staticmethod
    def createInstance(result):
        """Return the evaluator instance able to handle ``result``.

        Falls back to BaseEvaluator for None and for unrecognized types.
        """
        if result is None:
            return BaseEvaluator()
        if isinstance(result, (pd.DataFrame, pd.Series, pd.Index)):
            return PandasEvaluator()
        if isinstance(result, (xr.DataArray, XIndex)):
            return XArrayEvaluator()
        # Matplotlib results may be a single Artist, the pyplot module itself,
        # or an ndarray of Artists (e.g. what plt.hist/subplots return).
        # Guard with ndim/size instead of len(): len() raises TypeError on
        # 0-d arrays, and item(0) raises IndexError on empty-but-len>0 shapes
        # such as (2, 0).
        if (isinstance(result, MatplotlibArtist)
                or (inspect.ismodule(result) and "matplotlib.pyplot" in str(result))
                or (isinstance(result, np.ndarray) and result.ndim > 0 and result.size > 0
                    and isinstance(result.item(0), MatplotlibArtist))):
            return MatplotlibEvaluator()
        if isinstance(result, np.ndarray):
            return NumpyEvaluator()
        if isinstance(result, (Figure, LayoutDOM)):
            return BokehEvaluator()
        if isinstance(result, PlotlyFigue):
            return PlotlyEvaluator()
        if isinstance(result, (cubepy.Cube, cubepy.Index)):
            return CubepyEvaluator()
        # Any object exposing an IPython display hook gets the IPython evaluator.
        if set(dir(result)) & set(Evaluator.ipytonMethods):
            return IPythonEvaluator()
        return BaseEvaluator()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# $Id: test_class.py 5174 2007-05-31 00:01:52Z wiemann $
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This module has been placed in the public domain.
"""
Tests for the 'class' directive.
"""
from __init__ import DocutilsTestSupport
def suite():
    """Build and return the parser test suite for the 'class' directive."""
    test_suite = DocutilsTestSupport.ParserTestSuite()
    test_suite.generateTests(totest)
    return test_suite
totest = {}
# Maps directive name -> list of [rst input, expected pseudo-XML output] pairs,
# consumed by suite() via ParserTestSuite.generateTests.
# NOTE(review): leading whitespace inside these triple-quoted strings appears
# to have been stripped at some point; docutils pseudo-XML output is normally
# indented — verify against the upstream docutils test_class.py.
totest['class'] = [
["""\
.. class:: class1 class2
""",
"""\
<document source="test data">
<pending>
.. internal attributes:
.transform: docutils.transforms.misc.ClassAttribute
.details:
class: ['class1', 'class2']
directive: 'class'
"""],
["""\
.. class:: class1 class2
The classes are applied to this paragraph.
And this one.
""",
"""\
<document source="test data">
<paragraph classes="class1 class2">
The classes are applied to this paragraph.
<paragraph classes="class1 class2">
And this one.
"""],
]
if __name__ == '__main__':
    import unittest
    # Run through the 'suite' factory so the generated directive tests are collected.
    unittest.main(defaultTest='suite')
|
nilq/baby-python
|
python
|
# coding:utf-8
# Crop regions of interest out of each video frame and save them as JPEG slices.
import tkFileDialog
import cv2
from configurationInjection import configInjection
import os

config = configInjection()
config.loadConfiguration()
# First ROI group: each roi is (x_start, x_end, y_start, y_end) — TODO confirm
# against configurationInjection; the slice below indexes [roi[2]:roi[3], roi[0]:roi[1]].
rois = config.rois[0]

videopath = tkFileDialog.askopenfilename(initialdir="/home/zb/myfile/cutSave")
cap = cv2.VideoCapture(videopath)

# Create the output directories once, before the frame loop (the original
# re-checked them on every frame).
basePath = "/home/zb/myfile/cutSave/1-1"
framesBasePath = "/home/zb/myfile/cutSave/1-1/frames7"
if not os.path.exists(basePath):
    os.mkdir(basePath)
if not os.path.exists(framesBasePath):
    os.mkdir(framesBasePath)

count = 0
flag = True
while flag:
    flag, img = cap.read()
    if not flag:
        # End of stream: cap.read() returns (False, None); the original code
        # went on to slice img and crashed with a TypeError here.
        break
    bigSlice = img[360:1080, 480:1440]
    # cv2.imwrite(basePath + "/" + str(count) + ".jpg", bigSlice)
    for roi in rois:
        slice = img[roi[2]:roi[3], roi[0]:roi[1]]
        count += 1
        cv2.imwrite(framesBasePath + "/" + str(count) + ".jpg", slice)
cap.release()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""form base classes for aloha-editor integration"""
import floppyforms as forms
from djaloha.widgets import AlohaInput
from django.utils.encoding import smart_unicode
class DjalohaForm(forms.Form):
    """Base form exposing a single aloha-editor field bound to one model field."""

    def __init__(self, model_class, lookup, field_name, data=None, field_value=None, *args, **kwargs):
        super(DjalohaForm, self).__init__(data, *args, **kwargs)
        self._model_class = model_class
        self._lookup = lookup
        self._field_name = field_name
        # Encode "<app>__<Model>" from the model's module path and class name.
        app_label = model_class.__module__.split('.')[-2]
        model_name = app_label + "__" + model_class.__name__
        # Flatten the lookup dict into "key__value" pairs (quotes stripped).
        pairs = []
        for (k, v) in lookup.items():
            pairs.append(k + "__" + unicode(v).strip('"\''))
        lookup_str = "__".join(pairs)
        # Full field name identifies model, lookup and field for round-tripping.
        self._form_field = "__".join(("djaloha", model_name, lookup_str, field_name))
        self.fields[self._form_field] = forms.CharField(
            required=False,
            initial=field_value,
            widget=AlohaInput(),
        )

    def save(self):
        """Write the edited value back onto the looked-up (or created) object."""
        value = smart_unicode(self.cleaned_data[self._form_field])
        obj = self._model_class.objects.get_or_create(**self._lookup)[0]
        setattr(obj, self._field_name, value)
        obj.save()

    def as_is(self):
        """Render the form's HTML without any wrapping parent tag."""
        return self._html_output(
            normal_row=u'%(field)s',
            error_row=u'%s',
            row_ender='',
            help_text_html=u'',
            errors_on_separate_row=True,
        )
|
nilq/baby-python
|
python
|
"""Replays
RocketLeagueReplays API module
"""
import requests
BASE_URL = 'https://www.rocketleaguereplays.com/api/replays/?page='


def get_replays(page_num):
    """
    Request one page of replay data from the RocketLeagueReplays API.

    :param page_num: Page number to request
    :return: list of matches from the response's "results" key
    """
    payload = requests.get(f'{BASE_URL}{page_num}').json()
    return payload['results']
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import urwid
# urwid palette entries: (name, foreground, background).
PALETTE = [
    ('red', 'dark red', ''),
    ('selectedred', 'dark red', 'yellow'),
    ('selected', '', 'yellow'),
]
# Modify these as needed
# Card dimensions per size name: (columns, rows).
SIZES = {'small': (4, 3), 'medium': (6, 4), 'large': (8, 6)}
# Active size key into SIZES, read by BaseCardWidget.
card_size = 'large'
class BaseCardWidget(urwid.WidgetWrap):
    """Shared behaviour for card-shaped widgets: sizing and text redraw."""

    def __init__(self, *args, **kw):
        # Resolve the configured card size once, at construction time.
        cols, rows = SIZES[card_size]
        self.card_columns = cols
        self.card_rows = rows
        super(BaseCardWidget, self).__init__(*args, **kw)
        self.redraw()

    def redraw(self):
        """Refresh self.text from the subclass's renderer."""
        self.text.set_text(self._draw_card_text())

    def _draw_card_text(self):
        """Subclasses must return urwid text markup for the card face."""
        raise NotImplementedError
class SpacerWidget(BaseCardWidget):
    """Blank, card-sized filler used purely for layout spacing."""

    def __init__(self, **kw):
        self.text = urwid.Text('', wrap='clip')
        super(SpacerWidget, self).__init__(self.text)

    def _draw_card_text(self):
        # One row fewer than a full card: vertical space expands to fill
        # the remainder anyway.
        blank_row = u' ' * self.card_columns + '\n'
        return [blank_row] * (self.card_rows - 1)
class EmptyCardWidget(BaseCardWidget):
    """An empty card slot; optionally clickable via an onclick callback."""

    def __init__(self, onclick=None, **kw):
        self.onclick = onclick
        self.text = urwid.Text('', wrap='clip')
        super(EmptyCardWidget, self).__init__(self.text)

    def _draw_card_text(self):
        # Box-drawing outline of an empty card: top edge, hollow middle, bottom edge.
        inner = self.card_columns - 2
        middle = u'│' + ' ' * inner + u'│\n'
        return [
            u'╭' + u'─' * inner + u'╮\n'
            + middle * (self.card_rows - 2)
            + u'╰' + u'─' * inner + u'╯\n'
        ]

    def selectable(self):
        # Only take focus when a click handler is attached.
        return bool(self.onclick)

    def mouse_event(self, size, event, button, col, row, focus):
        if event == 'mouse press' and self.onclick:
            self.onclick(self)

    def iter_widgets(self):
        # An empty slot contains no child widgets.
        return iter([])
class CardWidget(BaseCardWidget):
    """Renders a single playing card (face up or face down) at a grid position."""

    def __init__(self, card, row_index, col_index, onclick=None):
        # Underlying card model; exposed via the `card` property so that
        # reassignment triggers a redraw.
        self._card = card
        self.row_index = row_index
        self.col_index = col_index
        self.text = urwid.Text('', wrap='clip')
        self.highlighted = False
        self.onclick = onclick
        super(CardWidget, self).__init__(self.text)

    def __repr__(self):
        return '{}(card={!r}, highlighted={!r}, ...)'.format(
            self.__class__.__name__, self.card, self.highlighted,
        )

    def mouse_event(self, size, event, button, col, row, focus):
        if event == 'mouse press':
            if self.onclick:
                self.onclick(self)

    def _draw_card_text(self):
        """Build the urwid markup for the card face as (attr, text) runs."""
        columns, rows = self.card_columns, self.card_rows
        # Attribute names refer to entries in PALETTE above.
        style = 'selected' if self.highlighted else ''
        redornot = 'red' if self.card.suit in ('hearts', 'diamonds') else ''
        if self.highlighted:
            # 'selectedred' / 'selected' palette entries for highlighted cards.
            redornot = 'selected' + redornot
        if not self.face_up:
            # Face-down card: fill the interior with a hatched pattern.
            face_down_middle_filling = (columns-2) * u'╬'
            filling = [u'│', (style, face_down_middle_filling), u'│\n'] * (rows-2)
        else:
            # Face-up card: rank+suit in the top-left, mirrored bottom-right.
            rank, suit = (self.card.rank, self.card.suit_symbol)
            spaces = (columns-5) * ' '
            filling = [u'│', (redornot, u'{}{}{}'.format(rank.ljust(2), spaces, suit)), u'│\n']
            filling += (
                [u'│', (style, u' ' * (columns-2)), u'│\n'] * (rows-4) +
                [u'│', (redornot, u'{}{}{}'.format(suit, spaces,rank.rjust(2))), u'│\n']
            )
        top = u'╭'+ u'─' * (columns-2) + u'╮\n'
        text = [top] + filling
        text += [u'╰' + u'─' * (columns-2) + u'╯\n']
        # Strip the trailing newline from the last run, whether it is a plain
        # string or an (attr, text) tuple.
        if isinstance(text[-1], tuple):
            text[-1] = text[-1][0], text[-1][1].strip()
        else:
            text[-1] = text[-1].strip()
        return text

    @property
    def card(self):
        return self._card

    @card.setter
    def card(self, card):
        # Replacing the card redraws the widget.
        self._card = card
        self.redraw()

    @property
    def face_up(self):
        # Delegates to the card model's face_up flag.
        return self.card.face_up

    @face_up.setter
    def face_up(self, val):
        self.card.face_up = bool(val)
        self.redraw()
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from elements_sdk.configuration import Configuration
class TapeFile(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> OpenAPI type string; drives to_dict() traversal.
    openapi_types = {
        'id': 'int',
        'tape': 'Tape',
        'path': 'str',
        'search_highlight': 'str',
        'uid': 'int',
        'name': 'str',
        'is_dir': 'bool',
        'read_only': 'bool',
        'length': 'int',
        'checksum': 'str',
        'fullpath': 'str',
        'parent': 'int'
    }
    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'id': 'id',
        'tape': 'tape',
        'path': 'path',
        'search_highlight': 'search_highlight',
        'uid': 'uid',
        'name': 'name',
        'is_dir': 'is_dir',
        'read_only': 'read_only',
        'length': 'length',
        'checksum': 'checksum',
        'fullpath': 'fullpath',
        'parent': 'parent'
    }

    def __init__(self, id=None, tape=None, path=None, search_highlight=None, uid=None, name=None, is_dir=None, read_only=None, length=None, checksum=None, fullpath=None, parent=None, local_vars_configuration=None):  # noqa: E501
        """TapeFile - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the generated properties below.
        self._id = None
        self._tape = None
        self._path = None
        self._search_highlight = None
        self._uid = None
        self._name = None
        self._is_dir = None
        self._read_only = None
        self._length = None
        self._checksum = None
        self._fullpath = None
        self._parent = None
        self.discriminator = None
        # Optional attributes are only assigned when provided; path, uid, name,
        # checksum, fullpath and parent are always assigned so their setters
        # run their validation (path and uid/name reject None).
        if id is not None:
            self.id = id
        if tape is not None:
            self.tape = tape
        self.path = path
        if search_highlight is not None:
            self.search_highlight = search_highlight
        self.uid = uid
        self.name = name
        if is_dir is not None:
            self.is_dir = is_dir
        if read_only is not None:
            self.read_only = read_only
        if length is not None:
            self.length = length
        self.checksum = checksum
        self.fullpath = fullpath
        self.parent = parent

    @property
    def id(self):
        """Gets the id of this TapeFile.  # noqa: E501

        :return: The id of this TapeFile.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this TapeFile.

        :param id: The id of this TapeFile.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def tape(self):
        """Gets the tape of this TapeFile.  # noqa: E501

        :return: The tape of this TapeFile.  # noqa: E501
        :rtype: Tape
        """
        return self._tape

    @tape.setter
    def tape(self, tape):
        """Sets the tape of this TapeFile.

        :param tape: The tape of this TapeFile.  # noqa: E501
        :type: Tape
        """
        self._tape = tape

    @property
    def path(self):
        """Gets the path of this TapeFile.  # noqa: E501

        :return: The path of this TapeFile.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this TapeFile.

        :param path: The path of this TapeFile.  # noqa: E501
        :type: str
        """
        # Required field: reject None and enforce minimum length when
        # client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                path is not None and len(path) < 1):
            raise ValueError("Invalid value for `path`, length must be greater than or equal to `1`")  # noqa: E501
        self._path = path

    @property
    def search_highlight(self):
        """Gets the search_highlight of this TapeFile.  # noqa: E501

        :return: The search_highlight of this TapeFile.  # noqa: E501
        :rtype: str
        """
        return self._search_highlight

    @search_highlight.setter
    def search_highlight(self, search_highlight):
        """Sets the search_highlight of this TapeFile.

        :param search_highlight: The search_highlight of this TapeFile.  # noqa: E501
        :type: str
        """
        self._search_highlight = search_highlight

    @property
    def uid(self):
        """Gets the uid of this TapeFile.  # noqa: E501

        :return: The uid of this TapeFile.  # noqa: E501
        :rtype: int
        """
        return self._uid

    @uid.setter
    def uid(self, uid):
        """Sets the uid of this TapeFile.

        :param uid: The uid of this TapeFile.  # noqa: E501
        :type: int
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and uid is None:  # noqa: E501
            raise ValueError("Invalid value for `uid`, must not be `None`")  # noqa: E501
        self._uid = uid

    @property
    def name(self):
        """Gets the name of this TapeFile.  # noqa: E501

        :return: The name of this TapeFile.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this TapeFile.

        :param name: The name of this TapeFile.  # noqa: E501
        :type: str
        """
        # Required field with a 1..255 character length constraint.
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                name is not None and len(name) > 255):
            raise ValueError("Invalid value for `name`, length must be less than or equal to `255`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                name is not None and len(name) < 1):
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")  # noqa: E501
        self._name = name

    @property
    def is_dir(self):
        """Gets the is_dir of this TapeFile.  # noqa: E501

        :return: The is_dir of this TapeFile.  # noqa: E501
        :rtype: bool
        """
        return self._is_dir

    @is_dir.setter
    def is_dir(self, is_dir):
        """Sets the is_dir of this TapeFile.

        :param is_dir: The is_dir of this TapeFile.  # noqa: E501
        :type: bool
        """
        self._is_dir = is_dir

    @property
    def read_only(self):
        """Gets the read_only of this TapeFile.  # noqa: E501

        :return: The read_only of this TapeFile.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this TapeFile.

        :param read_only: The read_only of this TapeFile.  # noqa: E501
        :type: bool
        """
        self._read_only = read_only

    @property
    def length(self):
        """Gets the length of this TapeFile.  # noqa: E501

        :return: The length of this TapeFile.  # noqa: E501
        :rtype: int
        """
        return self._length

    @length.setter
    def length(self, length):
        """Sets the length of this TapeFile.

        :param length: The length of this TapeFile.  # noqa: E501
        :type: int
        """
        self._length = length

    @property
    def checksum(self):
        """Gets the checksum of this TapeFile.  # noqa: E501

        :return: The checksum of this TapeFile.  # noqa: E501
        :rtype: str
        """
        return self._checksum

    @checksum.setter
    def checksum(self, checksum):
        """Sets the checksum of this TapeFile.

        :param checksum: The checksum of this TapeFile.  # noqa: E501
        :type: str
        """
        # Nullable, but when present must be 1..255 characters.
        if (self.local_vars_configuration.client_side_validation and
                checksum is not None and len(checksum) > 255):
            raise ValueError("Invalid value for `checksum`, length must be less than or equal to `255`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                checksum is not None and len(checksum) < 1):
            raise ValueError("Invalid value for `checksum`, length must be greater than or equal to `1`")  # noqa: E501
        self._checksum = checksum

    @property
    def fullpath(self):
        """Gets the fullpath of this TapeFile.  # noqa: E501

        :return: The fullpath of this TapeFile.  # noqa: E501
        :rtype: str
        """
        return self._fullpath

    @fullpath.setter
    def fullpath(self, fullpath):
        """Sets the fullpath of this TapeFile.

        :param fullpath: The fullpath of this TapeFile.  # noqa: E501
        :type: str
        """
        # Nullable, but when present must be 1..4095 characters.
        if (self.local_vars_configuration.client_side_validation and
                fullpath is not None and len(fullpath) > 4095):
            raise ValueError("Invalid value for `fullpath`, length must be less than or equal to `4095`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                fullpath is not None and len(fullpath) < 1):
            raise ValueError("Invalid value for `fullpath`, length must be greater than or equal to `1`")  # noqa: E501
        self._fullpath = fullpath

    @property
    def parent(self):
        """Gets the parent of this TapeFile.  # noqa: E501

        :return: The parent of this TapeFile.  # noqa: E501
        :rtype: int
        """
        return self._parent

    @parent.setter
    def parent(self, parent):
        """Sets the parent of this TapeFile.

        :param parent: The parent of this TapeFile.  # noqa: E501
        :type: int
        """
        self._parent = parent

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk every declared attribute, recursively serializing nested
        # models (anything with to_dict), lists and dicts of them.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TapeFile):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TapeFile):
            return True
        return self.to_dict() != other.to_dict()
|
nilq/baby-python
|
python
|
from unittest import mock
from django import forms
from django.test import SimpleTestCase
from django.utils.functional import lazy
from phonenumber_field.formfields import PhoneNumberField
# E.164-formatted Algerian number used as a known-valid fixture below.
ALGERIAN_PHONE_NUMBER = "+213799136332"
class PhoneNumberFormFieldTest(SimpleTestCase):
    """Form-level tests for PhoneNumberField validation and error messages."""

    def test_error_message(self):
        # The default "invalid" message includes an example number.
        class PhoneNumberForm(forms.Form):
            number = PhoneNumberField()

        form = PhoneNumberForm({"number": "invalid"})
        self.assertIs(form.is_valid(), False)
        self.assertEqual(
            form.errors, {"number": ["Enter a valid phone number (e.g. +12125552368)."]}
        )

    def test_override_error_message(self):
        # Subclasses can replace the "invalid" message via default_error_messages.
        class MyPhoneNumberField(PhoneNumberField):
            default_error_messages = {"invalid": "MY INVALID MESSAGE!"}

        class PhoneNumberForm(forms.Form):
            number = MyPhoneNumberField()

        form = PhoneNumberForm({"number": "invalid"})
        self.assertIs(form.is_valid(), False)
        self.assertEqual(form.errors, {"number": ["MY INVALID MESSAGE!"]})

    def test_override_error_message_inline(self):
        # The error_messages kwarg overrides the message per field instance.
        class PhoneNumberForm(forms.Form):
            number = PhoneNumberField(
                error_messages={"invalid": "MY INLINE INVALID MESSAGE!"}
            )

        form = PhoneNumberForm({"number": "invalid"})
        self.assertIs(form.is_valid(), False)
        self.assertEqual(form.errors, {"number": ["MY INLINE INVALID MESSAGE!"]})

    def test_algerian_phone_number_in_form(self):
        # A valid E.164 number passes validation and round-trips unchanged.
        class PhoneNumberForm(forms.Form):
            number = PhoneNumberField()

        form = PhoneNumberForm({"number": ALGERIAN_PHONE_NUMBER})
        self.assertTrue(form.is_valid())
        self.assertEqual(ALGERIAN_PHONE_NUMBER, form.cleaned_data["number"])

    def test_error_message_lazy(self):
        # Instantiating the field must not force evaluation of lazy
        # translations; fail loudly if gettext is called.
        def fail_gettext(msgid):
            raise Exception("gettext was called unexpectedly.")

        with mock.patch(
            "phonenumber_field.formfields._",
            side_effect=lazy(fail_gettext, str),
        ):
            PhoneNumberField()
|
nilq/baby-python
|
python
|
from pkg.command.azcli_cmd import AzCliCommand
from pkg.entity._az_cli import AzCli
from pkg.executor._executor import Executor
from pkg.executor.azcli.command._azcli_cmd_executor import AzCliCommandExecutor
class AzCliExecutor(Executor):
    """Executor implementation that runs Azure CLI commands."""

    def __init__(self):
        pass

    def run_az_cli(self, cmd: AzCli):
        """Wrap *cmd* in an AzCliCommand and execute it, returning the result."""
        command = AzCliCommand(cmd)
        executor = AzCliCommandExecutor(command)
        return executor.execute()
|
nilq/baby-python
|
python
|
from logging import captureWarnings
from operator import inv
from typing import Container, Iterable, Union
import uuid
import time
import math
from datetime import datetime, timedelta, timezone
from unittest import TestCase
from unittest.mock import patch, MagicMock, ANY, call
from botocore.exceptions import ClientError, WaiterError, BotoCoreError
from samcli.commands.deploy.exceptions import (
DeployFailedError,
ChangeSetError,
DeployStackOutPutFailedError,
DeployBucketInDifferentRegionError,
)
from samcli.lib.deploy.deployer import Deployer
from samcli.lib.package.s3_uploader import S3Uploader
from samcli.lib.utils.time import utc_to_timestamp, to_datetime
class MockPaginator:
    """Test double for a boto3 paginator: always yields a canned response."""

    def __init__(self, resp):
        self.resp = resp

    def paginate(self, ChangeSetName=None, StackName=None):
        # Filter arguments are accepted for signature compatibility but ignored.
        return self.resp
class MockChangesetWaiter:
    """Test double for the change-set waiter; raises the canned exception if set."""

    def __init__(self, ex=None):
        self.ex = ex

    def wait(self, ChangeSetName, StackName, WaiterConfig):
        # Simulate a waiter failure when an exception was injected.
        if self.ex:
            raise self.ex
class MockCreateUpdateWaiter:
    """Test double for the stack create/update waiter; raises the canned exception if set."""

    def __init__(self, ex=None):
        self.ex = ex

    def wait(self, StackName, WaiterConfig):
        # Simulate a waiter failure when an exception was injected.
        if self.ex:
            raise self.ex
class CustomTestCase(TestCase):
    """TestCase extended with container-subset assertions."""

    def assertListSubset(self, l1: Iterable, l2: Union[Iterable, Container], msg=None) -> None:
        """Assert that every item of ``l1`` is contained in ``l2``.

        Equivalent to calling self.assertIn(item, l2) for each item of l1.
        """
        for item in l1:
            self.assertIn(item, l2, msg)
class TestDeployer(CustomTestCase):
def setUp(self):
self.session = MagicMock()
self.cloudformation_client = self.session.client("cloudformation")
self.s3_client = self.session.client("s3")
self.deployer = Deployer(self.cloudformation_client)
def test_deployer_init(self):
self.assertEqual(self.deployer._client, self.cloudformation_client)
self.assertEqual(self.deployer.changeset_prefix, "samcli-deploy")
def test_deployer_init_custom_sleep(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep=10)
self.assertEqual(deployer.client_sleep, 10)
def test_deployer_init_custom_sleep_invalid(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep="INVALID")
self.assertEqual(deployer.client_sleep, 0.5) # 0.5 is the default value
def test_deployer_init_custom_sleep_negative(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep=-5)
self.assertEqual(deployer.client_sleep, 0.5) # 0.5 is the default value
def test_deployer_init_custom_sleep_zero(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep=0)
self.assertEqual(deployer.client_sleep, 0.5) # 0.5 is the default value
def test_deployer_init_default_sleep(self):
deployer = Deployer(MagicMock().client("cloudformation"))
self.assertEqual(deployer.client_sleep, 0.5)
def test_deployer_has_no_stack(self):
self.deployer._client.describe_stacks = MagicMock(return_value={"Stacks": []})
self.assertEqual(self.deployer.has_stack("test"), False)
def test_deployer_has_stack_in_review(self):
self.deployer._client.describe_stacks = MagicMock(
return_value={"Stacks": [{"StackStatus": "REVIEW_IN_PROGRESS"}]}
)
self.assertEqual(self.deployer.has_stack("test"), False)
def test_deployer_has_stack_exception_non_exsistent(self):
self.deployer._client.describe_stacks = MagicMock(
side_effect=ClientError(
error_response={"Error": {"Message": "Stack with id test does not exist"}},
operation_name="stack_status",
)
)
self.assertEqual(self.deployer.has_stack("test"), False)
def test_deployer_has_stack_exception(self):
self.deployer._client.describe_stacks = MagicMock(side_effect=Exception())
with self.assertRaises(Exception):
self.deployer.has_stack("test")
def test_deployer_has_stack_exception_botocore(self):
self.deployer._client.describe_stacks = MagicMock(side_effect=BotoCoreError())
with self.assertRaises(DeployFailedError):
self.deployer.has_stack("test")
def test_create_changeset(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.assertEqual(self.deployer._client.create_change_set.call_count, 1)
self.deployer._client.create_change_set.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
ChangeSetName=ANY,
ChangeSetType="CREATE",
Description=ANY,
NotificationARNs=[],
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
RoleARN="role-arn",
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_update_changeset(self):
self.deployer.has_stack = MagicMock(return_value=True)
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.assertEqual(self.deployer._client.create_change_set.call_count, 1)
self.deployer._client.create_change_set.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
ChangeSetName=ANY,
ChangeSetType="UPDATE",
Description=ANY,
NotificationARNs=[],
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
RoleARN="role-arn",
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_create_changeset_exception(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer._client.create_change_set = MagicMock(side_effect=Exception)
with self.assertRaises(ChangeSetError):
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_create_changeset_ClientErrorException(self):
error_message = (
"An error occurred (ValidationError) when calling the CreateChangeSet "
"operation: S3 error: The bucket you are attempting to access must be "
"addressed using the specified endpoint. "
"Please send all future requests to this "
"endpoint.\nFor more information "
"check http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html"
)
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer._client.create_change_set = MagicMock(
side_effect=ClientError(
error_response={"Error": {"Message": error_message}}, operation_name="create_changeset"
)
)
with self.assertRaises(DeployBucketInDifferentRegionError):
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_create_changeset_ClientErrorException_generic(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer._client.create_change_set = MagicMock(
side_effect=ClientError(error_response={"Error": {"Message": "Message"}}, operation_name="create_changeset")
)
with self.assertRaises(ChangeSetError):
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_create_changeset_pass_through_optional_arguments_only_if_having_values(self):
self.deployer.has_stack = MagicMock(return_value=False)
# assert that the arguments; Capabilities, RoleARN & NotificationARNs are passed through if having values
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.deployer._client.create_change_set.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
RoleARN="role-arn",
NotificationARNs=[],
ChangeSetName=ANY,
ChangeSetType="CREATE",
Description=ANY,
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
# assert that the arguments; Capabilities, RoleARN & NotificationARNs are not passed through if no values
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=None,
role_arn=None,
notification_arns=None,
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.deployer._client.create_change_set.assert_called_with(
ChangeSetName=ANY,
ChangeSetType="CREATE",
Description=ANY,
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_describe_changeset_with_changes(self):
response = [
{
"Changes": [
{"ResourceChange": {"LogicalResourceId": "resource_id1", "ResourceType": "s3", "Action": "Add"}}
]
},
{
"Changes": [
{"ResourceChange": {"LogicalResourceId": "resource_id2", "ResourceType": "kms", "Action": "Add"}}
]
},
{
"Changes": [
{"ResourceChange": {"LogicalResourceId": "resource_id3", "ResourceType": "lambda", "Action": "Add"}}
]
},
]
self.deployer._client.get_paginator = MagicMock(return_value=MockPaginator(resp=response))
changes = self.deployer.describe_changeset("change_id", "test")
self.assertEqual(
changes,
{
"Add": [
{"LogicalResourceId": "resource_id1", "ResourceType": "s3", "Replacement": "N/A"},
{"LogicalResourceId": "resource_id2", "ResourceType": "kms", "Replacement": "N/A"},
{"LogicalResourceId": "resource_id3", "ResourceType": "lambda", "Replacement": "N/A"},
],
"Modify": [],
"Remove": [],
},
)
def test_describe_changeset_with_no_changes(self):
response = [{"Changes": []}]
self.deployer._client.get_paginator = MagicMock(return_value=MockPaginator(resp=response))
changes = self.deployer.describe_changeset("change_id", "test")
self.assertEqual(changes, {"Add": [], "Modify": [], "Remove": []})
def test_wait_for_changeset(self):
self.deployer._client.get_waiter = MagicMock(return_value=MockChangesetWaiter())
self.deployer.wait_for_changeset("test-id", "test-stack")
def test_wait_for_changeset_exception_ChangeEmpty(self):
self.deployer._client.get_waiter = MagicMock(
return_value=MockChangesetWaiter(
ex=WaiterError(
name="wait_for_changeset",
reason="unit-test",
last_response={"Status": "Failed", "StatusReason": "It's a unit test"},
)
)
)
with self.assertRaises(ChangeSetError):
self.deployer.wait_for_changeset("test-id", "test-stack")
def test_execute_changeset(self):
self.deployer.execute_changeset("id", "test", True)
self.deployer._client.execute_change_set.assert_called_with(
ChangeSetName="id", StackName="test", DisableRollback=True
)
def test_execute_changeset_exception(self):
    """A ClientError while executing the changeset becomes DeployFailedError."""
    client_failure = ClientError(
        error_response={"Error": {"Message": "Error"}}, operation_name="execute_changeset"
    )
    self.deployer._client.execute_change_set = MagicMock(side_effect=client_failure)
    with self.assertRaises(DeployFailedError):
        self.deployer.execute_changeset("id", "test", True)
def test_get_last_event_time(self):
    """get_last_event_time converts the newest stack event's UTC timestamp."""
    event_time = datetime.utcnow()
    describe_response = {"StackEvents": [{"Timestamp": event_time}]}
    self.deployer._client.describe_stack_events = MagicMock(return_value=describe_response)
    self.assertEqual(self.deployer.get_last_event_time("test"), utc_to_timestamp(event_time))
def test_get_last_event_time_unknown_last_time(self):
    """When describe_stack_events fails, get_last_event_time falls back to "now".

    Fix: the original compared year/month/.../second field by field, which is
    flaky whenever the two timestamps straddle a second (or minute, hour, ...)
    boundary. Compare the absolute difference against a small tolerance instead.
    """
    current_timestamp = datetime.utcnow()
    self.deployer._client.describe_stack_events = MagicMock(side_effect=KeyError)
    # Convert to milliseconds from seconds
    last_stack_event_timestamp = to_datetime(self.deployer.get_last_event_time("test") * 1000)
    # Both timestamps were taken "now"; a few seconds of slack is plenty.
    self.assertLess(abs(last_stack_event_timestamp - current_timestamp), timedelta(seconds=5))
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_chronological_order(self, patched_pprint_columns, patched_time):
    """Events returned newest-first by the API must be printed oldest-first.

    Five events spanning start..start+3s arrive in reverse chronological
    order across pages; the printed columns are asserted in forward
    chronological order.
    """
    start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
    self.deployer._client.get_paginator = MagicMock(
        return_value=MockPaginator(
            # describe_stack_events is in reverse chronological order
            [
                {
                    "StackEvents": [
                        {
                            "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "EventId": str(uuid.uuid4()),
                            "StackName": "test",
                            "LogicalResourceId": "test",
                            "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "ResourceType": "AWS::CloudFormation::Stack",
                            "Timestamp": start_timestamp + timedelta(seconds=3),
                            "ResourceStatus": "CREATE_COMPLETE",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp + timedelta(seconds=2),
                            "ResourceStatus": "CREATE_COMPLETE",
                            "ResourceType": "kms",
                            "LogicalResourceId": "mykms",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp + timedelta(seconds=1),
                            "ResourceStatus": "CREATE_COMPLETE",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp,
                            "ResourceStatus": "CREATE_IN_PROGRESS",
                            "ResourceType": "kms",
                            "LogicalResourceId": "mykms",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp,
                            "ResourceStatus": "CREATE_IN_PROGRESS",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        }
                    ]
                },
            ]
        )
    )
    # last-event cutoff is just before start, so every event is "new".
    self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
    self.assertEqual(patched_pprint_columns.call_count, 5)
    # Assert printing happened in forward chronological order.
    self.assertListSubset(
        ["CREATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
    )
    self.assertListSubset(
        ["CREATE_IN_PROGRESS", "kms", "mykms"], patched_pprint_columns.call_args_list[1][1]["columns"]
    )
    self.assertListSubset(
        ["CREATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[2][1]["columns"]
    )
    self.assertListSubset(
        ["CREATE_COMPLETE", "kms", "mykms"], patched_pprint_columns.call_args_list[3][1]["columns"]
    )
    self.assertListSubset(
        ["CREATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
        patched_pprint_columns.call_args_list[4][1]["columns"],
    )
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_chronological_order_with_previous_event(self, patched_pprint_columns, patched_time):
    """Like the chronological-order test, but with a stale event from an
    earlier deployment at the end of the pages; only the five newer
    UPDATE_* events are printed (the old CREATE_COMPLETE is the cutoff).
    """
    start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
    # Timestamp of the final event of the previous deployment (the cutoff).
    last_event_timestamp = start_timestamp - timedelta(hours=6)
    self.deployer._client.get_paginator = MagicMock(
        return_value=MockPaginator(
            # describe_stack_events is in reverse chronological order
            [
                {
                    "StackEvents": [
                        {
                            "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "EventId": str(uuid.uuid4()),
                            "StackName": "test",
                            "LogicalResourceId": "test",
                            "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "ResourceType": "AWS::CloudFormation::Stack",
                            "Timestamp": start_timestamp + timedelta(seconds=3),
                            "ResourceStatus": "UPDATE_COMPLETE",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp + timedelta(seconds=2),
                            "ResourceStatus": "UPDATE_COMPLETE",
                            "ResourceType": "kms",
                            "LogicalResourceId": "mykms",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp + timedelta(seconds=1),
                            "ResourceStatus": "UPDATE_COMPLETE",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp,
                            "ResourceStatus": "UPDATE_IN_PROGRESS",
                            "ResourceType": "kms",
                            "LogicalResourceId": "mykms",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp,
                            "ResourceStatus": "UPDATE_IN_PROGRESS",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        }
                    ]
                },
                # Last event (from a former deployment)
                {
                    "StackEvents": [
                        {
                            "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "EventId": str(uuid.uuid4()),
                            "StackName": "test",
                            "LogicalResourceId": "test",
                            "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "ResourceType": "AWS::CloudFormation::Stack",
                            "Timestamp": last_event_timestamp,
                            "ResourceStatus": "CREATE_COMPLETE",
                        }
                    ]
                },
            ]
        )
    )
    self.deployer.describe_stack_events("test", utc_to_timestamp(last_event_timestamp))
    # Five new events printed; the stale CREATE_COMPLETE is excluded.
    self.assertEqual(patched_pprint_columns.call_count, 5)
    self.assertListSubset(
        ["UPDATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
    )
    self.assertListSubset(
        ["UPDATE_IN_PROGRESS", "kms", "mykms"], patched_pprint_columns.call_args_list[1][1]["columns"]
    )
    self.assertListSubset(
        ["UPDATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[2][1]["columns"]
    )
    self.assertListSubset(
        ["UPDATE_COMPLETE", "kms", "mykms"], patched_pprint_columns.call_args_list[3][1]["columns"]
    )
    self.assertListSubset(
        ["UPDATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
        patched_pprint_columns.call_args_list[4][1]["columns"],
    )
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_skip_old_event(self, patched_pprint_columns, patched_time):
    """Events at or before the last-seen timestamp must be skipped entirely.

    Each paginator call returns the newest event plus already-seen older
    pages and a deliberately invalid trailing page; if the loop ever read
    past the cutoff it would hit the invalid event and raise KeyError.
    """
    start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
    last_event_timestamp = start_timestamp - timedelta(hours=6)
    sample_events = [
        # old deployment
        {
            "StackEvents": [
                {
                    "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "EventId": str(uuid.uuid4()),
                    "StackName": "test",
                    "LogicalResourceId": "test",
                    "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "ResourceType": "AWS::CloudFormation::Stack",
                    "Timestamp": last_event_timestamp - timedelta(seconds=10),
                    "ResourceStatus": "CREATE_IN_PROGRESS",
                }
            ]
        },
        {
            "StackEvents": [
                {
                    "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "EventId": str(uuid.uuid4()),
                    "StackName": "test",
                    "LogicalResourceId": "test",
                    "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "ResourceType": "AWS::CloudFormation::Stack",
                    "Timestamp": last_event_timestamp,
                    "ResourceStatus": "CREATE_COMPLETE",
                }
            ]
        },
        # new deployment
        {
            "StackEvents": [
                {
                    "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "EventId": str(uuid.uuid4()),
                    "StackName": "test",
                    "LogicalResourceId": "test",
                    "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "ResourceType": "AWS::CloudFormation::Stack",
                    "Timestamp": start_timestamp,
                    "ResourceStatus": "UPDATE_IN_PROGRESS",
                }
            ]
        },
        {
            "StackEvents": [
                {
                    "EventId": str(uuid.uuid4()),
                    "Timestamp": start_timestamp + timedelta(seconds=10),
                    "ResourceStatus": "UPDATE_IN_PROGRESS",
                    "ResourceType": "s3",
                    "LogicalResourceId": "mybucket",
                }
            ]
        },
        {
            "StackEvents": [
                {
                    "EventId": str(uuid.uuid4()),
                    "Timestamp": start_timestamp + timedelta(seconds=20),
                    "ResourceStatus": "UPDATE_COMPLETE",
                    "ResourceType": "s3",
                    "LogicalResourceId": "mybucket",
                }
            ]
        },
        {
            "StackEvents": [
                {
                    "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "EventId": str(uuid.uuid4()),
                    "StackName": "test",
                    "LogicalResourceId": "test",
                    "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                    "ResourceType": "AWS::CloudFormation::Stack",
                    "Timestamp": start_timestamp + timedelta(seconds=30),
                    "ResourceStatus": "UPDATE_COMPLETE",
                }
            ]
        },
    ]
    invalid_event = {"StackEvents": [{}]}  # if deployer() loop read this, KeyError would raise
    # Each poll returns the newest page first (reverse chronological order).
    self.deployer._client.get_paginator = MagicMock(
        side_effect=[
            MockPaginator([sample_events[0], invalid_event]),
            MockPaginator([sample_events[1], sample_events[0], invalid_event]),
            MockPaginator([sample_events[2], sample_events[1], invalid_event]),
            MockPaginator([sample_events[3], sample_events[2], invalid_event]),
            MockPaginator([sample_events[4], sample_events[3], invalid_event]),
            MockPaginator([sample_events[5], sample_events[4], invalid_event]),
        ]
    )
    self.deployer.describe_stack_events("test", utc_to_timestamp(last_event_timestamp))
    # Only the four events of the new deployment are printed.
    self.assertEqual(patched_pprint_columns.call_count, 4)
    self.assertListSubset(
        ["UPDATE_IN_PROGRESS", "AWS::CloudFormation::Stack", "test"],
        patched_pprint_columns.call_args_list[0][1]["columns"],
    )
    self.assertListSubset(
        ["UPDATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
        patched_pprint_columns.call_args_list[3][1]["columns"],
    )
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_stop_at_first_not_in_progress(self, patched_pprint_columns, patched_time):
    """The event loop stops at the first terminal stack event it reaches.

    The CREATE_COMPLETE stack event at start+3s ends the loop; the later
    UPDATE_* events (seconds 30-33) must never be printed.

    Fix: corrected the fixture typo "UPDATE_COMLPETE" -> "UPDATE_COMPLETE"
    on the newest stack event. That event lies past the stop point and is
    never processed, so the test outcome is unchanged either way.
    """
    start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
    self.deployer._client.get_paginator = MagicMock(
        return_value=MockPaginator(
            # describe_stack_events is in reverse chronological order
            [
                {
                    "StackEvents": [
                        {
                            "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "EventId": str(uuid.uuid4()),
                            "StackName": "test",
                            "LogicalResourceId": "test",
                            "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "ResourceType": "AWS::CloudFormation::Stack",
                            "Timestamp": start_timestamp + timedelta(seconds=33),
                            "ResourceStatus": "UPDATE_COMPLETE",
                        },
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp + timedelta(seconds=32),
                            "ResourceStatus": "UPDATE_COMPLETE",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        },
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp + timedelta(seconds=31),
                            "ResourceStatus": "UPDATE_IN_PROGRESS",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        },
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "EventId": str(uuid.uuid4()),
                            "StackName": "test",
                            "LogicalResourceId": "test",
                            "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "ResourceType": "AWS::CloudFormation::Stack",
                            "Timestamp": start_timestamp + timedelta(seconds=30),
                            "ResourceStatus": "UPDATE_IN_PROGRESS",
                        },
                        {
                            # This event should stop the loop and ignore above events
                            "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "EventId": str(uuid.uuid4()),
                            "StackName": "test",
                            "LogicalResourceId": "test",
                            "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                            "ResourceType": "AWS::CloudFormation::Stack",
                            "Timestamp": start_timestamp + timedelta(seconds=3),
                            "ResourceStatus": "CREATE_COMPLETE",
                        },
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp + timedelta(seconds=1),
                            "ResourceStatus": "CREATE_COMPLETE",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        }
                    ]
                },
                {
                    "StackEvents": [
                        {
                            "EventId": str(uuid.uuid4()),
                            "Timestamp": start_timestamp,
                            "ResourceStatus": "CREATE_IN_PROGRESS",
                            "ResourceType": "s3",
                            "LogicalResourceId": "mybucket",
                        }
                    ]
                },
            ]
        )
    )
    self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
    # Only the three events up to and including the terminal stack event print.
    self.assertEqual(patched_pprint_columns.call_count, 3)
    self.assertListSubset(
        ["CREATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
    )
    self.assertListSubset(
        ["CREATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[1][1]["columns"]
    )
    self.assertListSubset(
        ["CREATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
        patched_pprint_columns.call_args_list[2][1]["columns"],
    )
@patch("samcli.lib.deploy.deployer.math")
@patch("time.sleep")
def test_describe_stack_events_exceptions(self, patched_time, patched_math):
    """Repeated throttling exhausts retries with exponential backoff, no raise."""
    throttling_errors = [
        ClientError(
            error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
        )
        for _ in range(4)
    ]
    self.deployer._client.get_paginator = MagicMock(side_effect=throttling_errors)
    # No exception raised, we return with a log message, this is because,
    # the changeset is still getting executed, but displaying them is getting throttled.
    self.deployer.describe_stack_events("test", time.time())
    # Three backoff computations: 2^1, 2^2, 2^3.
    self.assertEqual(patched_math.pow.call_count, 3)
    self.assertEqual(patched_math.pow.call_args_list, [call(2, 1), call(2, 2), call(2, 3)])
@patch("samcli.lib.deploy.deployer.math")
@patch("time.sleep")
def test_describe_stack_events_resume_after_exceptions(self, patched_time, patched_math):
    """After three throttling errors, polling resumes and drains the events.

    The backoff exponent grows once per consecutive failure (2^1..2^3),
    then a successful paginator run completes the deployment display.
    """
    start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
    self.deployer._client.get_paginator = MagicMock(
        side_effect=[
            ClientError(
                error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
            ),
            ClientError(
                error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
            ),
            ClientError(
                error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
            ),
            # Fourth call succeeds: reverse-chronological pages ending in
            # a terminal CREATE_COMPLETE stack event.
            MockPaginator(
                [
                    {
                        "StackEvents": [
                            {
                                "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                                "EventId": str(uuid.uuid4()),
                                "StackName": "test",
                                "LogicalResourceId": "test",
                                "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                                "ResourceType": "AWS::CloudFormation::Stack",
                                "Timestamp": start_timestamp,
                                "ResourceStatus": "CREATE_COMPLETE",
                            },
                            {
                                "EventId": str(uuid.uuid4()),
                                "Timestamp": start_timestamp,
                                "ResourceStatus": "CREATE_COMPLETE",
                                "ResourceType": "kms",
                                "LogicalResourceId": "mykms",
                            },
                        ]
                    },
                    {
                        "StackEvents": [
                            {
                                "EventId": str(uuid.uuid4()),
                                "Timestamp": start_timestamp,
                                "ResourceStatus": "CREATE_COMPLETE",
                                "ResourceType": "s3",
                                "LogicalResourceId": "mybucket",
                            }
                        ]
                    },
                    {
                        "StackEvents": [
                            {
                                "EventId": str(uuid.uuid4()),
                                "Timestamp": start_timestamp,
                                "ResourceStatus": "CREATE_IN_PROGRESS",
                                "ResourceType": "kms",
                                "LogicalResourceId": "mykms",
                            }
                        ]
                    },
                    {
                        "StackEvents": [
                            {
                                "EventId": str(uuid.uuid4()),
                                "Timestamp": start_timestamp,
                                "ResourceStatus": "CREATE_IN_PROGRESS",
                                "ResourceType": "s3",
                                "LogicalResourceId": "mybucket",
                            }
                        ]
                    },
                ]
            ),
        ]
    )
    self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
    self.assertEqual(patched_math.pow.call_count, 3)
    self.assertEqual(patched_math.pow.call_args_list, [call(2, 1), call(2, 2), call(2, 3)])
@patch("samcli.lib.deploy.deployer.math.pow", wraps=math.pow)
@patch("time.sleep")
def test_describe_stack_events_reset_retry_on_success_after_exceptions(self, patched_time, patched_pow):
    """A successful poll resets the exponential-backoff retry counter.

    Sequence: success, two throttles (2^1, 2^2), success (counter resets),
    one throttle (2^1 again), final success. The sleep call list encodes
    the expected backoff schedule.
    """
    start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
    self.deployer._client.get_paginator = MagicMock(
        side_effect=[
            MockPaginator(
                [
                    {
                        "StackEvents": [
                            {
                                "EventId": str(uuid.uuid4()),
                                "Timestamp": start_timestamp,
                                "ResourceStatus": "CREATE_IN_PROGRESS",
                                "ResourceType": "s3",
                                "LogicalResourceId": "mybucket",
                            },
                        ]
                    },
                ]
            ),
            ClientError(
                error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
            ),
            ClientError(
                error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
            ),
            MockPaginator(
                [
                    {
                        "StackEvents": [
                            {
                                "EventId": str(uuid.uuid4()),
                                "Timestamp": start_timestamp + timedelta(seconds=10),
                                "ResourceStatus": "CREATE_COMPLETE",
                                "ResourceType": "s3",
                                "LogicalResourceId": "mybucket",
                            }
                        ]
                    },
                ]
            ),
            ClientError(
                error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
            ),
            MockPaginator(
                [
                    {
                        "StackEvents": [
                            {
                                "StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                                "EventId": str(uuid.uuid4()),
                                "StackName": "test",
                                "LogicalResourceId": "test",
                                "PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
                                "ResourceType": "AWS::CloudFormation::Stack",
                                "Timestamp": start_timestamp + timedelta(seconds=20),
                                "ResourceStatus": "CREATE_COMPLETE",
                            },
                        ]
                    },
                ]
            ),
        ]
    )
    self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
    # There are 2 sleep call for exceptions (backoff + regular one at 0)
    self.assertEqual(patched_time.call_count, 9)
    self.assertEqual(
        patched_time.call_args_list,
        [call(0.5), call(0.5), call(2.0), call(0), call(4.0), call(0), call(0.5), call(2.0), call(0)],
    )
    # 2^1, 2^2 before the reset; 2^1 again after it.
    self.assertEqual(patched_pow.call_count, 3)
    self.assertEqual(patched_pow.call_args_list, [call(2, 1), call(2, 2), call(2, 1)])
def test_check_stack_status(self):
    """_check_stack_not_in_progress: terminal statuses map to True, in-progress to False."""
    expectations = [
        ("CREATE_COMPLETE", True),
        ("CREATE_FAILED", True),
        ("CREATE_IN_PROGRESS", False),
        ("DELETE_COMPLETE", True),
        ("DELETE_FAILED", True),
        ("DELETE_IN_PROGRESS", False),
        ("REVIEW_IN_PROGRESS", False),
        ("ROLLBACK_COMPLETE", True),
        ("ROLLBACK_IN_PROGRESS", False),
        ("UPDATE_COMPLETE", True),
        ("UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", False),
        ("UPDATE_IN_PROGRESS", False),
        ("UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS", False),
        ("UPDATE_ROLLBACK_FAILED", True),
        ("UPDATE_ROLLBACK_IN_PROGRESS", False),
    ]
    for status, expected in expectations:
        self.assertEqual(self.deployer._check_stack_not_in_progress(status), expected)
@patch("time.sleep")
def test_wait_for_execute(self, patched_time):
    """wait_for_execute handles CREATE/UPDATE, rejects unknown types, and
    converts waiter failures into DeployFailedError.
    """
    self.deployer.describe_stack_events = MagicMock()
    self.deployer._client.get_waiter = MagicMock(return_value=MockCreateUpdateWaiter())
    self.deployer.wait_for_execute("test", "CREATE", False)
    self.deployer.wait_for_execute("test", "UPDATE", True)
    # Unknown changeset types are rejected outright.
    with self.assertRaises(RuntimeError):
        self.deployer.wait_for_execute("test", "DESTRUCT", False)
    # A failing waiter surfaces as DeployFailedError.
    self.deployer._client.get_waiter = MagicMock(
        return_value=MockCreateUpdateWaiter(
            ex=WaiterError(
                name="create_changeset",
                reason="unit-test",
                last_response={"Status": "Failed", "StatusReason": "It's a unit test"},
            )
        )
    )
    with self.assertRaises(DeployFailedError):
        self.deployer.wait_for_execute("test", "CREATE", False)
def test_create_and_wait_for_changeset(self):
    """create_and_wait_for_changeset returns create_changeset's (result, type) pair."""
    self.deployer.create_changeset = MagicMock(return_value=({"Id": "test"}, "create"))
    self.deployer.wait_for_changeset = MagicMock()
    self.deployer.describe_changeset = MagicMock()
    result = self.deployer.create_and_wait_for_changeset(
        stack_name="test",
        cfn_template=" ",
        parameter_values=[
            {"ParameterKey": "a", "ParameterValue": "b"},
            {"ParameterKey": "c", "UsePreviousValue": True},
        ],
        capabilities=["CAPABILITY_IAM"],
        role_arn="role-arn",
        notification_arns=[],
        s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
        tags={"unit": "true"},
    )
    # The create_changeset return value is passed through unchanged.
    self.assertEqual(result, ({"Id": "test"}, "create"))
def test_create_and_wait_for_changeset_exception(self):
    """A ClientError during changeset creation becomes DeployFailedError."""
    self.deployer.create_changeset = MagicMock(
        side_effect=ClientError(
            error_response={"Error": {"Message": "Something Wrong"}}, operation_name="create_changeset"
        )
    )
    with self.assertRaises(DeployFailedError):
        self.deployer.create_and_wait_for_changeset(
            stack_name="test",
            cfn_template=" ",
            parameter_values=[
                {"ParameterKey": "a", "ParameterValue": "b"},
                {"ParameterKey": "c", "UsePreviousValue": True},
            ],
            capabilities=["CAPABILITY_IAM"],
            role_arn="role-arn",
            notification_arns=[],
            s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
            tags={"unit": "true"},
        )
def test_get_stack_outputs(self):
    """get_stack_outputs returns the Outputs list of the first stack."""
    stack_outputs = [
        {"OutputKey": "Key1", "OutputValue": "Value1", "Description": "output for s3"},
        {"OutputKey": "Key2", "OutputValue": "Value2", "Description": "output for kms"},
    ]
    describe_response = {"Stacks": [{"Outputs": stack_outputs}]}
    self.deployer._client.describe_stacks = MagicMock(return_value=describe_response)
    self.assertEqual(stack_outputs, self.deployer.get_stack_outputs(stack_name="test"))
    self.deployer._client.describe_stacks.assert_called_with(StackName="test")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_get_stack_outputs_no_echo(self, mock_pprint_columns):
    """With echo=False the outputs are returned but never printed."""
    stack_outputs = [
        {"OutputKey": "Key1", "OutputValue": "Value1", "Description": "output for s3"},
        {"OutputKey": "Key2", "OutputValue": "Value2", "Description": "output for kms"},
    ]
    describe_response = {"Stacks": [{"Outputs": stack_outputs}]}
    self.deployer._client.describe_stacks = MagicMock(return_value=describe_response)
    returned = self.deployer.get_stack_outputs(stack_name="test", echo=False)
    self.assertEqual(stack_outputs, returned)
    self.deployer._client.describe_stacks.assert_called_with(StackName="test")
    # Nothing was printed.
    self.assertEqual(mock_pprint_columns.call_count, 0)
def test_get_stack_outputs_no_outputs_no_exception(self):
    """A stack without an "Outputs" key yields None instead of raising."""
    describe_response = {"Stacks": [{"SomeOtherKey": "Value"}]}
    self.deployer._client.describe_stacks = MagicMock(return_value=describe_response)
    self.assertEqual(None, self.deployer.get_stack_outputs(stack_name="test"))
    self.deployer._client.describe_stacks.assert_called_with(StackName="test")
def test_get_stack_outputs_exception(self):
    """A ClientError from describe_stacks becomes DeployStackOutPutFailedError."""
    client_failure = ClientError(error_response={"Error": {"Message": "Error"}}, operation_name="describe_stacks")
    self.deployer._client.describe_stacks = MagicMock(side_effect=client_failure)
    with self.assertRaises(DeployStackOutPutFailedError):
        self.deployer.get_stack_outputs(stack_name="test")
@patch("time.sleep")
def test_wait_for_execute_no_outputs(self, patched_time):
    """When the stack has no outputs, nothing is displayed after execution."""
    self.deployer.describe_stack_events = MagicMock()
    self.deployer._client.get_waiter = MagicMock(return_value=MockCreateUpdateWaiter())
    self.deployer._display_stack_outputs = MagicMock()
    self.deployer.get_stack_outputs = MagicMock(return_value=None)
    self.deployer.wait_for_execute("test", "CREATE", False)
    # No outputs -> the display helper must not run.
    self.assertEqual(self.deployer._display_stack_outputs.call_count, 0)
@patch("time.sleep")
def test_wait_for_execute_with_outputs(self, patched_time):
    """When the stack has outputs, they are displayed exactly once after execution."""
    self.deployer.describe_stack_events = MagicMock()
    outputs = {
        "Stacks": [
            {
                "Outputs": [
                    {"OutputKey": "Key1", "OutputValue": "Value1", "Description": "output for s3"},
                    {"OutputKey": "Key2", "OutputValue": "Value2", "Description": "output for kms"},
                ]
            }
        ]
    }
    self.deployer._client.get_waiter = MagicMock(return_value=MockCreateUpdateWaiter())
    self.deployer._display_stack_outputs = MagicMock()
    self.deployer.get_stack_outputs = MagicMock(return_value=outputs["Stacks"][0]["Outputs"])
    self.deployer.wait_for_execute("test", "CREATE", False)
    self.assertEqual(self.deployer._display_stack_outputs.call_count, 1)
def test_sync_update_stack(self):
    """sync() calls update_stack with the expected kwargs when the stack exists."""
    self.deployer.has_stack = MagicMock(return_value=True)
    self.deployer.wait_for_execute = MagicMock()
    self.deployer.sync(
        stack_name="test",
        cfn_template=" ",
        parameter_values=[
            {"ParameterKey": "a", "ParameterValue": "b"},
        ],
        capabilities=["CAPABILITY_IAM"],
        role_arn="role-arn",
        notification_arns=[],
        s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
        tags={"unit": "true"},
    )
    self.assertEqual(self.deployer._client.update_stack.call_count, 1)
    # TemplateURL is ANY: the template is uploaded to S3 with a generated key.
    self.deployer._client.update_stack.assert_called_with(
        Capabilities=["CAPABILITY_IAM"],
        NotificationARNs=[],
        Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
        RoleARN="role-arn",
        StackName="test",
        Tags={"unit": "true"},
        TemplateURL=ANY,
    )
def test_sync_update_stack_exception(self):
    """Any exception from update_stack is wrapped in DeployFailedError."""
    self.deployer.has_stack = MagicMock(return_value=True)
    self.deployer.wait_for_execute = MagicMock()
    self.deployer._client.update_stack = MagicMock(side_effect=Exception)
    with self.assertRaises(DeployFailedError):
        self.deployer.sync(
            stack_name="test",
            cfn_template=" ",
            parameter_values=[
                {"ParameterKey": "a", "ParameterValue": "b"},
            ],
            capabilities=["CAPABILITY_IAM"],
            role_arn="role-arn",
            notification_arns=[],
            s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
            tags={"unit": "true"},
        )
def test_sync_create_stack(self):
    """sync() calls create_stack with the expected kwargs when the stack is new."""
    self.deployer.has_stack = MagicMock(return_value=False)
    self.deployer.wait_for_execute = MagicMock()
    self.deployer.sync(
        stack_name="test",
        cfn_template=" ",
        parameter_values=[
            {"ParameterKey": "a", "ParameterValue": "b"},
        ],
        capabilities=["CAPABILITY_IAM"],
        role_arn="role-arn",
        notification_arns=[],
        s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
        tags={"unit": "true"},
    )
    self.assertEqual(self.deployer._client.create_stack.call_count, 1)
    # TemplateURL is ANY: the template is uploaded to S3 with a generated key.
    self.deployer._client.create_stack.assert_called_with(
        Capabilities=["CAPABILITY_IAM"],
        NotificationARNs=[],
        Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
        RoleARN="role-arn",
        StackName="test",
        Tags={"unit": "true"},
        TemplateURL=ANY,
    )
def test_sync_create_stack_exception(self):
    """Any exception from create_stack is wrapped in DeployFailedError."""
    self.deployer.has_stack = MagicMock(return_value=False)
    self.deployer.wait_for_execute = MagicMock()
    self.deployer._client.create_stack = MagicMock(side_effect=Exception)
    with self.assertRaises(DeployFailedError):
        self.deployer.sync(
            stack_name="test",
            cfn_template=" ",
            parameter_values=[
                {"ParameterKey": "a", "ParameterValue": "b"},
            ],
            capabilities=["CAPABILITY_IAM"],
            role_arn="role-arn",
            notification_arns=[],
            s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
            tags={"unit": "true"},
        )
def test_process_kwargs(self):
    """_process_kwargs merges capabilities, role ARN and notification ARNs into kwargs."""
    initial_kwargs = {"Capabilities": []}
    expected = {
        "Capabilities": ["CAPABILITY_IAM"],
        "RoleARN": "role-arn",
        "NotificationARNs": ["arn"],
    }
    merged = self.deployer._process_kwargs(
        initial_kwargs, None, ["CAPABILITY_IAM"], "role-arn", ["arn"]
    )
    self.assertEqual(expected, merged)
|
nilq/baby-python
|
python
|
import datetime
import dateutil.parser
import pytz
from django.conf import settings
from django.db.models import F, Q
from django.http import (
Http404, HttpResponseBadRequest, HttpResponseRedirect, JsonResponse,
)
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils import timezone
from django.utils.http import is_safe_url
from django.utils.six.moves.urllib.parse import quote
from django.views.decorators.http import require_POST
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView, DeleteView, ModelFormMixin, ProcessFormView, UpdateView,
)
from schedule.forms import EventForm, OccurrenceForm
from schedule.models import Calendar, Event, Occurrence
from schedule.periods import weekday_names
from schedule.settings import (
CHECK_EVENT_PERM_FUNC, CHECK_OCCURRENCE_PERM_FUNC, EVENT_NAME_PLACEHOLDER,
GET_EVENTS_FUNC, OCCURRENCE_CANCEL_REDIRECT, USE_FULLCALENDAR,
)
from schedule.utils import (
check_calendar_permissions, check_event_permissions,
check_occurrence_permissions, coerce_date_dict,
)
class CalendarViewPermissionMixin(object):
    """Wrap the generated view callable with calendar permission checking."""

    @classmethod
    def as_view(cls, **initkwargs):
        inner_view = super(CalendarViewPermissionMixin, cls).as_view(**initkwargs)
        return check_calendar_permissions(inner_view)
class EventEditPermissionMixin(object):
    """Wrap the generated view callable with event edit permission checking."""

    @classmethod
    def as_view(cls, **initkwargs):
        inner_view = super(EventEditPermissionMixin, cls).as_view(**initkwargs)
        return check_event_permissions(inner_view)
class OccurrenceEditPermissionMixin(object):
    """Wrap the generated view callable with occurrence edit permission checking."""

    @classmethod
    def as_view(cls, **initkwargs):
        inner_view = super(OccurrenceEditPermissionMixin, cls).as_view(**initkwargs)
        return check_occurrence_permissions(inner_view)
class CancelButtonMixin(object):
    """Short-circuit form POSTs when the user pressed the "cancel" button."""

    def post(self, request, *args, **kwargs):
        # Resolve the redirect target first so success_url is set either way.
        self.success_url = get_next_url(request, kwargs.get('next'))
        if "cancel" not in request.POST:
            return super(CancelButtonMixin, self).post(request, *args, **kwargs)
        return HttpResponseRedirect(self.success_url)
class CalendarMixin(CalendarViewPermissionMixin):
    """Permission-checked base for views that resolve a Calendar by slug."""
    model = Calendar
    slug_url_kwarg = 'calendar_slug'  # URL kwarg carrying the calendar slug
class CalendarView(CalendarMixin, DetailView):
    """Detail page for a single calendar."""
    template_name = 'schedule/calendar.html'
class FullCalendarView(CalendarMixin, DetailView):
    """Render a calendar using the fullcalendar.js front end."""
    template_name = "fullcalendar.html"

    def get_context_data(self, **kwargs):
        # Fix: forward **kwargs to super() so context contributed by
        # ancestors (e.g. DetailView's object) is not silently dropped.
        context = super(FullCalendarView, self).get_context_data(**kwargs)
        context['calendar_slug'] = self.kwargs.get('calendar_slug')
        return context
class CalendarByPeriodsView(CalendarMixin, DetailView):
    """Display a calendar over a period (day/week/month...) passed via URL kwargs."""
    template_name = 'schedule/calendar_by_period.html'

    def get_context_data(self, **kwargs):
        context = super(CalendarByPeriodsView, self).get_context_data(**kwargs)
        calendar = self.object
        # Period class (e.g. Day, Week, Month) injected by the URL conf.
        period_class = self.kwargs['period']
        try:
            # Build a date from ?year=&month=... query parameters, if present.
            date = coerce_date_dict(self.request.GET)
        except ValueError:
            raise Http404
        if date:
            try:
                date = datetime.datetime(**date)
            except ValueError:
                # e.g. month=13 — invalid components become a 404.
                raise Http404
        else:
            # No date in the query string: default to the current time.
            date = timezone.now()
        event_list = GET_EVENTS_FUNC(self.request, calendar)
        local_timezone = timezone.get_current_timezone()
        period = period_class(event_list, date, tzinfo=local_timezone)
        context.update({
            'date': date,
            'period': period,
            'calendar': calendar,
            'weekday_names': weekday_names,
            # Current URL, escaped for use as a "next" redirect parameter.
            'here': quote(self.request.get_full_path()),
        })
        return context
class OccurrenceMixin(CalendarViewPermissionMixin, TemplateResponseMixin):
    """Permission-checked base for views operating on a single Occurrence."""
    model = Occurrence
    pk_url_kwarg = 'occurrence_id'  # URL kwarg carrying the occurrence pk
    form_class = OccurrenceForm
class OccurrenceEditMixin(CancelButtonMixin, OccurrenceEditPermissionMixin, OccurrenceMixin):
    """Edit base that resolves the (possibly unpersisted) occurrence from URL kwargs."""

    def get_initial(self):
        initial_data = super(OccurrenceEditMixin, self).get_initial()
        # Side effect: bind self.object to the occurrence resolved from the
        # URL kwargs (occurrences need not exist in the database yet).
        _, self.object = get_occurrence(**self.kwargs)
        return initial_data
class OccurrenceView(OccurrenceMixin, DetailView):
    """Detail page for a persisted occurrence."""
    template_name = 'schedule/occurrence.html'
class OccurrencePreview(OccurrenceMixin, ModelFormMixin, ProcessFormView):
    """Preview page for an occurrence (which may not be persisted yet)."""
    template_name = 'schedule/occurrence.html'

    def get_context_data(self, **kwargs):
        # Fix: the original computed super().get_context_data() and then
        # immediately rebound `context` to a fresh dict, discarding the
        # ancestors' context (form, view, object, ...) and the kwargs.
        # Merge our keys into the inherited context instead.
        context = super(OccurrencePreview, self).get_context_data(**kwargs)
        context.update({
            'event': self.object.event,
            'occurrence': self.object,
        })
        return context
class EditOccurrenceView(OccurrenceEditMixin, UpdateView):
    """Edit form for an existing occurrence."""
    template_name = 'schedule/edit_occurrence.html'
class CreateOccurrenceView(OccurrenceEditMixin, CreateView):
    """Create form for persisting a generated (virtual) occurrence."""
    template_name = 'schedule/edit_occurrence.html'
class CancelOccurrenceView(OccurrenceEditMixin, ModelFormMixin, ProcessFormView):
    """Confirmation view that cancels a single occurrence of an event."""
    template_name = 'schedule/cancel_occurrence.html'

    def post(self, request, *args, **kwargs):
        event, occurrence = get_occurrence(**kwargs)
        self.success_url = kwargs.get(
            'next',
            get_next_url(request, event.get_absolute_url()))
        # "cancel" here is the form's abort button: only cancel the
        # occurrence when the user did NOT abort the confirmation form.
        if 'cancel' not in request.POST:
            occurrence.cancel()
        return HttpResponseRedirect(self.success_url)
class EventMixin(CalendarViewPermissionMixin):
    """Permission-checked base for views operating on a single Event."""
    model = Event
    pk_url_kwarg = 'event_id'  # URL kwarg carrying the event pk
class EventEditMixin(CancelButtonMixin, EventEditPermissionMixin, EventMixin):
    """Event base with edit permissions and cancel-button handling."""
    pass
class EventView(EventMixin, DetailView):
    """Detail page for a single event."""
    template_name = 'schedule/event.html'
class EditEventView(EventEditMixin, UpdateView):
    """Edit form for an event; shifts its occurrences by the same delta."""
    form_class = EventForm
    template_name = 'schedule/create_event.html'

    def form_valid(self, form):
        event = form.save(commit=False)
        # Compare against the persisted row to compute how far the event moved.
        old_event = Event.objects.get(pk=event.pk)
        # NOTE(review): the deltas are truncated to whole minutes via
        # int(total_seconds / 60), so sub-minute shifts are dropped —
        # presumably intentional for minute-granular calendars; confirm.
        dts = datetime.timedelta(
            minutes=int((event.start - old_event.start).total_seconds() / 60)
        )
        dte = datetime.timedelta(
            minutes=int((event.end - old_event.end).total_seconds() / 60)
        )
        # Shift every stored occurrence by the same start/end deltas in bulk.
        event.occurrence_set.all().update(
            original_start=F('original_start') + dts,
            original_end=F('original_end') + dte,
        )
        event.save()
        return super(EditEventView, self).form_valid(form)
class CreateEventView(EventEditMixin, CreateView):
    form_class = EventForm
    template_name = 'schedule/create_event.html'

    def get_initial(self):
        """Pre-fill start/end from date components in the query string.

        Returns ``None`` when no date parameters are present; raises
        ``Http404`` when the parameters do not form a valid datetime.
        """
        date = coerce_date_dict(self.request.GET)
        initial_data = None
        if date:
            try:
                start = datetime.datetime(**date)
            except (TypeError, ValueError):
                # Bad or incomplete date components in the query string.
                raise Http404
            initial_data = {
                'start': start,
                'end': start + datetime.timedelta(minutes=30),
            }
        return initial_data

    def form_valid(self, form):
        """Attach creator and calendar before saving, then redirect to the event."""
        event = form.save(commit=False)
        event.creator = self.request.user
        event.calendar = get_object_or_404(Calendar, slug=self.kwargs['calendar_slug'])
        event.save()
        return HttpResponseRedirect(event.get_absolute_url())
class DeleteEventView(EventEditMixin, DeleteView):
    template_name = 'schedule/delete_event.html'
    def get_context_data(self, **kwargs):
        """Expose the post-delete redirect target to the template as 'next'."""
        ctx = super(DeleteEventView, self).get_context_data(**kwargs)
        ctx['next'] = self.get_success_url()
        return ctx
    def get_success_url(self):
        """
        After the event is deleted there are three options for redirect, tried in
        this order:
        # Try to find a 'next' GET variable
        # If the key word argument redirect is set
        # Lastly redirect to the event detail of the recently created event
        """
        url_val = 'fullcalendar' if USE_FULLCALENDAR else 'day_calendar'
        next_url = self.kwargs.get('next') or reverse(url_val, args=[self.object.calendar.slug])
        next_url = get_next_url(self.request, next_url)
        return next_url
def get_occurrence(event_id, occurrence_id=None, year=None, month=None,
                   day=None, hour=None, minute=None, second=None,
                   tzinfo=None):
    """
    Because occurrences don't have to be persisted, there must be two ways to
    retrieve them. Both need an event, but if it is persisted the occurrence
    can be retrieved with an id. If it is not persisted it takes a date to
    retrieve it. This function returns an event and occurrence regardless of
    which method is used.

    :raises Http404: when neither an id nor a complete date is supplied, or
        no occurrence exists at the given date
    :return: (event, occurrence) tuple
    """
    if occurrence_id:
        occurrence = get_object_or_404(Occurrence, id=occurrence_id)
        event = occurrence.event
    elif None not in (year, month, day, hour, minute, second):
        event = get_object_or_404(Event, id=event_id)
        date = timezone.make_aware(
            datetime.datetime(int(year), int(month), int(day),
                              int(hour), int(minute), int(second)),
            tzinfo)
        occurrence = event.get_occurrence(date)
        if occurrence is None:
            raise Http404
    else:
        raise Http404
    return event, occurrence
def check_next_url(next_url):
    """Minimal safety check for a redirect target.

    Returns the URL unchanged when it is a non-empty value without a scheme
    separator; otherwise returns ``None`` (blocks redirects to other hosts).
    """
    is_unsafe = not next_url or '://' in next_url
    return None if is_unsafe else next_url
def get_next_url(request, default):
    """Resolve the redirect target for a request.

    Precedence: a safe ``next`` parameter from the request, then the
    ``OCCURRENCE_CANCEL_REDIRECT`` setting, then *default*.
    """
    next_url = OCCURRENCE_CANCEL_REDIRECT if OCCURRENCE_CANCEL_REDIRECT else default
    params = request.GET if request.method in ['GET', 'HEAD'] else request.POST
    candidate = params.get('next')
    if candidate and is_safe_url(url=candidate, host=request.get_host()):
        next_url = candidate
    return next_url
@check_calendar_permissions
def api_occurrences(request):
    """JSON endpoint returning occurrences between ``start`` and ``end``."""
    params = request.GET
    start = params.get('start')
    end = params.get('end')
    calendar_slug = params.get('calendar_slug')
    timezone = params.get('timezone')
    try:
        response_data = _api_occurrences(start, end, calendar_slug, timezone)
    except (ValueError, Calendar.DoesNotExist) as e:
        # Bad parameters or unknown calendar slug.
        return HttpResponseBadRequest(e)
    return JsonResponse(response_data, safe=False)
def _api_occurrences(start, end, calendar_slug, timezone):
    """Build the list of occurrence dicts consumed by fullcalendar.

    :param start: range start; an ISO-style date string (contains '-') or a
        unix timestamp string
    :param end: range end, same formats as *start*
    :param calendar_slug: restrict to one calendar; falsy means all calendars
    :param timezone: optional tz name used to localize the returned dates
    :raises ValueError: when start/end are missing or unparseable
    :raises Calendar.DoesNotExist: when *calendar_slug* matches no calendar
    :return: list of dicts, one per occurrence in the range
    """
    if not start or not end:
        raise ValueError('Start and end parameters are required')
    # version 2 of full calendar
    # TODO: improve this code with date util package
    if '-' in start:
        def convert(ddatetime):
            if ddatetime:
                ddatetime = ddatetime.split(' ')[0]
                try:
                    return datetime.datetime.strptime(ddatetime, '%Y-%m-%d')
                except ValueError:
                    # try a different date string format first before failing
                    return datetime.datetime.strptime(ddatetime, '%Y-%m-%dT%H:%M:%S')
    else:
        def convert(ddatetime):
            return datetime.datetime.utcfromtimestamp(float(ddatetime))
    start = convert(start)
    end = convert(end)
    current_tz = False
    if timezone and timezone in pytz.common_timezones:
        # make start and end dates aware in given timezone
        current_tz = pytz.timezone(timezone)
        start = current_tz.localize(start)
        end = current_tz.localize(end)
    elif settings.USE_TZ:
        # If USE_TZ is True, make start and end dates aware in UTC timezone
        utc = pytz.UTC
        start = utc.localize(start)
        end = utc.localize(end)
    if calendar_slug:
        # will raise DoesNotExist exception if no match
        calendars = [Calendar.objects.get(slug=calendar_slug)]
    # if no calendar slug is given, get all the calendars
    else:
        calendars = Calendar.objects.all()
    response_data = []
    # Algorithm to get an id for the occurrences in fullcalendar (NOT THE SAME
    # AS IN THE DB) which are always unique.
    # Fullcalendar thinks that all their "events" with the same "event.id" in
    # their system are the same object, because it's not really built around
    # the idea of events (generators)
    # and occurrences (their events).
    # Check the "persisted" boolean value that tells it whether to change the
    # event, using the "event_id" or the occurrence with the specified "id".
    # for more info https://github.com/llazzaro/django-scheduler/pull/169
    i = 1
    if Occurrence.objects.all().count() > 0:
        i = Occurrence.objects.latest('id').id + 1
    event_list = []
    for calendar in calendars:
        # create flat list of events from each calendar
        event_list += calendar.events.filter(start__lte=end).filter(
            Q(end_recurring_period__gte=start) |
            Q(end_recurring_period__isnull=True))
    for event in event_list:
        occurrences = event.get_occurrences(start, end)
        for occurrence in occurrences:
            # Synthetic id for non-persisted occurrences (see note above).
            occurrence_id = i + occurrence.event.id
            existed = False
            if occurrence.id:
                # Persisted occurrence: use its real DB id instead.
                occurrence_id = occurrence.id
                existed = True
            recur_rule = occurrence.event.rule.name \
                if occurrence.event.rule else None
            if occurrence.event.end_recurring_period:
                recur_period_end = occurrence.event.end_recurring_period
                if current_tz:
                    # make recur_period_end aware in given timezone
                    recur_period_end = recur_period_end.astimezone(current_tz)
                recur_period_end = recur_period_end
            else:
                recur_period_end = None
            event_start = occurrence.start
            event_end = occurrence.end
            if current_tz:
                # make event start and end dates aware in given timezone
                event_start = event_start.astimezone(current_tz)
                event_end = event_end.astimezone(current_tz)
            response_data.append({
                'id': occurrence_id,
                'title': occurrence.title,
                'start': event_start,
                'end': event_end,
                'existed': existed,
                'event_id': occurrence.event.id,
                'color': occurrence.event.color_event,
                'description': occurrence.description,
                'rule': recur_rule,
                'end_recurring_period': recur_period_end,
                'creator': str(occurrence.event.creator),
                'calendar': occurrence.event.calendar.slug,
                'cancelled': occurrence.cancelled,
            })
    return response_data
@require_POST
@check_calendar_permissions
def api_move_or_resize_by_code(request):
    """JSON endpoint that moves or resizes an occurrence/event by a minute delta."""
    post = request.POST
    # 'id' in the payload is the occurrence pk; renamed locally to avoid
    # shadowing the builtin.
    occurrence_id = post.get('id')
    existed = post.get('existed') == 'true'
    delta = datetime.timedelta(minutes=int(post.get('delta')))
    resize = bool(post.get('resize', False))
    event_id = post.get('event_id')
    response_data = _api_move_or_resize_by_code(
        request.user,
        occurrence_id,
        existed,
        delta,
        resize,
        event_id)
    return JsonResponse(response_data)
def _api_move_or_resize_by_code(user, id, existed, delta, resize, event_id):
    """Shift (or resize) a persisted occurrence, or an event plus its
    occurrences, by *delta*.

    :param user: user whose permissions are checked before saving
    :param id: Occurrence pk, used when *existed* is True
    :param existed: True when the occurrence is persisted in the DB
    :param delta: datetime.timedelta to apply
    :param resize: when True only the end moves; otherwise start and end
    :param event_id: Event pk, used when *existed* is False
    :return: dict with 'status' of "OK" or "PERMISSION DENIED"
    """
    response_data = {}
    response_data['status'] = "PERMISSION DENIED"
    if existed:
        occurrence = Occurrence.objects.get(id=id)
        occurrence.end += delta
        if not resize:
            occurrence.start += delta
        if CHECK_OCCURRENCE_PERM_FUNC(occurrence, user):
            occurrence.save()
            response_data['status'] = "OK"
    else:
        event = Event.objects.get(id=event_id)
        # dts/dte are applied to persisted occurrences via F expressions;
        # a resize leaves starts untouched (dts stays 0).
        dts = 0
        dte = delta
        if not resize:
            event.start += delta
            dts = delta
        event.end = event.end + delta
        if CHECK_EVENT_PERM_FUNC(event, user):
            event.save()
            # Keep persisted occurrences aligned with the moved event.
            event.occurrence_set.all().update(
                original_start=F('original_start') + dts,
                original_end=F('original_end') + dte,
            )
            response_data['status'] = "OK"
    return response_data
@require_POST
@check_calendar_permissions
def api_select_create(request):
    """JSON endpoint creating a placeholder event from a selected time range."""
    post = request.POST
    response_data = _api_select_create(
        post.get('start'), post.get('end'), post.get('calendar_slug'))
    return JsonResponse(response_data)
def _api_select_create(start, end, calendar_slug):
    """Create a placeholder event on the given calendar; returns a status dict."""
    calendar = Calendar.objects.get(slug=calendar_slug)
    Event.objects.create(
        start=dateutil.parser.parse(start),
        end=dateutil.parser.parse(end),
        title=EVENT_NAME_PLACEHOLDER,
        calendar=calendar,
    )
    return {'status': "OK"}
|
nilq/baby-python
|
python
|
from invoke import task
from os.path import join, exists
from os import makedirs
from shutil import copy, rmtree
from subprocess import run
from tasks.util.env import (
BIN_DIR,
GLOBAL_BIN_DIR,
KUBECTL_BIN,
AZURE_RESOURCE_GROUP,
AZURE_VM_SIZE,
AKS_CLUSTER_NODE_COUNT,
AKS_CLUSTER_NAME,
)
from tasks.util.version import get_k8s_version
# Note - this must match the version used by Faasm
KNATIVE_VERSION = "0.24.0"
K9S_VERSION = "0.24.15"
# AKS commandline reference here:
# https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest
def _run_aks_cmd(name, az_args=None):
    """Run an ``az aks`` subcommand against the configured resource group.

    Extra CLI arguments may be supplied via *az_args*; the command is echoed
    before execution, and a non-zero exit raises CalledProcessError.
    """
    parts = [
        "az",
        f"aks {name}",
        f"--resource-group {AZURE_RESOURCE_GROUP}",
    ]
    parts.extend(az_args or [])
    cmd = " ".join(parts)
    print(cmd)
    run(cmd, shell=True, check=True)
@task
def list(ctx):
    """
    List all AKS resources
    """
    # NOTE: shadows the builtin ``list`` at module level; kept because invoke
    # derives the CLI task name from the function name.
    _run_aks_cmd("list")
@task
def provision(ctx):
    """
    Provision the AKS cluster
    """
    # Match the cluster's k8s version to the one used elsewhere in the repo.
    k8s_ver = get_k8s_version()
    _run_aks_cmd(
        "create",
        [
            f"--name {AKS_CLUSTER_NAME}",
            f"--node-count {AKS_CLUSTER_NODE_COUNT}",
            f"--node-vm-size {AZURE_VM_SIZE}",
            f"--kubernetes-version {k8s_ver}",
            "--generate-ssh-keys",
        ],
    )
@task
def details(ctx):
    """
    Show the details of the cluster
    """
    _run_aks_cmd("show", [f"--name {AKS_CLUSTER_NAME}"])
@task
def delete(ctx):
    """
    Delete the AKS cluster
    """
    # --yes skips the interactive confirmation prompt.
    _run_aks_cmd("delete", [f"--name {AKS_CLUSTER_NAME}", "--yes"])
@task
def credentials(ctx):
    """
    Get credentials for the AKS cluster
    """
    # Fetch the kubeconfig credentials for the cluster.
    _run_aks_cmd(
        "get-credentials",
        [
            f"--name {AKS_CLUSTER_NAME}",
            "--overwrite-existing",
        ],
    )
    # Sanity check: make sure kubectl can reach the cluster.
    cmd = f"{KUBECTL_BIN} get nodes"
    print(cmd)
    run(cmd, shell=True, check=True)
def _download_binary(url, binary_name):
    """Download *url* into BIN_DIR, mark it executable, and return its path."""
    makedirs(BIN_DIR, exist_ok=True)
    run(f"curl -LO {url}", shell=True, check=True, cwd=BIN_DIR)
    run(f"chmod +x {binary_name}", shell=True, check=True, cwd=BIN_DIR)
    return join(BIN_DIR, binary_name)
def _symlink_global_bin(binary_path, name):
    """Symlink *binary_path* as *name* inside GLOBAL_BIN_DIR (requires sudo)."""
    global_path = join(GLOBAL_BIN_DIR, name)
    if exists(global_path):
        # Remove any stale binary/symlink first so ln -s cannot fail.
        print(f"Removing existing binary at {global_path}")
        run(f"sudo rm -f {global_path}", shell=True, check=True)
    print(f"Symlinking {global_path} -> {binary_path}")
    run(
        f"sudo ln -s {binary_path} {name}",
        shell=True,
        check=True,
        cwd=GLOBAL_BIN_DIR,
    )
@task
def install_kubectl(ctx, system=False):
    """
    Install the k8s CLI (kubectl)
    """
    k8s_ver = get_k8s_version()
    url = f"https://dl.k8s.io/release/v{k8s_ver}/bin/linux/amd64/kubectl"
    binary_path = _download_binary(url, "kubectl")
    if system:
        # Symlink for kubectl globally
        _symlink_global_bin(binary_path, "kubectl")
@task
def install_kn(ctx, system=False):
    """
    Install the knative CLI (kn)
    """
    url = (
        "https://github.com/knative/client/releases/download/"
        f"v{KNATIVE_VERSION}/kn-linux-amd64"
    )
    binary_path = _download_binary(url, "kn-linux-amd64")
    # Symlink for kn command locally
    run("rm -f kn", shell=True, check=True, cwd=BIN_DIR)
    run(f"ln -s {binary_path} kn", shell=True, check=True, cwd=BIN_DIR)
    # Symlink for kn command globally
    if system:
        _symlink_global_bin(binary_path, "kn")
@task
def install_k9s(ctx, system=False):
    """
    Install the K9s CLI
    """
    tar_name = "k9s_Linux_x86_64.tar.gz"
    url = f"https://github.com/derailed/k9s/releases/download/v{K9S_VERSION}/{tar_name}"
    # Download the tarball into a scratch directory.
    workdir = "/tmp/k9s"
    makedirs(workdir, exist_ok=True)
    run(f"curl -LO {url}", shell=True, check=True, cwd=workdir)
    # Extract it.
    run(f"tar -xf {tar_name}", shell=True, check=True, cwd=workdir)
    # Copy the k9s binary into place, then drop the scratch directory.
    binary_path = join(BIN_DIR, "k9s")
    copy(join(workdir, "k9s"), binary_path)
    rmtree(workdir)
    # Symlink for k9s command globally
    if system:
        _symlink_global_bin(binary_path, "k9s")
|
nilq/baby-python
|
python
|
import sys
class ModelSearchCriteria:
    """Value object bundling the parameters of a model search."""

    def __init__(self, datatable_names: list, column_names: list, search_text: str):
        # ``[str]`` list literals are not valid annotations; use ``list``
        # (elements are expected to be strings).
        self.datatable_names = datatable_names
        self.column_names = column_names
        self.search_text = search_text
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf_8 -*-
"""Access and query Twitter's API with the simplistic twitter package (`pip install twitter`).
"""
from __future__ import print_function
from __future__ import unicode_literals
import csv
import os
import time
from twitter import OAuth
from twitter import Twitter
def setup_twitter(config_file='config.py'):
    """Setup auth keys and session with Twitter client.

    Reads OAuth credentials by executing *config_file* into a dict.
    Uses ``exec`` on the file contents — ``execfile`` was removed in
    Python 3, so the previous implementation only ran on Python 2.
    """
    config = {}
    with open(config_file) as f:
        exec(compile(f.read(), config_file, 'exec'), config)
    twitter_obj = Twitter(auth=OAuth(config["access_key"],
                                     config["access_secret"],
                                     config["consumer_key"],
                                     config["consumer_secret"]))
    return twitter_obj
def search_twitter(twitter_session, query, count=100, status='popular'):
    """Submit *query* to the Twitter search API via the twitter package."""
    valid_statuses = ['mixed', 'recent', 'popular']
    assert status in valid_statuses, "'status' must be in {}.".format(valid_statuses)
    return twitter_session.search.tweets(q=query,
                                         lang='en',
                                         result=status,
                                         count=count,
                                         retry=True)
def parse_twitter_response(twitter_response, min_rts=500, strip_non_ascii=True):
    """Extract requested variables from a Twitter API response.

    Yields one dict per tweet with keys '_tweet_datetime', '_text' and
    '_rt_count'. Works on Python 3: the previous version encoded the text
    to bytes and then iterated it character-wise (``ord`` over ints), which
    raises TypeError under Python 3.

    Note: ``min_rts`` is accepted for interface compatibility; filtering by
    retweet count is done by the caller.
    """
    for result in twitter_response['statuses']:
        tweet_datetime = result['created_at']
        text = result['text']
        if strip_non_ascii:
            # Replace any non-ASCII character with a space.
            text = ''.join(c if ord(c) < 128 else ' ' for c in text)
        # Strip 'RT ' from head of retweets, redundant
        if text.startswith('RT '):
            text = text[3:]
        # Newlines would break the one-row-per-tweet CSV output.
        text = text.replace('\n', ' ')
        yield {'_tweet_datetime': tweet_datetime,
               '_text': text,
               '_rt_count': result['retweet_count']}
def search_parse_write_tweets(query_str,
                              total_to_fetch,
                              status,
                              minimum_rts,
                              low_rt_threshold):
    """Search Twitter, parse the response, and append tweets to CSV files.

    Tweets with at least *minimum_rts* retweets go to tweets_popular.csv;
    tweets at or below *low_rt_threshold* go to tweets_not_popular.csv.
    """
    twitter = setup_twitter()
    # Use the function parameters — the previous version ignored its own
    # arguments and reached for the module-level globals instead.
    query_response = search_twitter(twitter_session=twitter,
                                    query=query_str,
                                    count=total_to_fetch,
                                    status=status)
    print("Search complete ({} seconds)".format(query_response["search_metadata"]["completed_in"]))
    tweets_data = parse_twitter_response(query_response, min_rts=minimum_rts)  # generator
    # Peek at the first row to learn the field names; the generator then
    # continues from the second row (first row is not written, matching the
    # original behaviour).
    fieldnames = []
    fieldnames_len = 0
    for row in tweets_data:
        fieldnames = list(row.keys())
        fieldnames_len = len(fieldnames)
        break

    def _make_writer(path):
        """Open *path* for appending and return a DictWriter (header written
        only when the file is new)."""
        write_header = not os.path.isfile(path)
        # Text mode with newline='' is required by the csv module on
        # Python 3 (the previous 'ab' + bytes delimiter only worked on 2).
        handle = open(path, 'a', newline='')
        writer = csv.DictWriter(handle, delimiter='|', fieldnames=fieldnames)
        if write_header:
            writer.writeheader()
        return writer

    csv_popular_writer = _make_writer('tweets/tweets_popular.csv')
    csv_not_popular_writer = _make_writer('tweets/tweets_not_popular.csv')
    # Route each parsed tweet to the matching file.
    for tweet_data in tweets_data:
        if len(tweet_data.keys()) != fieldnames_len:
            continue
        # Keys are prefixed with '_' by parse_twitter_response; the previous
        # version looked up 'rt_count' and always raised KeyError.
        if tweet_data['_rt_count'] >= minimum_rts:
            csv_popular_writer.writerow(tweet_data)
        elif tweet_data['_rt_count'] <= low_rt_threshold:
            csv_not_popular_writer.writerow(tweet_data)
if __name__ == '__main__':
    # Number of tweets requested per search call.
    TWEETS_TO_FETCH = 1000
    # Broad disjunction of common words so the search matches almost anything.
    query_string = 'the a u i me she you he they for rt at tweet'.split(' ')
    query_disjunction = ' OR '.join(query_string)
    #status = 'popular' # ['mixed', 'recent', 'popular']
    minimum_rts = 500
    low_rt_threshold = 10
    # Poll forever: one 'popular' and one 'mixed' search per minute.
    while True:
        time.sleep(60)
        search_parse_write_tweets(query_str=query_disjunction,
                                  total_to_fetch=TWEETS_TO_FETCH,
                                  status='popular',
                                  minimum_rts=minimum_rts,
                                  low_rt_threshold=low_rt_threshold)
        search_parse_write_tweets(query_str=query_disjunction,
                                  total_to_fetch=TWEETS_TO_FETCH,
                                  status='mixed',
                                  minimum_rts=minimum_rts,
                                  low_rt_threshold=low_rt_threshold)
|
nilq/baby-python
|
python
|
import jieba
import jieba.analyse
from gensim.test.utils import get_tmpfile
import gensim.models.word2vec as word2vec
from path import Path
import argparse
from utils import readlines,SeqSubSeq,toarry
# File locations should be changed to your own storage paths.
# Tokenize the source text.
parser = argparse.ArgumentParser(description="precess tree file to a doc")
parser.add_argument('--doc', default='./doc6.txt', help="Input file")
parser.add_argument('--doc_post',default='./doc_post.txt',help='output an txt to descripe file_in')
parser.add_argument('--seged_file',default='./conv19_segments.txt')
parser.add_argument('--stop_words_file',default='./stopwords.txt')
parser.add_argument('--model_file',default='./word2vec.model')
parser.add_argument('--node_list_file',default='./array.txt',help='output an txt to descripe file_in')
parser.add_argument('--seq_sub_seq_file',default='./seqsubseq.txt',help='output an txt to descripe file_in')
args = parser.parse_args()
# Post-processed document consumed by Segment(); NOTE(review): hard-coded
# rather than taken from args.doc_post — confirm these stay in sync.
file = Path('./doc_post.txt')
topn = 10
save_model=True
load_model=True
def stopwordslist(filepath):
    """Read a stop-words file (UTF-8, one word per line) into a list."""
    # Use a context manager so the file handle is closed — the previous
    # version opened the file without ever closing it.
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]
def preprocess(args):
    '''
    Build doc_post.txt from the tree file.

    Converts the input doc into a node-list file, derives the
    sequence/sub-sequence file from it, then regroups the source lines
    (with their two-character prefix stripped) according to that grouping
    and writes the result to doc_post.txt.

    :param args: parsed CLI arguments (uses doc, node_list_file,
        seq_sub_seq_file)
    :return: None; writes doc_post.txt as a side effect
    '''
    toarry(file_in=args.doc,file_out=args.node_list_file)
    SeqSubSeq(file_in = args.node_list_file,file_out=args.seq_sub_seq_file)
    file_name = Path(args.seq_sub_seq_file)
    if not file_name.exists():
        return
    seq_sub_seq_file = Path(args.seq_sub_seq_file)
    seq_sub_seq = readlines(seq_sub_seq_file)
    src_file = Path(args.doc)
    lines = readlines(src_file)
    ret_line=[]
    for line in lines:
        # Drop the first two characters of each source line (tree prefix).
        ret_line.append(line[2:])
    print(ret_line)
    post_process_txt=[]
    post_pcoess_line=[]
    for line_arr in seq_sub_seq:
        if line_arr!='':
            # Each non-empty line lists comma-separated indices into ret_line.
            for num_line in line_arr.split(','):
                post_pcoess_line.append(ret_line[int(num_line)])
            post_process_txt.append(post_pcoess_line.copy())
            post_pcoess_line=[]
    doc_post = 'doc_post.txt'
    with open(doc_post,'w',encoding='UTF-8') as txt:
        # str(list)[1:-1] strips the surrounding brackets, leaving one
        # comma-separated line per group.
        txt.write(str(post_process_txt[0] )[1:-1])
        for item in post_process_txt[1:]:
            txt.write('\n'+str(item)[1:-1])
def Segment(args):
    '''
    Tokenize the source document with jieba, filtered by the stop-word list.

    :return: None; writes the segmented, space-separated text to
        args.seged_file
    '''
    stop_words_file = Path(args.stop_words_file)
    seged_file = Path(args.seged_file)
    stopwords = stopwordslist(stop_words_file)
    outstr=''
    # NOTE(review): reads the module-level ``file`` (./doc_post.txt), not an
    # args path — confirm this should track args.doc_post.
    with open(file,encoding='utf-8') as f:
        document = f.read()
        document_cut = jieba.cut_for_search(document)
        for word in document_cut:
            if word not in stopwords:
                if word != '\t':
                    outstr += word
                    outstr += " "
    with open(seged_file, 'w',encoding="utf-8") as f2:
        f2.write(outstr)
def run(args):
    """Load or train a word2vec model on the segmented file and print the
    100 terms most similar to '治疗'."""
    seged_file = Path(args.seged_file)
    sentences = word2vec.LineSentence(seged_file)
    model_file = Path(args.model_file)
    if load_model==True and model_file.exists():
        # NOTE(review): loads the hard-coded "word2vec.model" rather than
        # model_file — confirm these always refer to the same path.
        model = word2vec.Word2Vec.load("word2vec.model")
    else:
        model = word2vec.Word2Vec(sentences, hs=3, min_count=5, window=10, size=100)
        if save_model == True:
            #path = get_tmpfile("word2vec.model") # create a temporary file
            model.save(model_file)
    vocabulary = model.wv.similar_by_word('治疗', topn=100)
    for key in vocabulary:
        print(key)
if __name__=='__main__':
    # Build doc_post.txt from the tree file, then segment it with jieba.
    preprocess(args)
    Segment(args)
    # Word2vec training/similarity lookup is disabled by default.
    #run(args)
|
nilq/baby-python
|
python
|
"""functions for working with tensorboard"""
from pathlib import Path
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
def logdir2df(logdir):
    """convert tensorboard events files in a logs directory into a pandas DataFrame

    events files are created by SummaryWriter from PyTorch or Tensorflow

    Parameters
    ----------
    logdir : str, Path
        path to directory containing tfevents file(s) saved by a SummaryWriter

    Returns
    -------
    df : pandas.Dataframe
        indexed by 'step', one column per scalar tag written to the events
        file (any 'val/' prefix on tag names is removed)
    """
    # EventAccumulator expects a plain string path.
    if isinstance(logdir, Path):
        logdir = str(logdir)
    accumulator = EventAccumulator(path=logdir)
    accumulator.Reload()  # load all data written so far
    per_tag_frames = []
    for tag in accumulator.Tags()['scalars']:
        frame = pd.DataFrame(
            accumulator.Scalars(tag),
            columns=["wall_time", "step", tag.replace('val/', '')],
        )
        frame = frame.set_index("step").drop("wall_time", axis=1)
        per_tag_frames.append(frame)
    return pd.concat(per_tag_frames, axis=1)
def logdir2csv(logdir):
    """convert the tensorboard events file in a logs directory into a .csv file

    Parameters
    ----------
    logdir : str, Path
        path to directory containing a single tfevents file saved by a
        SummaryWriter

    Returns
    -------
    None

    Raises
    ------
    ValueError
        if the directory contains zero, or more than one, events file
    """
    logdir = Path(logdir)
    # Previous runs write '<stem>.csv' files whose names still match the
    # '*tfevents*' glob; exclude them — they can simply be overwritten.
    events_files = [
        path for path in sorted(logdir.glob('*tfevents*'))
        if not str(path).endswith('.csv')
    ]
    # Guard clauses replace the previous nested if/elif/else chain.
    if not events_files:
        raise ValueError(
            f'did not find any events files in {logdir}'
        )
    if len(events_files) > 1:
        raise ValueError(
            f'found multiple events files in {logdir}:\n{events_files}.'
            'Please ensure there is only one events file in the directory, '
            'unclear which to use.'
        )
    events_file = events_files[0]
    df = logdir2df(logdir)
    df.to_csv(logdir.joinpath(events_file.stem + '.csv'))
|
nilq/baby-python
|
python
|
import json
import logging
import os
from datetime import datetime
def coco_evaluation(dataset, predictions, output_dir, iteration=None):
    """Run COCO bbox evaluation of *predictions* against *dataset*'s ground truth.

    Writes the detections to ``<output_dir>/bbox.json``, runs pycocotools'
    COCOeval on them, logs the summary metrics, and writes them to a
    ``result_*.txt`` file (named by *iteration* when given, otherwise by
    timestamp).

    :param dataset: dataset providing get_img_info/get_annotation/coco and
        the contiguous-to-COCO category id mapping
    :param predictions: iterable of per-image predictions with 'boxes',
        'labels' and 'scores' fields, resizable to the original image size
    :param output_dir: directory for the JSON and result text files
    :param iteration: optional training iteration used in the result filename
    :return: dict with a 'metrics' mapping of AP/AP50/AP75/APs/APm/APl
    """
    coco_results = []
    for i, prediction in enumerate(predictions):
        img_info = dataset.get_img_info(i)
        # Rescale boxes from network resolution back to the original image size.
        prediction = prediction.resize((img_info['width'], img_info['height'])).numpy()
        boxes, labels, scores = prediction['boxes'], prediction['labels'], prediction['scores']
        image_id, annotation = dataset.get_annotation(i)
        class_mapper = dataset.contiguous_id_to_coco_id
        if labels.shape[0] == 0:
            # No detections for this image.
            continue
        boxes = boxes.tolist()
        labels = labels.tolist()
        scores = scores.tolist()
        coco_results.extend(
            [
                {
                    "image_id": image_id,
                    "category_id": class_mapper[labels[k]],
                    "bbox": [box[0], box[1], box[2] - box[0], box[3] - box[1]],  # to xywh format
                    "score": scores[k],
                }
                for k, box in enumerate(boxes)
            ]
        )
    iou_type = 'bbox'
    json_result_file = os.path.join(output_dir, iou_type + ".json")
    logger = logging.getLogger("SSD.inference")
    logger.info('Writing results to {}...'.format(json_result_file))
    with open(json_result_file, "w") as f:
        json.dump(coco_results, f)
    from pycocotools.cocoeval import COCOeval
    coco_gt = dataset.coco
    coco_dt = coco_gt.loadRes(json_result_file)
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    result_strings = []
    keys = ["AP", "AP50", "AP75", "APs", "APm", "APl"]
    metrics = {}
    for i, key in enumerate(keys):
        # coco_eval.stats holds the standard COCO summary metrics; the first
        # six entries correspond to the keys above.
        metrics[key] = coco_eval.stats[i]
        logger.info('{:<10}: {}'.format(key, round(coco_eval.stats[i], 3)))
        result_strings.append('{:<10}: {}'.format(key, round(coco_eval.stats[i], 3)))
    if iteration is not None:
        result_path = os.path.join(output_dir, 'result_{:07d}.txt'.format(iteration))
    else:
        result_path = os.path.join(output_dir, 'result_{}.txt'.format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S')))
    with open(result_path, "w") as f:
        f.write('\n'.join(result_strings))
    return dict(metrics=metrics)
|
nilq/baby-python
|
python
|
from snovault import upgrade_step
@upgrade_step('suspension', '1', '2')
def suspension_1_2(value, system):
    """Upgrade suspension 1 -> 2: drop the obsolete biosample_ontology field."""
    value.pop('biosample_ontology', None)
@upgrade_step('suspension', '2', '3')
def suspension_2_3(value, system):
    """Upgrade suspension 2 -> 3: replace scalar 'url' with list-valued 'urls'."""
    if 'url' in value:
        value['urls'] = [value.pop('url')]
@upgrade_step('suspension', '3', '4')
def suspension_3_4(value, system):
    """Upgrade suspension 3 -> 4: coerce dissociation_time to a string."""
    try:
        value['dissociation_time'] = str(value['dissociation_time'])
    except KeyError:
        # Field absent: nothing to convert.
        pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from django.views.generic import CreateView, UpdateView, DeleteView, DetailView
class CreateUserView(CreateView):
    # Minimal CreateView used only to exercise the form-mixin logging.
    model = User
    fields = '__all__'
class UpdateUserView(UpdateView):
    # Minimal UpdateView for the mixin tests.
    model = User
    fields = '__all__'
class DeleteUserView(DeleteView):
    # Minimal DeleteView for the mixin tests.
    model = User
class DetailUserView(DetailView):
    model = User
    # NOTE(review): ``fields`` is a form-view attribute; DetailView ignores
    # it — presumably copy-pasted from the form views above.
    fields = '__all__'
# URLconf activated via @override_settings below; one route per test view.
urlpatterns = [
    url(r'^create-user/$', view=CreateUserView.as_view(), name='create-user'),
    url(r'^update-user/(?P<pk>\d+)/$', view=UpdateUserView.as_view(), name='update-user'),
    url(r'^delete-user/(?P<pk>\d+)/$', view=DeleteUserView.as_view(), name='delete-user'),
    url(r'^detail-user/(?P<pk>\d+)/$', view=DetailUserView.as_view(), name='detail-user')
]
@override_settings(ROOT_URLCONF='tests.test_django_forms_mixin')
class AccessLogModelFormMixinTestCase(TestCase):
    # Exercises the logging behaviour through plain Django generic views.
    def setUp(self):
        # Create and log in the user all test requests are made as.
        self.user = User.objects.create_user('testuser', 'testuser@example.com', 'test123.')
        self.client.login(username=self.user.username, password='test123.')
    def test_create_view_object_is_logged(self):
        # NOTE(review): a successful CreateView normally redirects (302);
        # 200 here implies the form re-renders — confirm this is intended.
        response = self.client.post(reverse('create-user'), data={
            'username': 'another-user',
            'email': 'another-user@example.com',
            'password': 'test123.'
        })
        self.assertEqual(response.status_code, 200)
    def test_detail_view_object_is_logged(self):
        response = self.client.get(reverse('detail-user', kwargs={'pk': self.user.pk}))
        self.assertEqual(response.status_code, 200)
|
nilq/baby-python
|
python
|
"""
This set of functions is for analyzing all the articles in the PLOS corpus. A Jupyter Notebook is provided with
examples of analysis. It can:
* compare the articles indexed in Solr, PMC, and article pages
* spot-check individual JATS fields for irregularities
* create summaries of articles by type, publication date, etc
* generate lists of retracted or corrected articles
"""
import collections
import csv
import os
import random
import requests
from tqdm import tqdm
from .. import get_corpus_dir, newarticledir
from ..plos_regex import (validate_doi, full_doi_regex_match, validate_url, validate_filename)
from ..transformations import (filename_to_doi, doi_to_url)
from ..plos_corpus import (listdir_nohidden, uncorrected_proofs_text_list,
download_updated_xml, get_all_solr_dois,
download_check_and_move)
from ..article import Article
counter = collections.Counter
pmcdir = "pmc_articles"
max_invalid_files_to_print = 100
def validate_corpus(directory=None):
    """
    For every local article file and DOI listed on Solr, validate file names, DOIs, URLs in terms of
    regular expressions.
    Stops checking as soon as encounters problem and prints it

    :param directory: corpus directory to check, defaults to get_corpus_dir()
    :return: boolean of whether corpus passed validity checks
    """
    if directory is None:
        directory = get_corpus_dir()
    # check DOIs
    # NOTE(review): get_all_plos_dois is not among this module's visible
    # imports — confirm it is defined or imported elsewhere in the file.
    plos_dois = get_all_plos_dois()
    plos_valid_dois = [doi for doi in plos_dois if validate_doi(doi)]
    if set(plos_dois) == set(plos_valid_dois):
        pass
    else:
        print("Invalid DOIs: {}".format(set(plos_dois) - set(plos_valid_dois)))
        return False
    # check urls
    plos_urls = [doi_to_url(doi) for doi in plos_valid_dois]
    plos_valid_urls = [url for url in plos_urls if validate_url(url)]
    if set(plos_urls) == set(plos_valid_urls) and len(plos_valid_urls) == len(plos_valid_dois):
        pass
    else:
        print("Invalid URLs: {}".format(set(plos_urls) - set(plos_valid_urls)))
        return False
    # check files and filenames
    plos_files = listdir_nohidden(directory)
    if plos_files:
        plos_valid_filenames = [article for article in plos_files if validate_filename(article)]
        if len(plos_valid_dois) == len(plos_valid_filenames):
            pass
        else:
            print("Invalid filenames: {}".format(set(plos_valid_dois) - set(plos_valid_filenames)))
            return False
        plos_valid_files = [article for article in plos_valid_filenames if os.path.isfile(article)]
        if set(plos_valid_filenames) == set(plos_valid_files):
            return True
        else:
            invalid_files = set(plos_valid_filenames) - set(plos_valid_files)
            # Avoid flooding the console when many files are missing.
            if len(invalid_files) > max_invalid_files_to_print:
                print("Too many invalid files to print: {}".format(len(invalid_files)))
            else:
                print("Invalid files: {}".format(invalid_files))
            return False
    else:
        print("Corpus directory empty. Re-download by running create_local_plos_corpus()")
        return False
# These functions are for getting the article types of all PLOS articles.
def get_jats_article_type_list(article_list=None, directory=None):
    """Lists all JATS article types in the corpus, sorted by frequency.

    :param article_list: list of article files, defaults to every file in *directory*
    :param directory: directory of articles, defaults to get_corpus_dir()
    :returns: (type, count) pairs, most frequent first
    :rtype: list
    """
    if directory is None:
        directory = get_corpus_dir()
    if article_list is None:
        article_list = listdir_nohidden(directory)
    type_names = [
        Article.from_filename(article_file, directory=directory).type_
        for article_file in tqdm(article_list)
    ]
    print(len(set(type_names)), 'types of articles found.')
    return counter(type_names).most_common()
def get_plos_article_type_list(article_list=None, directory=None):
    """Lists all internal PLOS article types in the corpus, sorted by frequency.

    :param article_list: list of article files, defaults to every file in *directory*
    :param directory: directory of articles, defaults to get_corpus_dir()
    :returns: (type, count) pairs, most frequent first
    :rtype: list
    """
    if directory is None:
        directory = get_corpus_dir()
    if article_list is None:
        article_list = listdir_nohidden(directory)
    type_names = [
        Article.from_filename(article_file, directory=directory).plostype
        for article_file in tqdm(article_list)
    ]
    print(len(set(type_names)), 'types of articles found.')
    return counter(type_names).most_common()
def get_article_types_map(article_list=None, directory=None):
    """Maps the JATS and PLOS article types onto the XML DTD.

    Used for comparing how JATS and PLOS article types are assigned.

    :param article_list: list of articles, defaults to None
    :param directory: directory of articles, defaults to get_corpus_dir()
    :returns: list of (JATS type, PLOS type, DTD) tuples, one per article
    :rtype: list
    """
    if directory is None:
        directory = get_corpus_dir()
    if article_list is None:
        article_list = listdir_nohidden(directory)
    article_types_map = []
    # BUG FIX: the previous version unpacked each filename with
    # ``for i, article_file in tqdm(article_list)``, which raises ValueError;
    # iterate the filenames directly.
    for article_file in tqdm(article_list):
        article = Article.from_filename(article_file)
        article.directory = directory
        article_types_map.append((article.type_, article.plostype, article.dtd))
    return article_types_map
def article_types_map_to_csv(article_types_map):
    """Write the `get_article_types_map()` list of tuples to a csv file.

    :param article_types_map: output of `get_article_types_map()`, a list of
        (JATS type, PLOS type, DTD) tuples
    :return: None; writes 'articletypes.csv' in the working directory
    """
    # newline='' is required by the csv module to avoid blank lines on Windows
    with open('articletypes.csv', 'w', newline='') as out:
        csv_out = csv.writer(out)
        # BUG FIX: the original header was ['type', 'count'], which does not
        # match the three-column rows produced by get_article_types_map().
        csv_out.writerow(['jats_article_type', 'plos_article_type', 'dtd_version'])
        for row in article_types_map:
            csv_out.writerow(row)
# These functions are for getting retracted articles
def get_retracted_doi_list(article_list=None, directory=None):
    """
    Scan articles in a directory for retraction notifications and collect
    the DOIs of the articles they retract.

    :param article_list: list of article files, defaults to all in directory
    :param directory: directory of articles, defaults to get_corpus_dir()
    :return: tuple of lists of DOIs for retractions articles, and retracted articles
    """
    if directory is None:
        directory = get_corpus_dir()
    if article_list is None:
        article_list = listdir_nohidden(directory)
    retractions_doi_list = []
    retracted_doi_list = []
    for art in tqdm(article_list):
        article = Article.from_filename(art)
        article.directory = directory
        if article.type_ != 'retraction':
            continue
        retractions_doi_list.append(article.doi)
        # the notification's linked DOIs are the articles actually retracted
        retracted_doi_list.extend(article.related_dois)
        # sanity-check each linked DOI against the DOI regex
        for doi in article.related_dois:
            if not full_doi_regex_match.search(doi):
                print("{} has incorrect linked DOI field: '{}'".format(art, doi))
    print(len(retracted_doi_list), 'retracted articles found.')
    return retractions_doi_list, retracted_doi_list
def get_amended_article_list(article_list=None, directory=None):
    """
    Scan articles in a directory for amendment notifications and collect
    the DOIs of the articles they amend.

    :param article_list: list of article files, defaults to all in directory
    :param directory: directory where the article files are, default is get_corpus_dir()
    :return: tuple of lists of DOIs for amendment articles, and amended articles
    """
    if directory is None:
        directory = get_corpus_dir()
    if article_list is None:
        article_list = listdir_nohidden(directory)
    amendments_article_list = []
    amended_article_list = []
    for art in tqdm(article_list):
        article = Article.from_filename(art)
        article.directory = directory
        if not article.amendment:
            continue
        amendments_article_list.append(article.doi)
        # the amendment's linked DOIs are the articles that were amended
        amended_article_list.extend(article.related_dois)
        # sanity-check each linked DOI against the DOI regex
        for doi in article.related_dois:
            if not full_doi_regex_match.search(doi):
                print(article.doi, "has incorrect linked DOI:", doi)
    print(len(amended_article_list), 'amended articles found.')
    return amendments_article_list, amended_article_list
# These functions are for checking for silent XML updates
def create_pubdate_dict(directory=None):
    """
    Map each article file in a directory to its publication date.

    Used for truncating revisiondate_sanity_check to more recent articles only.

    :param directory: directory of articles, defaults to get_corpus_dir()
    :return: dict mapping article files to datetime objects of their pubdates
    """
    if directory is None:
        directory = get_corpus_dir()
    return {art: Article.from_filename(art).pubdate
            for art in listdir_nohidden(directory)}
def revisiondate_sanity_check(article_list=None, tempdir=newarticledir, directory=None, truncated=True):
    """
    Check articles for silently updated XML, downloading any updates found.

    :param article_list: list of article files to check, defaults to None
    :param tempdir: directory that updated articles are downloaded into
    :param directory: directory of articles, defaults to get_corpus_dir()
    :param truncated: if True, restrict articles to only those with pubdates from the last year or two
    :return: list of article files whose remote XML differs from the local copy
    """
    if directory is None:
        directory = get_corpus_dir()
    list_provided = bool(article_list)
    if article_list is None and truncated is False:
        article_list = listdir_nohidden(directory)
    if article_list is None and truncated:
        # keep only the 30,000 most recently published articles
        pubdates = create_pubdate_dict(directory=directory)
        article_list = sorted(pubdates, key=pubdates.__getitem__, reverse=True)
        article_list = article_list[:30000]
    try:
        os.mkdir(tempdir)
    except FileExistsError:
        pass
    articles_different_list = []
    # BUG FIX: iterate over a snapshot of article_list, because when a list
    # was provided the loop removes processed items from it below; mutating
    # a list while iterating it causes items to be silently skipped.
    for article_file in tqdm(list(article_list)):
        updated = download_updated_xml(article_file=article_file)
        if updated:
            articles_different_list.append(article_file)
        if list_provided:
            article_list.remove(article_file)  # helps save time if need to restart process
    print(len(article_list), "articles checked for updates.")
    print(len(articles_different_list), "articles have updates.")
    return articles_different_list
def check_solr_doi(doi):
    '''
    For an article doi, see if there's a record of it in Solr.

    :param doi: the article DOI to look up
    :rtype: bool
    '''
    solr_url = ('http://api.plos.org/search?q=*%3A*&fq=doc_type%3Afull&fl=id,'
                '&wt=json&indent=true&fq=id:%22{}%22').format(doi)
    result = requests.get(solr_url).json()
    # numFound is 0 when the DOI is not indexed
    return bool(result['response']['numFound'])
def get_all_local_dois(directory=None):
    """Get the DOI of every article stored in a local corpus directory.

    :param directory: directory of articles, defaults to get_corpus_dir()
    :returns: list of DOIs
    :rtype: list
    """
    if directory is None:
        directory = get_corpus_dir()
    return [filename_to_doi(article_file)
            for article_file in listdir_nohidden(directory)]
def get_all_plos_dois(local_articles=None, solr_articles=None):
    '''
    Collect the local and Solr article lists and calculate the difference.

    Missing local downloads easily solved by re-running plos_corpus.py.
    Missing solr downloads require attention.

    :param local_articles: list of local DOIs, defaults to get_all_local_dois()
    :param solr_articles: list of Solr DOIs, defaults to get_all_solr_dois()
    :return: every DOI in PLOS corpus, across local and remote versions
    '''
    if solr_articles is None:
        solr_articles = get_all_solr_dois()
    if local_articles is None:
        local_articles = get_all_local_dois()
    solr_set = set(solr_articles)
    local_set = set(local_articles)
    missing_local_articles = solr_set - local_set
    if missing_local_articles:
        print('re-run plos_corpus.py to download latest {0} PLOS articles locally.'
              .format(len(missing_local_articles)))
    missing_solr_articles = local_set - solr_set
    plos_articles = set(solr_articles + local_articles)
    if missing_solr_articles:
        # bold the header line, then reset formatting for the DOI list
        print('\033[1m' + 'Articles that needs to be re-indexed on Solr:')
        print('\033[0m' + '\n'.join(sorted(missing_solr_articles)))
    return plos_articles
def get_random_list_of_dois(directory=None, count=100):
    '''
    Get a list of random DOIs. Tries first to sample local files in
    directory, otherwise falls back to the Solr DOI list.

    :param directory: defaults to get_corpus_dir()
    :param count: how many DOIs are to be returned
    :return: a list of random DOIs for analysis
    '''
    if directory is None:
        directory = get_corpus_dir()
    try:
        sampled_files = random.sample(listdir_nohidden(directory), count)
        return [filename_to_doi(f) for f in sampled_files]
    except OSError:
        # no local corpus available; sample the remote Solr index instead
        return random.sample(get_all_solr_dois(), count)
def get_article_metadata(article_file, size='small'):
    """
    Create a tuple of metadata fields for one article in the PLOS corpus.

    :param article_file: individual local PLOS XML article
    :param size: small, medium or large, aka how many fields to return for
        each article (currently unused; one fixed field set is returned)
    :return: 18-item metadata tuple, or False if the field count is wrong
    """
    article = Article.from_filename(article_file)
    # BUG FIX: the original used rstrip('.xml'), which strips any trailing
    # run of the characters '.', 'x', 'm', 'l' and can eat the end of a
    # legitimate filename; strip the '.xml' suffix explicitly instead.
    filename = os.path.basename(article.filename)
    if filename.endswith('.xml'):
        filename = filename[:-len('.xml')]
    dates = article.get_dates()
    counts = article.counts
    # dict.get with a default replaces the original chain of six
    # try/except KeyError blocks; missing fields stay empty strings.
    metadata = (article.doi,
                filename,
                article.title,
                article.journal,
                article.type_,            # JATS article type
                article.plostype,         # internal PLOS article type
                article.dtd,              # DTD version
                article.pubdate,
                article.revdate,
                dates.get('received', ''),
                dates.get('accepted', ''),
                dates.get('collection', ''),
                counts.get('fig-count', ''),
                counts.get('table-count', ''),
                counts.get('page-count', ''),
                article.word_count,
                article.related_dois,
                article.abstract)
    if len(metadata) == 18:
        return metadata
    else:
        print('Error in {}: {} items'.format(article_file, len(metadata)))
        return False
def get_corpus_metadata(article_list=None, directory=None):
    """
    Run get_article_metadata() on a list of files, by default every file in directory.
    Includes a progress bar.

    :param article_list: list of articles to run it on
    :param directory: directory of articles, defaults to get_corpus_dir()
    :return: tuple of (list of metadata tuples, list of wrong-date dicts)
    """
    if directory is None:
        directory = get_corpus_dir()
    if article_list is None:
        article_list = listdir_nohidden(directory)
    corpus_metadata = [get_article_metadata(article_file)
                       for article_file in tqdm(article_list)]
    # BUG FIX: callers (corpus_metadata_to_csv, update_corpus_metadata_csv)
    # unpack this as `corpus_metadata, wrong_dates = ...`, but the original
    # returned a bare list, raising ValueError at those call sites (the
    # original docstring's TODO noted this). get_article_metadata() does not
    # currently report wrong-date records, so an empty list is the placeholder.
    wrong_dates = []
    return corpus_metadata, wrong_dates
def corpus_metadata_to_csv(corpus_metadata=None,
                           article_list=None,
                           wrong_dates=None,
                           csv_file='allofplos_metadata.csv',
                           directory=None
                           ):
    """
    Convert the list of metadata tuples from get_corpus_metadata() to csv.

    :param corpus_metadata: list of metadata tuples; computed from
        article_list when None
    :param article_list: article files to compute metadata for when
        corpus_metadata is None
    :param wrong_dates: list of dicts describing wrong date orders; written
        to 'wrong_dates.csv' when truthy
    :param csv_file: output csv path, defaults to 'allofplos_metadata.csv'
    :param directory: directory of articles, defaults to get_corpus_dir()
    :return: None
    """
    if directory is None:
        directory = get_corpus_dir()
    if corpus_metadata is None:
        corpus_metadata, wrong_dates = get_corpus_metadata(article_list, directory=directory)
    # write main metadata csv file
    # newline='' is required by the csv module to avoid blank lines on Windows
    with open(csv_file, 'w', newline='') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(['doi', 'filename', 'title', 'journal', 'jats_article_type', 'plos_article_type',
                          'dtd_version', 'pubdate', 'revdate', 'received', 'accepted', 'collection', 'fig_count', 'table_count',
                          'page_count', 'body_word_count', 'related_article', 'abstract'])
        for row in corpus_metadata:
            csv_out.writerow(row)
    # write wrong dates csv file, with longest dict providing the keys
    if wrong_dates:
        keys = max(wrong_dates, key=len).keys()
        with open('wrong_dates.csv', 'w', newline='') as out:
            dict_writer = csv.DictWriter(out, keys)
            dict_writer.writeheader()
            dict_writer.writerows(wrong_dates)
def read_corpus_metadata_from_csv(csv_file='allofplos_metadata.csv'):
    """
    Read article metadata back in from a csv file, skipping the header row.

    :param csv_file: csv file of data, defaults to 'allofplos_metadata.csv'
    :return: list of tuples of article metadata
    """
    with open(csv_file, 'r') as infile:
        reader = csv.reader(infile)
        next(reader, None)  # drop the header row
        return [tuple(row) for row in reader]
def update_corpus_metadata_csv(csv_file='allofplos_metadata.csv', comparison_dois=None, directory=None):
    """
    Incrementally update the metadata of PLOS articles in the csv file.

    :param csv_file: csv file of data, defaults to 'allofplos_metadata.csv'
    :param comparison_dois: list of DOIs to check whether their metadata is included
    :param directory: directory of articles, defaults to get_corpus_dir()
    :return: updated corpus metadata
    """
    if directory is None:
        directory = get_corpus_dir()
    # Step 1: get metadata and DOI list from existing csv file
    try:
        corpus_metadata = read_corpus_metadata_from_csv(csv_file)
    except FileNotFoundError:
        corpus_metadata = []
    known_dois = {row[0] for row in corpus_metadata}
    # Step 2: compare DOI list with master list
    if comparison_dois is None:
        comparison_dois = get_all_solr_dois()
    dois_needed_list = list(set(comparison_dois) - known_dois)
    # Step 3: compare to local file list
    local_dois = {filename_to_doi(f) for f in listdir_nohidden(directory)}
    files_needed_list = list(set(dois_needed_list) - local_dois)
    if files_needed_list:
        print('Local corpus must be updated before .csv metadata can be updated.\nUpdating local corpus now')
        download_check_and_move(files_needed_list,
                                uncorrected_proofs_text_list,
                                tempdir=newarticledir,
                                destination=directory)
    # Step 4: append new data to existing list
    new_corpus_metadata, wrong_dates = get_corpus_metadata(article_list=dois_needed_list)
    corpus_metadata.extend(new_corpus_metadata)
    # Step 5: write new dataset to .csv
    corpus_metadata_to_csv(corpus_metadata=corpus_metadata, csv_file='allofplos_metadata_updated.csv')
    return corpus_metadata
|
nilq/baby-python
|
python
|
import json
import jk_json
import jk_typing
import jk_prettyprintobj
from thaniya_common.cfg import CfgKeyValueDefinition
from thaniya_common.cfg import AbstractCfgComponent
from .BackupVolumeID import BackupVolumeID
class _Magic(AbstractCfgComponent):
    """Header component identifying a Thaniya backup volume configuration file."""

    MAGIC = "thaniya-volume-cfg"

    __VALID_KEYS = [
        CfgKeyValueDefinition("magic", str, False),
        CfgKeyValueDefinition("version", int, False),
    ]

    def __init__(self):
        super().__init__(_Magic.__VALID_KEYS)
        # default header values; AbstractCfgComponent handles (de)serialization
        self._magic = _Magic.MAGIC      # str
        self._version = 1               # int
        self._comment = "This file is part of the Thaniya backup volume management system! Please do not edit this file manually!"
class _DataV1(AbstractCfgComponent):
    """Payload component holding the volume's identifying data (format version 1)."""

    __VALID_KEYS = [
        # CfgKeyValueDefinition("volumeGroup", str, False),  # NOTE: for future implementation; not yet used
        CfgKeyValueDefinition("volumeID", BackupVolumeID, False, BackupVolumeID.parseFromStr, str),
        CfgKeyValueDefinition("backupBaseDirPath", str, True),
        CfgKeyValueDefinition("isActive", bool, False),
    ]

    def __init__(self):
        super().__init__(_DataV1.__VALID_KEYS)
        # self._volumeGroup = None  # str  # NOTE: for future implementation; not yet used
        self._volumeID = None           # BackupVolumeID
        self._backupBaseDirPath = None  # str
        self._isActive = None           # bool
#
# Represents the contents of a backup volume information file.
#
class BackupVolumeCfgFile(jk_prettyprintobj.DumpMixin):
    """Represents the contents of a backup volume information file.

    The file consists of a "magic" header section and a version-1 data
    section; both are serialized to and from JSON.
    """

    ################################################################################################################################
    ## Constructor Method
    ################################################################################################################################

    def __init__(self):
        self._magic = _Magic()
        self._data = _DataV1()

    ################################################################################################################################
    ## Public Properties
    ################################################################################################################################

    @property
    def data(self) -> _DataV1:
        # BUG FIX: the original returned self._groups["data"], but no
        # attribute named `_groups` exists on this class — accessing the
        # property raised AttributeError. The data component is stored in
        # self._data (see __init__).
        return self._data

    ################################################################################################################################
    ## Helper Methods
    ################################################################################################################################

    def _dumpVarNames(self) -> list:
        # variables rendered by jk_prettyprintobj.DumpMixin
        return [
            "_magic",
            "_data",
        ]

    ################################################################################################################################
    ## Public Methods
    ################################################################################################################################

    def writeToFile(self, filePath:str):
        """Serialize this configuration to a pretty-printed JSON file."""
        assert isinstance(filePath, str)
        jk_json.saveToFilePretty(self.toJSON(), filePath)

    def toJSON(self) -> dict:
        """Return the JSON-serializable dict form of this configuration."""
        return {
            "magic": self._magic.toJSON(),
            "data": self._data.toJSON(),
        }

    def __str__(self):
        return json.dumps(self.toJSON(), indent="\t", sort_keys=True)

    @staticmethod
    def loadFromFile(filePath:str):
        """Load a configuration from a JSON file on disk."""
        assert isinstance(filePath, str)
        jData = jk_json.loadFromFile(filePath)
        return BackupVolumeCfgFile.loadFromJSON(jData)

    @staticmethod
    def loadFromJSON(jData:dict):
        """Construct a configuration from its JSON dict form.

        Raises AssertionError if the magic header or version do not match.
        """
        assert isinstance(jData, dict)
        ret = BackupVolumeCfgFile()
        ret._magic.loadFromJSON(jData["magic"])
        assert ret._magic._magic == _Magic.MAGIC
        assert ret._magic._version == 1
        ret._data.loadFromJSON(jData["data"])
        return ret

    #
    # Use this method to set a data value.
    #
    @jk_typing.checkFunctionSignature()
    def setValue(self, name:str, value):
        self._data.setValue(name, value)

    #
    # Use this method to read a data value.
    #
    @jk_typing.checkFunctionSignature()
    def getValue(self, name:str):
        return self._data.getValue(name)
|
nilq/baby-python
|
python
|
import os
from threading import Thread
from sh import tail
from pymouse import PyMouse
from datetime import datetime, timedelta
import subprocess
# Maximum cursor drift (pixels) still treated as "no movement" for tap detection
WAS_MOVED_SCATTER = 0
# in seconds
SHORT_PRESS = .15
MEDIUM_PRESS = .4
LONG_PRESS = .6
VERY_LONG_PRESS = 1
# raw touch-move units accumulated before one scroll wheel click is emitted
SCROLL_SENSITIVITY = 10
# pointer acceleration: delta is scaled by MOVE_SENSITIVITY, then raised to MOVE_SCALING
MOVE_SENSITIVITY = .8
MOVE_SCALING = 1.4
def get_time_delta_in_microseconds(t1, t2):
    """Return the elapsed time from t1 to t2 in (fractional) seconds.

    Despite the historical name, the result is in seconds, not microseconds.
    Returns the sentinel value 666 when either timestamp is missing, which
    callers treat as "longer than any press threshold".

    :param t1: earlier datetime, or None
    :param t2: later datetime, or None
    """
    if t1 is None or t2 is None:
        return 666
    # BUG FIX: the original summed .seconds and .microseconds, silently
    # dropping the .days component of the timedelta; total_seconds()
    # accounts for the whole interval.
    return (t2 - t1).total_seconds()
m = PyMouse()
# screen position where the current touch gesture started
cur_start_pos = list(m.position())
# first touch coordinate seen in the current gesture (None = no touch yet)
cur_anchor_x = None
cur_anchor_y = None
# previous raw touch coordinates, used to compute per-event deltas
pre_x = None
pre_y = None
def wasnt_moved():
    """Return True while the current gesture has produced no movement.

    A gesture counts as "not moved" while neither touch anchor has been set.
    """
    # NOTE(review): the original function also contained a distance check of
    # m.position() against cur_start_pos (with debug prints) AFTER an
    # unconditional return True/return False pair, making it unreachable dead
    # code; it has been removed here without changing behavior. If the
    # WAS_MOVED_SCATTER distance check was the intended behavior, restoring
    # it would change tap/hold detection and must be done deliberately.
    return cur_anchor_y is None and cur_anchor_x is None
# running accumulator of raw vertical touch deltas; one wheel click is
# emitted per SCROLL_SENSITIVITY units
scroll_counter = 0

def scroll(val):
    """Accumulate a raw scroll delta and emit mouse wheel clicks.

    Buttons 4/5 are wheel-up/wheel-down in the X11 convention.
    """
    global scroll_counter
    scroll_counter += val
    if scroll_counter > SCROLL_SENSITIVITY:
        # emit one wheel-up click per full SCROLL_SENSITIVITY step
        while scroll_counter > SCROLL_SENSITIVITY:
            scroll_counter -= SCROLL_SENSITIVITY
            m.click((4))
    elif scroll_counter < -SCROLL_SENSITIVITY:
        while scroll_counter < -SCROLL_SENSITIVITY:
            scroll_counter += SCROLL_SENSITIVITY
            m.click((5))
    # Thread(target = os.system, args = (adb_cmd, )).start()
# when True, the device's raw X axis maps to horizontal cursor movement
horizontal = False
# timestamps of the most recent touch-down / touch-up events
press_down_time = None
press_up_time = None
# consecutive quick taps seen so far (double-tap / five-tap detection)
count_click = 0
count_hold = 0
# gesture state flags: dragging, edge-scrolling, input disabled
is_holded = False
is_scrolled = False
is_stop = False
# adb command that streams raw touch events from the Android device
adb_cmd = 'adb shell getevent -l'.split()
# adb_cmd = 'adb shell getevent -l > event.log'
def sing(val):
    """Return the sign of val: -1 for negatives, +1 otherwise (including 0)."""
    return 1 if val >= 0 else -1
def execute(cmd):
    """Run cmd and yield its stdout line by line while it runs.

    :param cmd: argument list for subprocess.Popen
    :raises subprocess.CalledProcessError: if the process exits non-zero
    """
    # The context manager guarantees the stdout pipe is closed and the
    # process is waited on even if the consumer abandons the generator
    # or an exception propagates out of the loop.
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) as popen:
        for stdout_line in iter(popen.stdout.readline, ""):
            yield stdout_line
    return_code = popen.wait()
    if return_code:
        raise subprocess.CalledProcessError(return_code, cmd)
# Example
# for path in execute(["locate", "a"]):
# print(path, end="")
# Main event loop: parse raw Android `getevent -l` lines streamed over adb
# and translate touch gestures into desktop mouse actions.
# NOTE(review): the original paste's indentation was mangled; the nesting
# below was reconstructed from the symmetric X/Y branches — verify against
# the original source before relying on edge-case behavior.
# runs forever
# for line in tail("-f", "event.log", _iter=True):
for line in execute(adb_cmd):
    # Axis-1 branch: the raw coordinate that maps to horizontal cursor
    # movement (raw X when `horizontal`, raw Y otherwise).
    if not is_stop and (
            'ABS_MT_POSITION_X' in line and horizontal or
            'ABS_MT_POSITION_Y' in line and not horizontal):
        unpress_time = get_time_delta_in_microseconds(
            press_down_time, datetime.now()
        )
        if unpress_time > SHORT_PRESS:
            count_click = 0  # too slow to be part of a multi-tap
        # tap then touch again without moving => start a drag (press & hold)
        if (count_click == 1 and wasnt_moved() and not is_holded):
            is_holded = True
            m.press()
        val = int(line.split()[3], 16)  # hex coordinate from getevent
        if (pre_x != None):
            d_x = val - pre_x
            pre_x = val
        else:
            # first sample of the gesture: no delta available yet
            pre_x = val
            if not is_scrolled:
                continue
        if not cur_anchor_x:
            cur_anchor_x = val
        elif not is_scrolled:
            # acceleration: scale the delta, then apply a power curve
            d_x = pow(abs(d_x) * MOVE_SENSITIVITY, MOVE_SCALING) * sing(d_x)
            m.move_dx(round(d_x))
    # Axis-2 branch: the raw coordinate that maps to vertical movement
    # or, in edge-scroll mode, to wheel scrolling.
    elif not is_stop and (
            'ABS_MT_POSITION_Y' in line and horizontal or
            'ABS_MT_POSITION_X' in line and not horizontal):
        unpress_time = get_time_delta_in_microseconds(
            press_down_time, datetime.now()
        )
        if unpress_time > SHORT_PRESS:
            count_click = 0
        # long stationary press => start a drag (press & hold)
        if (count_click == 0 and wasnt_moved() and not is_holded and press_down_time != None):
            press_time = get_time_delta_in_microseconds(
                press_down_time, datetime.now()
            )
            if press_time > LONG_PRESS:
                is_holded = True
                m.press()
        val = int(line.split()[3], 16)
        if (pre_y != None):
            d_y = val - pre_y
            pre_y = val
        else:
            pre_y = val
            continue  # first sample: no delta available yet
        if not cur_anchor_y:
            cur_anchor_y = val
        elif is_scrolled:
            scroll(d_y)  # right-edge gesture: emit wheel clicks
        else:
            rev = -1 if not horizontal else 1  # invert axis unless rotated
            d_y = pow(abs(d_y) * MOVE_SENSITIVITY, MOVE_SCALING) * sing(d_y) * rev
            m.move_dy(round(d_y))
        pre_y = val
    # Touch up/down transitions: tap, double-tap, middle-click, 5-tap toggle.
    elif 'BTN_TOUCH' in line:
        cur_start_pos = list(m.position())
        val = line.split()[3]
        if val == 'UP':
            if wasnt_moved() and not is_scrolled:
                press_time = get_time_delta_in_microseconds(
                    press_down_time, datetime.now()
                )
                if press_time < SHORT_PRESS:
                    count_click += 1
                elif not is_stop and press_time < MEDIUM_PRESS:
                    m.click(2)  # medium-length press => middle click
                else:
                    count_click = 0
                if not is_stop and count_click == 1:
                    m.click()
                if not is_stop and count_click == 2:
                    m.click()
                elif count_click == 5:
                    is_stop = not is_stop  # five quick taps toggle pause mode
            else:
                count_click = 0
            if is_holded:
                is_holded = False
                m.release()  # end of drag
            is_scrolled = False
            press_up_time = datetime.now()
            press_down_time = None
        else:
            unpress_time = get_time_delta_in_microseconds(
                press_up_time, datetime.now()
            )
            if unpress_time > SHORT_PRESS:
                count_click = 0
            # touches starting near the right edge (raw x > 1600) scroll
            if not horizontal and pre_x > 1600:
                is_scrolled = True
            press_down_time = datetime.now()
            press_up_time = None
            # count_hold += 1
            # m.press()
        # reset the gesture anchors on every touch transition
        cur_anchor_x = None
        cur_anchor_y = None
    elif 'ABS_MT_PRESSURE' in line:
        pass
        # print(line, end = '')
    # print(line)
|
nilq/baby-python
|
python
|
# Write a Python program to add leading zeroes to a string.
string = '5699'
print()
# BUG FIX: the original used str.ljust(7, '0'), which pads on the RIGHT
# ('5699000'); str.zfill pads on the left, producing the leading zeroes
# the exercise asks for ('0005699').
print(string.zfill(7))
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class DbSearchConfig(AppConfig):
    """Django application configuration for the `db_search` app."""
    # label Django uses to register this application
    name = 'db_search'
|
nilq/baby-python
|
python
|
import os
import shutil
import unittest
import pandas as pd
from python_tools.workflow_tools.qc.fingerprinting import (
read_csv,
plot_genotyping_matrix
)
class FingerprintingTestCase(unittest.TestCase):
    """Tests for the fingerprinting QC plotting helpers.

    Runs inside the 'test__fingerprinting' directory and writes plot output
    to a throwaway './test_output' directory recreated per test.
    """

    def setUp(self):
        """
        Set some constants used for testing
        :return:
        """
        # CD into this test module if running all tests together
        if os.path.isdir('test__fingerprinting'):
            os.chdir('test__fingerprinting')
        # Set up test outputs directory
        os.mkdir('./test_output')

    def tearDown(self):
        """
        Remove test outputs after each test
        :return:
        """
        shutil.rmtree('./test_output')
        # Move back up to main test dir
        os.chdir('..')

    def test_plot_genotpying_matrix(self):
        # NOTE(review): the method name has a typo ("genotpying"); left
        # unchanged because renaming would alter test discovery/reporting.
        geno_compare = read_csv('./test_data/Geno_compare.txt')
        title_file = pd.read_csv('./test_data/title_file.txt')
        # smoke test: succeeds if plotting raises no exception
        plot_genotyping_matrix(geno_compare, './test_output/', title_file)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# SPDX-License-Identifier: Apache-2.0
# Copyright © 2020 VMware, Inc.
import os
import sys
import subprocess
import time
import shutil
import configparser
import pytest
import collections
import unittest
# directory where nmctl writes generated systemd-networkd unit files
networkd_unit_file_path = '/etc/systemd/network'
# scratch locations used by the CI run (fixtures live under the yaml path)
network_config_manager_ci_path = '/run/network-config-manager-ci'
network_config_manager_ci_yaml_path = '/run/network-config-manager-ci/yaml'
# network-config-manager's own configuration locations
network_config_manager_config_path = '/etc/network-config-manager'
network_config_manager_yaml_config_path = '/etc/network-config-manager/yaml'
network_config_manager_wpa_supplilant_conf_file = '/etc/network-config-manager/wpa_supplicant.conf'
# Unit files the tests may generate; removed between tests so runs stay clean.
# BUG FIX: the original list was missing the trailing comma on eight entries,
# so Python's implicit string concatenation silently fused adjacent names
# (e.g. '10-bond-98.network10-macvlan-98.netdev') and those unit files were
# never cleaned up.
units = ["10-test99.network",
         "10-test98.network",
         '10-test-99.network',
         "10-wlan1.network",
         "10-wlan0.network",
         '10-test98.network',
         '10-vlan-98.network',
         '10-vlan-98.netdev',
         '10-vlan-98.network',
         '10-vxlan-98.network',
         '10-vxlan-98.netdev',
         '10-bridge-98.netdev',
         '10-bridge-98.network',
         '10-bond-98.netdev',
         '10-bond-98.network',
         '10-macvlan-98.netdev',
         '10-macvlan-98.network',
         '10-macvtap-98.netdev',
         '10-macvtap-98.network',
         '10-ipvlan-98.netdev',
         '10-ipvtap-98.network',
         '10-vrf-98.netdev',
         '10-vrf-98.network',
         '10-veth-98.netdev',
         '10-veth-98.network',
         '10-ipip-98.netdev',
         '10-ipip-98.network',
         '10-sit-98.netdev',
         '10-sit-98.network',
         '10-gre-98.netdev',
         '10-gre-98.network',
         '10-vti-98.netdev',
         '10-vri-98.network',  # TODO(review): likely a typo for '10-vti-98.network' — confirm against the tests that create it
         '10-wg99.netdev',
         '10-wg99.network']
def link_exits(link):
    """Return True if a network interface named `link` exists (checked via /sys/class/net)."""
    sysfs_entry = os.path.join('/sys/class/net', link)
    return os.path.exists(sysfs_entry)
def link_remove(link):
    """Delete the network interface `link` if it currently exists."""
    if not os.path.exists(os.path.join('/sys/class/net', link)):
        return
    subprocess.call(['ip', 'link', 'del', 'dev', link])
def link_add_dummy(link):
    """Create a dummy network interface named `link` via `ip link add`."""
    cmd = ['ip', 'link', 'add', 'dev', link, 'type', 'dummy']
    subprocess.call(cmd)
def unit_exits(unit):
    """Return True if `unit` exists under the systemd-networkd unit directory."""
    unit_path = os.path.join(networkd_unit_file_path, unit)
    return os.path.exists(unit_path)
def wifi_wpa_supplilant_conf_exits():
    """Return True if the generated wpa_supplicant.conf file is present."""
    conf_path = network_config_manager_wpa_supplilant_conf_file
    return os.path.exists(conf_path)
def remove_units_from_netword_unit_path():
    """Delete every unit file listed in `units` from the networkd unit directory."""
    for unit in units:
        unit_path = os.path.join(networkd_unit_file_path, unit)
        if os.path.exists(unit_path):
            os.remove(unit_path)
def restart_networkd():
    """Restart systemd-networkd and give it a few seconds to settle."""
    subprocess.call(['systemctl', 'restart', 'systemd-networkd'])
    # Idiom fix: sleep in-process instead of shelling out to /bin/sleep
    # (the `time` module is already imported at the top of this file).
    time.sleep(5)
def dequote(s):
    """Strip double-quote characters from s.

    Strings shorter than 2 characters are returned unchanged, as they
    cannot carry a surrounding quote pair.
    """
    if len(s) < 2:
        # BUG FIX: the original returned the undefined name `v` here,
        # raising NameError for empty or single-character inputs.
        return s
    s = s.replace('"', '')
    return s
def read_wpa_supplicant_conf(conf_file):
    """Parse the first `network { ... }` block of a wpa_supplicant.conf file.

    :param conf_file: path to the configuration file
    :return: OrderedDict of key/value pairs from the network block, or None
        if the file is missing or no block was opened
    """
    if not os.path.isfile(conf_file):
        print("File path {} does not exist".format(conf_file))
        return None
    networks = None
    with open(conf_file) as fp:
        for raw_line in fp:
            line = raw_line.strip()
            if not line or line.startswith('#'):
                continue  # skip blanks and comments
            if line.startswith('network'):
                networks = collections.OrderedDict()
                continue
            if line.startswith('}'):
                break  # end of the network block
            if networks is None:
                continue  # not inside a network block yet
            key, value = line.split('=', 1)
            networks[key.strip()] = dequote(value.strip())
    return networks
class TestNetworkConfigManagerYAML:
    """End-to-end tests: stage YAML fixtures, run `nmctl apply-yaml-config`,
    and verify the systemd-networkd unit files it generates."""

    # YAML fixtures that may be copied into the manager's yaml config dir
    yaml_configs = [
        "dhcp.yaml",
        "dhcp-client-identifier.yaml",
        "network-section-dhcp-section.yaml",
        "static-network.yaml",
        "static-route-network.yaml",
    ]

    def copy_yaml_file_to_netmanager_yaml_path(self, config_file):
        # stage a fixture where `nmctl apply-yaml-config` will pick it up
        shutil.copy(os.path.join(network_config_manager_ci_yaml_path, config_file), network_config_manager_yaml_config_path)

    def remove_units_from_netmanager_yaml_path(self):
        # drop every staged fixture so tests do not leak into each other
        for config_file in self.yaml_configs:
            if (os.path.exists(os.path.join(network_config_manager_yaml_config_path, config_file))):
                os.remove(os.path.join(network_config_manager_yaml_config_path, config_file))

    def setup_method(self):
        # recreate the dummy test99 interface fresh for every test
        link_remove('test99')
        link_add_dummy('test99')
        restart_networkd()

    def teardown_method(self):
        self.remove_units_from_netmanager_yaml_path()
        remove_units_from_netword_unit_path()

    def test_basic_dhcp(self):
        """A plain DHCP yaml should yield a unit with DHCP=yes."""
        self.copy_yaml_file_to_netmanager_yaml_path('dhcp.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DHCP') == 'yes')

    def test_dhcp_client_identifier(self):
        """ClientIdentifier from yaml should land in the [DHCPv4] section."""
        self.copy_yaml_file_to_netmanager_yaml_path('dhcp-client-identifier.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DHCP') == 'yes')
        assert(parser.get('DHCPv4', 'ClientIdentifier') == 'mac')

    def test_network_and_dhcp4_section(self):
        """[Network] and [DHCPv4] options should all be translated."""
        self.copy_yaml_file_to_netmanager_yaml_path('network-section-dhcp-section.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DHCP') == 'yes')
        assert(parser.get('Network', 'LLDP') == 'yes')
        assert(parser.get('Network', 'LinkLocalAddressing') == 'yes')
        assert(parser.get('Network', 'IPv6AcceptRA') == 'yes')
        assert(parser.get('DHCPv4', 'UseDNS') == 'yes')
        assert(parser.get('DHCPv4', 'UseDomains') == 'yes')
        assert(parser.get('DHCPv4', 'UseMTU') == 'yes')
        assert(parser.get('DHCPv4', 'UseNTP') == 'yes')

    def test_network_and_dhcp6_section(self):
        """[Network] and [DHCPv6] options should all be translated."""
        self.copy_yaml_file_to_netmanager_yaml_path('network-section-dhcp6-section.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DHCP') == 'yes')
        assert(parser.get('Network', 'LinkLocalAddressing') == 'yes')
        assert(parser.get('Network', 'IPv6AcceptRA') == 'yes')
        assert(parser.get('DHCPv6', 'UseDNS') == 'yes')
        assert(parser.get('DHCPv6', 'UseNTP') == 'yes')

    @pytest.mark.skip(reason="skipping")
    def test_network_static_configuration(self):
        """Static addressing yaml should yield Address/Route/DNS/NTP entries."""
        self.copy_yaml_file_to_netmanager_yaml_path('static-network.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DNS') == "8.8.8.8 192.168.0.1")
        assert(parser.get('Network', 'NTP') == "8.8.8.1 192.168.0.2")
        assert(parser.get('Address', 'Address') == '192.168.1.45/24')
        assert(parser.get('Route', 'Gateway') == '192.168.1.1/24')
        assert(parser.get('Route', 'GatewayOnlink') == 'yes')

    @pytest.mark.skip(reason="skipping")
    def test_network_static_route_configuration(self):
        """Static-route yaml should yield matching Address and Gateway."""
        self.copy_yaml_file_to_netmanager_yaml_path('static-route-network.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Address', 'Address') == '192.168.1.101/24')
        assert(parser.get('Route', 'Gateway') == '9.0.0.1')
class TestKernelCommandLine:
    """Tests for `nmctl generate-config-from-cmdline` (kernel ip= syntax)."""

    def teardown_method(self):
        remove_units_from_netword_unit_path()

    @pytest.mark.skip(reason="skipping")
    def test_network_kernel_command_line_ip_dhcp(self):
        ''' ip=<interface>:{dhcp|on|any|dhcp6|auto6} '''
        subprocess.check_call(['nmctl', 'generate-config-from-cmdline', 'ip=test99:dhcp'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DHCP') == 'ipv4')

    @pytest.mark.skip(reason="skipping")
    def test_network_kernel_command_line_multiple_ip_dhcp(self):
        ''' ip=<interface>:{dhcp|on|any|dhcp6|auto6} '''
        # two ip= arguments should produce one unit file per interface
        subprocess.check_call(['nmctl', 'generate-config-from-cmdline', 'ip=test99:dhcp ip=test98:dhcp'])
        assert(unit_exits('10-test99.network') == True)
        assert(unit_exits('10-test98.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DHCP') == 'ipv4')
        parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
        assert(parser.get('Match', 'Name') == 'test98')
        assert(parser.get('Network', 'DHCP') == 'ipv4')

    @pytest.mark.skip(reason="skipping")
    def test_network_kernel_command_line_ip_static(self):
        ''' ip=<client-IP>:[ <server-id>]:<gateway-IP>:<netmask>:<client_hostname>:<interface>:{none|off}'''
        subprocess.check_call(['nmctl', 'generate-config-from-cmdline', 'ip=192.168.1.34::192.168.1.1:::test99:dhcp'])
        assert(unit_exits('10-test99.network') == True)
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert(parser.get('Match', 'Name') == 'test99')
        assert(parser.get('Network', 'DHCP') == 'ipv4')
        assert(parser.get('Route', 'Gateway') == '192.168.1.1/32')
        assert(parser.get('Address', 'Address') == '192.168.1.34')
class TestCLINetwork:
    """Exercise nmctl's per-link network configuration commands.

    Every test starts from a freshly created dummy interface 'test99',
    drives one nmctl sub-command, and then asserts that the generated
    '10-test99.network' unit file contains the expected section/key values.
    All interaction goes through external commands, so these tests need a
    running network daemon and sufficient privileges.
    """

    def setup_method(self):
        # Recreate the dummy link so every test starts from a clean state.
        link_remove('test99')
        link_add_dummy('test99')
        restart_networkd()

    def teardown_method(self):
        # Drop generated unit files first, then the link itself.
        remove_units_from_netword_unit_path()
        link_remove('test99')

    @staticmethod
    def _read_unit(unit='10-test99.network'):
        """Return a ConfigParser loaded with the given networkd unit file."""
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, unit))
        return parser

    def _set_link_mode(self):
        """Put test99 under nmctl management and check the unit file exists."""
        assert link_exits('test99')
        subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
        assert unit_exits('10-test99.network')

    def test_cli_set_mtu(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-mtu', 'test99', '1400'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Link', 'MTUBytes') == '1400'

    def test_cli_set_mac(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-mac', 'test99', '00:0c:29:3a:bc:11'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Link', 'MACAddress') == '00:0c:29:3a:bc:11'

    def test_cli_set_dhcp_type(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-dhcp-mode', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'DHCP') == 'yes'

    def test_cli_set_dhcp_iaid(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-dhcp-mode', 'test99', 'ipv4'])
        subprocess.check_call(['nmctl', 'set-dhcp-iaid', 'test99', '5555'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('DHCPv4', 'IAID') == '5555'

    def test_cli_add_static_address(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'add-link-address', 'test99', 'address', '192.168.1.45/24', 'peer',
                               '192.168.1.46/24', 'dad', 'ipv4', 'scope', 'link', 'pref-lifetime', 'forever',
                               'prefix-route', 'yes', 'label', '3434'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Address', 'Address') == '192.168.1.45/24'
        assert conf.get('Address', 'Peer') == '192.168.1.46/24'
        assert conf.get('Address', 'Scope') == 'link'
        assert conf.get('Address', 'PreferredLifetime') == 'forever'
        assert conf.get('Address', 'AddPrefixRoute') == 'yes'
        assert conf.get('Address', 'DuplicateAddressDetection') == 'ipv4'
        assert conf.get('Address', 'Label') == '3434'

    def test_cli_add_default_gateway(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        # A static address must exist before a gateway can be added.
        subprocess.check_call(['nmctl', 'add-link-address', 'test99', 'address', '192.168.1.45/24', 'peer',
                               '192.168.1.46/24', 'dad', 'ipv4', 'scope', 'link', 'pref-lifetime', 'forever',
                               'prefix-route', 'yes', 'label', '3434'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Address', 'Address') == '192.168.1.45/24'
        subprocess.check_call(['nmctl', 'add-default-gateway', 'test99', 'gw', '192.168.1.1', 'onlink', 'true'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Route', 'Gateway') == '192.168.1.1'
        assert conf.get('Route', 'GatewayOnLink') == 'yes'

    def test_cli_add_route(self):
        self._set_link_mode()
        # A static address must exist before a route can be added.
        subprocess.check_call(['nmctl', 'add-link-address', 'test99', 'address', '192.168.1.45/24', 'peer',
                               '192.168.1.46/24', 'dad', 'ipv4', 'scope', 'link', 'pref-lifetime', 'forever',
                               'prefix-route', 'yes', 'label', '3434'])
        subprocess.check_call(['sleep', '5'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Address', 'Address') == '192.168.1.45/24'
        subprocess.check_call(['nmctl', 'add-route', 'test99', 'gw', '192.168.1.1', 'dest', '192.168.1.2', 'metric', '111', 'scope',
                               'link', 'mtu', '1400', 'table', 'local', 'proto', 'static', 'type', 'unicast', 'onlink', 'yes', 'ipv6-pref',
                               'medium', 'src', '192.168.1.4'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Route', 'Destination') == '192.168.1.2'
        assert conf.get('Route', 'Gateway') == '192.168.1.1'
        assert conf.get('Route', 'GatewayOnLink') == 'yes'
        assert conf.get('Route', 'Metric') == '111'
        assert conf.get('Route', 'MTUBytes') == '1400'
        assert conf.get('Route', 'Protocol') == 'static'
        assert conf.get('Route', 'Scope') == 'link'
        assert conf.get('Route', 'Table') == 'local'
        assert conf.get('Route', 'IPv6Preference') == 'medium'
        assert conf.get('Route', 'Source') == '192.168.1.4'

    def test_cli_add_routing_policy_rule(self):
        assert link_exits('test99')
        subprocess.check_call(['nmctl', 'add-rule', 'test99', 'table', '10', 'to', '192.168.1.2/24', 'from', '192.168.1.3/24',
                               'oif', 'test99', 'iif', 'test99', 'tos', '0x12'])
        assert unit_exits('10-test99.network')
        subprocess.check_call(['sleep', '5'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('RoutingPolicyRule', 'Table') == '10'
        assert conf.get('RoutingPolicyRule', 'From') == '192.168.1.3/24'
        assert conf.get('RoutingPolicyRule', 'To') == '192.168.1.2/24'
        assert conf.get('RoutingPolicyRule', 'TypeOfService') == '0x12'
        assert conf.get('RoutingPolicyRule', 'OutgoingInterface') == 'test99'
        assert conf.get('RoutingPolicyRule', 'IncomingInterface') == 'test99'

    def test_cli_add_dns(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '30'])
        subprocess.check_call(['nmctl', 'add-dns', 'test99', '192.168.1.45', '192.168.1.46'])
        conf = self._read_unit()
        # NOTE(review): only the Match section is verified here; the DNS
        # values written by add-dns are not asserted — confirm intent.
        assert conf.get('Match', 'Name') == 'test99'

    def test_cli_add_domain(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'add-domain', 'test99', 'domain1', 'domain2'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        # nmctl writes the domains in reverse argument order.
        assert conf.get('Network', 'Domains') == 'domain2 domain1'

    def test_cli_add_ntp(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'add-ntp', 'test99', '192.168.1.34', '192.168.1.45'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        # nmctl writes the NTP servers in reverse argument order.
        assert conf.get('Network', 'NTP') == '192.168.1.45 192.168.1.34'

    def test_cli_set_ntp(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-ntp', 'test99', '192.168.1.34', '192.168.1.45'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'NTP') == '192.168.1.45 192.168.1.34'

    def test_cli_set_ip_v6_router_advertisement(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-ipv6acceptra', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'IPv6AcceptRA') == 'true'

    def test_cli_set_link_local_addressing(self):
        self._set_link_mode()
        subprocess.check_call(['nmctl', 'set-link-local-address', 'test99', 'yes'])
        subprocess.check_call(['sleep', '5'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'LinkLocalAddressing') == 'true'

    def test_cli_set_ipv4_link_local_route(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-ipv4ll-route', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'IPv4LLRoute') == 'true'

    def test_cli_set_llmnr(self):
        self._set_link_mode()
        subprocess.check_call(['nmctl', 'set-llmnr', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'LLMNR') == 'true'

    def test_cli_set_multicast_dns(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-multicast-dns', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'MulticastDNS') == 'true'

    def test_cli_set_ip_masquerade(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-ipmasquerade', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'IPMasquerade') == 'true'

    def test_cli_set_dhcp4_client_identifier(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-dhcp4-client-identifier', 'test99', 'mac'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('DHCPv4', 'ClientIdentifier') == 'mac'

    def test_cli_set_dhcp4_use_dns(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-dhcp4-use-dns', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('DHCPv4', 'UseDNS') == 'true'

    def test_cli_set_dhcp4_use_mtu(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-dhcp4-use-mtu', 'test99', 'yes'])
        conf = self._read_unit()
        # TODO(review): unlike the sibling tests, no ('DHCPv4', 'UseMTU')
        # assertion follows — confirm whether that check was omitted by mistake.
        assert conf.get('Match', 'Name') == 'test99'

    def test_cli_set_dhcp4_use_domains(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-dhcp4-use-domains', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('DHCPv4', 'UseDomains') == 'true'

    def test_cli_set_dhcp4_use_ntp(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-dhcp4-use-ntp', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('DHCPv4', 'UseNTP') == 'true'

    def test_cli_set_dhcp4_use_routes(self):
        self._set_link_mode()
        subprocess.check_call(['nmctl', 'set-dhcp4-use-routes', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('DHCPv4', 'UseRoutes') == 'true'

    def test_cli_set_link_lldp(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-lldp', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'LLDP') == 'true'

    def test_cli_set_link_emit_lldp(self):
        self._set_link_mode()
        subprocess.check_call(['sleep', '5'])
        subprocess.check_call(['nmctl', 'set-emit-lldp', 'test99', 'yes'])
        conf = self._read_unit()
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'EmitLLDP') == 'true'
class TestCLIDHCPv4Server:
    """Verify nmctl's DHCPv4 server configuration on a dummy link."""

    def setup_method(self):
        # Fresh dummy interface for every test.
        link_remove('test99')
        link_add_dummy('test99')
        restart_networkd()

    def teardown_method(self):
        remove_units_from_netword_unit_path()
        link_remove('test99')

    def test_cli_configure_dhcpv4_server(self):
        assert link_exits('test99') == True
        subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
        assert unit_exits('10-test99.network') == True
        subprocess.check_call(['nmctl', 'add-dhcpv4-server', 'test99', 'pool-offset',
                               '10', 'pool-size', '20', 'default-lease-time', '100',
                               'max-lease-time', '200', 'emit-dns', 'yes', 'dns', '192.168.1.1',
                               'emit-router', 'yes'])
        subprocess.check_call(['sleep', '3'])
        conf = configparser.ConfigParser()
        conf.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        assert conf.get('Match', 'Name') == 'test99'
        assert conf.get('Network', 'DHCPServer') == 'yes'
        # Each CLI argument must end up as the matching DHCPServer key.
        for key, value in [('PoolOffset', '10'),
                           ('PoolSize', '20'),
                           ('DefaultLeaseTimeSec', '100'),
                           ('MaxLeaseTimeSec', '200'),
                           ('EmitDNS', 'yes'),
                           ('DNS', '192.168.1.1'),
                           ('EmitRouter', 'yes')]:
            assert conf.get('DHCPServer', key) == value
class TestCLIIPv6RA:
    """Verify nmctl's IPv6 router-advertisement configuration."""

    def setup_method(self):
        # Fresh dummy interface for every test.
        link_remove('test99')
        link_add_dummy('test99')
        restart_networkd()

    def teardown_method(self):
        remove_units_from_netword_unit_path()
        link_remove('test99')

    def test_cli_configure_ipv6ra(self):
        assert link_exits('test99') == True
        subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
        assert unit_exits('10-test99.network') == True
        subprocess.check_call(['nmctl', 'add-ipv6ra', 'test99', 'prefix', '2002:da8:1:0::/64',
                               'pref-lifetime', '100', 'valid-lifetime', '200', 'assign', 'yes',
                               'managed', 'yes', 'emit-dns', 'yes', 'dns', '2002:da8:1:0::1',
                               'domain', 'test.com', 'emit-domain', 'yes', 'dns-lifetime', '100', 'router-pref', 'medium',
                               'route-prefix', '2001:db1:fff::/64', 'route-lifetime', '1000'])
        subprocess.check_call(['sleep', '3'])
        conf = configparser.ConfigParser()
        conf.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
        # IPv6 addresses are expected back in compressed form
        # ('2002:da8:1:0::' -> '2002:da8:1::'), presumably canonicalized by nmctl.
        for section, key, value in [
                ('Match', 'Name', 'test99'),
                ('Network', 'IPv6SendRA', 'yes'),
                ('IPv6Prefix', 'Prefix', '2002:da8:1::/64'),
                ('IPv6Prefix', 'PreferredLifetimeSec', '100'),
                ('IPv6Prefix', 'ValidLifetimeSec', '200'),
                ('IPv6SendRA', 'RouterPreference', 'medium'),
                ('IPv6SendRA', 'DNS', '2002:da8:1::1'),
                ('IPv6SendRA', 'EmitDNS', 'yes'),
                ('IPv6SendRA', 'Assign', 'yes'),
                ('IPv6SendRA', 'DNSLifetimeSec', '100'),
                ('IPv6SendRA', 'Domains', 'test.com'),
                ('IPv6RoutePrefix', 'LifetimeSec', '1000'),
                ('IPv6RoutePrefix', 'Route', '2001:db1:fff::/64')]:
            assert conf.get(section, key) == value
class TestCLINetDev:
    def setup_method(self):
        # Recreate the dummy interface 'test98' so each test starts clean;
        # the daemon is restarted so it picks up the fresh link.
        link_remove('test98')
        link_add_dummy('test98')
        restart_networkd()
    def teardown_method(self):
        # Remove any generated unit files, then the dummy link itself.
        remove_units_from_netword_unit_path()
        link_remove('test98')
def test_cli_create_vlan(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-vlan', 'vlan-98', 'dev', 'test98', 'id', '11'])
assert(unit_exits('10-test98.network') == True)
assert(unit_exits('10-vlan-98.netdev') == True)
assert(unit_exits('10-vlan-98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '15'])
assert(link_exits('vlan-98') == True)
vlan_parser = configparser.ConfigParser()
vlan_parser.read(os.path.join(networkd_unit_file_path, '10-vlan-98.netdev'))
assert(vlan_parser.get('NetDev', 'Name') == 'vlan-98')
assert(vlan_parser.get('NetDev', 'kind') == 'vlan')
assert(vlan_parser.get('VLAN', 'id') == '11')
vlan_network_parser = configparser.ConfigParser()
vlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-vlan-98.network'))
assert(vlan_network_parser.get('Match', 'Name') == 'vlan-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'VLAN') == 'vlan-98')
link_remove('vlan-98')
def test_cli_create_macvlan(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-macvlan', 'macvlan-98', 'dev', 'test98', 'mode', 'private'])
assert(unit_exits('10-macvlan-98.netdev') == True)
assert(unit_exits('10-macvlan-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('macvlan-98') == True)
macvlan_parser = configparser.ConfigParser()
macvlan_parser.read(os.path.join(networkd_unit_file_path, '10-macvlan-98.netdev'))
assert(macvlan_parser.get('NetDev', 'Name') == 'macvlan-98')
assert(macvlan_parser.get('NetDev', 'kind') == 'macvlan')
assert(macvlan_parser.get('MACVLAN', 'Mode') == 'private')
macvlan_network_parser = configparser.ConfigParser()
macvlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-macvlan-98.network'))
assert(macvlan_network_parser.get('Match', 'Name') == 'macvlan-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'MACVLAN') == 'macvlan-98')
link_remove('macvlan-98')
def test_cli_create_macvtap(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-macvtap', 'macvtap-98', 'dev', 'test98', 'mode', 'private'])
assert(unit_exits('10-macvtap-98.netdev') == True)
assert(unit_exits('10-macvtap-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('macvtap-98') == True)
macvlan_parser = configparser.ConfigParser()
macvlan_parser.read(os.path.join(networkd_unit_file_path, '10-macvtap-98.netdev'))
assert(macvlan_parser.get('NetDev', 'Name') == 'macvtap-98')
assert(macvlan_parser.get('NetDev', 'kind') == 'macvtap')
assert(macvlan_parser.get('MACVTAP', 'Mode') == 'private')
macvlan_network_parser = configparser.ConfigParser()
macvlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-macvtap-98.network'))
assert(macvlan_network_parser.get('Match', 'Name') == 'macvtap-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'MACVTAP') == 'macvtap-98')
link_remove('macvtap-98')
def test_cli_create_ipvlan(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-ipvlan', 'ipvlan-98', 'dev', 'test98', 'mode', 'l2'])
assert(unit_exits('10-ipvlan-98.netdev') == True)
assert(unit_exits('10-ipvlan-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('ipvlan-98') == True)
ipvlan_parser = configparser.ConfigParser()
ipvlan_parser.read(os.path.join(networkd_unit_file_path, '10-ipvlan-98.netdev'))
assert(ipvlan_parser.get('NetDev', 'Name') == 'ipvlan-98')
assert(ipvlan_parser.get('NetDev', 'kind') == 'ipvlan')
assert(ipvlan_parser.get('IPVLAN', 'Mode') == 'L2')
ipvlan_network_parser = configparser.ConfigParser()
ipvlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-ipvlan-98.network'))
assert(ipvlan_network_parser.get('Match', 'Name') == 'ipvlan-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'IPVLAN') == 'ipvlan-98')
link_remove('ipvlan-98')
def test_cli_create_ipvtap(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-ipvtap', 'ipvtap-98', 'dev', 'test98', 'mode', 'l2'])
assert(unit_exits('10-ipvtap-98.netdev') == True)
assert(unit_exits('10-ipvtap-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('ipvtap-98') == True)
ipvtap_parser = configparser.ConfigParser()
ipvtap_parser.read(os.path.join(networkd_unit_file_path, '10-ipvtap-98.netdev'))
assert(ipvtap_parser.get('NetDev', 'Name') == 'ipvtap-98')
assert(ipvtap_parser.get('NetDev', 'kind') == 'ipvtap')
assert(ipvtap_parser.get('IPVTAP', 'Mode') == 'L2')
ipvtap_network_parser = configparser.ConfigParser()
ipvtap_network_parser.read(os.path.join(networkd_unit_file_path, '10-ipvtap-98.network'))
assert(ipvtap_network_parser.get('Match', 'Name') == 'ipvtap-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'IPVTAP') == 'ipvtap-98')
link_remove('ipvtap-98')
@pytest.mark.skip(reason="skipping")
def test_cli_create_vrf(self):
subprocess.check_call(['nmctl', 'create-vrf', 'vrf-98', 'table', '11'])
assert(unit_exits('10-vrf-98.netdev') == True)
assert(unit_exits('10-vrf-98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('vrf-98') == True)
vrf_parser = configparser.ConfigParser()
vrf_parser.read(os.path.join(networkd_unit_file_path, '10-vrf-98.netdev'))
assert(vrf_parser.get('NetDev', 'Name') == 'vrf-98')
assert(vrf_parser.get('NetDev', 'kind') == 'vrf')
assert(vrf_parser.get('VRF', 'Table') == '11')
vrf_network_parser = configparser.ConfigParser()
vrf_network_parser.read(os.path.join(networkd_unit_file_path, '10-vrf-98.network'))
assert(vrf_network_parser.get('Match', 'Name') == 'vrf-98')
link_remove('vrf-98')
def test_cli_create_veth(self):
subprocess.check_call(['nmctl', 'create-veth', 'veth-98', 'peer', 'veth-99'])
assert(unit_exits('10-veth-98.netdev') == True)
assert(unit_exits('10-veth-98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('veth-98') == True)
assert(link_exits('veth-99') == True)
vrf_parser = configparser.ConfigParser()
vrf_parser.read(os.path.join(networkd_unit_file_path, '10-veth-98.netdev'))
assert(vrf_parser.get('NetDev', 'Name') == 'veth-98')
assert(vrf_parser.get('NetDev', 'kind') == 'veth')
assert(vrf_parser.get('Peer', 'Name') == 'veth-99')
vrf_network_parser = configparser.ConfigParser()
vrf_network_parser.read(os.path.join(networkd_unit_file_path, '10-veth-98.network'))
assert(vrf_network_parser.get('Match', 'Name') == 'veth-98')
link_remove('veth-98')
def test_cli_create_ipip(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-ipip', 'ipip-98', 'dev', 'test98', 'local', '192.168.1.2', 'remote', '192.168.1.3'])
assert(unit_exits('10-ipip-98.netdev') == True)
assert(unit_exits('10-ipip-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('ipip-98') == True)
ipip_parser = configparser.ConfigParser()
ipip_parser.read(os.path.join(networkd_unit_file_path, '10-ipip-98.netdev'))
assert(ipip_parser.get('NetDev', 'Name') == 'ipip-98')
assert(ipip_parser.get('NetDev', 'kind') == 'ipip')
assert(ipip_parser.get('Tunnel', 'Local') == '192.168.1.2')
assert(ipip_parser.get('Tunnel', 'Remote') == '192.168.1.3')
ipip_network_parser = configparser.ConfigParser()
ipip_network_parser.read(os.path.join(networkd_unit_file_path, '10-ipip-98.network'))
assert(ipip_network_parser.get('Match', 'Name') == 'ipip-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'Tunnel') == 'ipip-98')
link_remove('ipip-98')
def test_cli_create_gre(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-gre', 'gre-98', 'dev', 'test98', 'local', '192.168.1.2', 'remote', '192.168.1.3'])
assert(unit_exits('10-gre-98.netdev') == True)
assert(unit_exits('10-gre-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('gre-98') == True)
gre_parser = configparser.ConfigParser()
gre_parser.read(os.path.join(networkd_unit_file_path, '10-gre-98.netdev'))
assert(gre_parser.get('NetDev', 'Name') == 'gre-98')
assert(gre_parser.get('NetDev', 'kind') == 'gre')
assert(gre_parser.get('Tunnel', 'Local') == '192.168.1.2')
assert(gre_parser.get('Tunnel', 'Remote') == '192.168.1.3')
gre_network_parser = configparser.ConfigParser()
gre_network_parser.read(os.path.join(networkd_unit_file_path, '10-gre-98.network'))
assert(gre_network_parser.get('Match', 'Name') == 'gre-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'Tunnel') == 'gre-98')
link_remove('gre-98')
def test_cli_create_gre(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-gre', 'gre-98', 'dev', 'test98', 'local', '192.168.1.2', 'remote', '192.168.1.3'])
assert(unit_exits('10-gre-98.netdev') == True)
assert(unit_exits('10-gre-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('gre-98') == True)
gre_parser = configparser.ConfigParser()
gre_parser.read(os.path.join(networkd_unit_file_path, '10-gre-98.netdev'))
assert(gre_parser.get('NetDev', 'Name') == 'gre-98')
assert(gre_parser.get('NetDev', 'kind') == 'gre')
assert(gre_parser.get('Tunnel', 'Local') == '192.168.1.2')
assert(gre_parser.get('Tunnel', 'Remote') == '192.168.1.3')
gre_network_parser = configparser.ConfigParser()
gre_network_parser.read(os.path.join(networkd_unit_file_path, '10-gre-98.network'))
assert(gre_network_parser.get('Match', 'Name') == 'gre-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'Tunnel') == 'gre-98')
link_remove('gre-98')
def test_cli_create_vti(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-vti', 'vti-98', 'dev', 'test98', 'local', '192.168.1.2', 'remote', '192.168.1.3'])
assert(unit_exits('10-vti-98.netdev') == True)
assert(unit_exits('10-vti-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('vti-98') == True)
vti_parser = configparser.ConfigParser()
vti_parser.read(os.path.join(networkd_unit_file_path, '10-vti-98.netdev'))
assert(vti_parser.get('NetDev', 'Name') == 'vti-98')
assert(vti_parser.get('NetDev', 'kind') == 'vti')
assert(vti_parser.get('Tunnel', 'Local') == '192.168.1.2')
assert(vti_parser.get('Tunnel', 'Remote') == '192.168.1.3')
vti_network_parser = configparser.ConfigParser()
vti_network_parser.read(os.path.join(networkd_unit_file_path, '10-vti-98.network'))
assert(vti_network_parser.get('Match', 'Name') == 'vti-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'Tunnel') == 'vti-98')
link_remove('vti-98')
@pytest.mark.skip(reason="skipping")
def test_cli_create_wireguard(self):
subprocess.check_call(['nmctl', 'create-wg', 'wg99', 'private-key', 'EEGlnEPYJV//kbvvIqxKkQwOiS+UENyPncC4bF46ong=', 'listen-port', '32', 'public-key', 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA', 'endpoint', '192.168.3.56:2000', 'allowed-ips', '192.168.1.2'])
assert(unit_exits('10-wg99.netdev') == True)
assert(unit_exits('10-wg99.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '15'])
assert(link_exits('wg99') == True)
wg_parser = configparser.ConfigParser()
wg_parser.read(os.path.join(networkd_unit_file_path, '10-wg99.netdev'))
assert(wg_parser.get('NetDev', 'Name') == 'wg99')
assert(wg_parser.get('NetDev', 'kind') == 'wireguard')
assert(wg_parser.get('WireGuard', 'PrivateKey') == 'EEGlnEPYJV//kbvvIqxKkQwOiS+UENyPncC4bF46ong=')
assert(wg_parser.get('WireGuard', 'ListenPort') == '32')
assert(wg_parser.get('WireGuardPeer', 'PublicKey') == 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA')
assert(wg_parser.get('WireGuardPeer', 'Endpoint') == '192.168.3.56:2000')
assert(wg_parser.get('WireGuardPeer', 'AllowedIPs') == '192.168.1.2')
network_parser = configparser.ConfigParser()
network_parser.read(os.path.join(networkd_unit_file_path, '10-wg99.network'))
assert(network_parser.get('Match', 'Name') == 'wg99')
link_remove('wg99')
def test_cli_create_vxlan(self):
    """Create a VXLAN on top of test98 via nmctl and verify the generated units."""
    assert link_exits('test98') == True
    subprocess.check_call(['nmctl', 'create-vxlan', 'vxlan-98', 'dev', 'test98',
                           'vni', '32', 'local', '192.168.1.2',
                           'remote', '192.168.1.3', 'port', '7777'])
    for unit in ('10-test98.network', '10-vxlan-98.network', '10-vxlan-98.netdev'):
        assert unit_exits(unit) == True
    restart_networkd()
    subprocess.check_call(['sleep', '15'])
    assert link_exits('vxlan-98') == True
    # The .netdev unit must carry all the VXLAN parameters given above.
    netdev = configparser.ConfigParser()
    netdev.read(os.path.join(networkd_unit_file_path, '10-vxlan-98.netdev'))
    assert netdev.get('NetDev', 'Name') == 'vxlan-98'
    assert netdev.get('NetDev', 'kind') == 'vxlan'
    assert netdev.get('VXLAN', 'VNI') == '32'
    assert netdev.get('VXLAN', 'Local') == '192.168.1.2'
    assert netdev.get('VXLAN', 'Remote') == '192.168.1.3'
    assert netdev.get('VXLAN', 'DestinationPort') == '7777'
    vxlan_network = configparser.ConfigParser()
    vxlan_network.read(os.path.join(networkd_unit_file_path, '10-vxlan-98.network'))
    assert vxlan_network.get('Match', 'Name') == 'vxlan-98'
    # The underlying device's unit must reference the new VXLAN.
    test98 = configparser.ConfigParser()
    test98.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
    assert test98.get('Match', 'Name') == 'test98'
    assert test98.get('Network', 'VXLAN') == 'vxlan-98'
    link_remove('vxlan-98')
def test_cli_create_bridge(self):
    """Enslave test98 and test-99 to a freshly created bridge and verify units."""
    link_add_dummy('test-99')
    assert link_exits('test98') == True
    assert link_exits('test-99') == True
    subprocess.check_call(['nmctl', 'create-bridge', 'bridge-98', 'test98', 'test-99'])
    for unit in ('10-test98.network', '10-test-99.network',
                 '10-bridge-98.network', '10-bridge-98.netdev'):
        assert unit_exits(unit) == True
    subprocess.check_call(['sleep', '3'])
    assert link_exits('bridge-98') == True
    netdev = configparser.ConfigParser()
    netdev.read(os.path.join(networkd_unit_file_path, '10-bridge-98.netdev'))
    assert netdev.get('NetDev', 'Name') == 'bridge-98'
    assert netdev.get('NetDev', 'kind') == 'bridge'
    bridge_network = configparser.ConfigParser()
    bridge_network.read(os.path.join(networkd_unit_file_path, '10-bridge-98.network'))
    assert bridge_network.get('Match', 'Name') == 'bridge-98'
    # Both member links must point back at the bridge.
    for link in ('test98', 'test-99'):
        member = configparser.ConfigParser()
        member.read(os.path.join(networkd_unit_file_path, '10-' + link + '.network'))
        assert member.get('Match', 'Name') == link
        assert member.get('Network', 'Bridge') == 'bridge-98'
    link_remove('bridge-98')
    link_remove('test-99')
def test_cli_create_bond(self):
    """Enslave test98 and test-99 to a balance-rr bond and verify the units."""
    link_add_dummy('test-99')
    assert link_exits('test98') == True
    assert link_exits('test-99') == True
    subprocess.check_call(['nmctl', 'create-bond', 'bond-98', 'mode', 'balance-rr', 'test98', 'test-99'])
    for unit in ('10-test98.network', '10-test-99.network',
                 '10-bond-98.network', '10-bond-98.netdev'):
        assert unit_exits(unit) == True
    subprocess.check_call(['sleep', '3'])
    assert link_exits('bond-98') == True
    netdev = configparser.ConfigParser()
    netdev.read(os.path.join(networkd_unit_file_path, '10-bond-98.netdev'))
    assert netdev.get('NetDev', 'Name') == 'bond-98'
    assert netdev.get('NetDev', 'kind') == 'bond'
    assert netdev.get('Bond', 'Mode') == 'balance-rr'
    bond_network = configparser.ConfigParser()
    bond_network.read(os.path.join(networkd_unit_file_path, '10-bond-98.network'))
    assert bond_network.get('Match', 'Name') == 'bond-98'
    # Both member links must point back at the bond.
    for link in ('test98', 'test-99'):
        member = configparser.ConfigParser()
        member.read(os.path.join(networkd_unit_file_path, '10-' + link + '.network'))
        assert member.get('Match', 'Name') == link
        assert member.get('Network', 'Bond') == 'bond-98'
    link_remove('bond-98')
    link_remove('test-99')
class TestCLIGlobalDNSDomain:
    """Verify nmctl's global DNS server and search-domain configuration."""

    def test_cli_configure_global_dns_server(self):
        """Global DNS servers given to nmctl must appear in resolved.conf."""
        subprocess.check_call(['nmctl', 'add-dns', 'global', '8.8.4.4', '8.8.8.8', '8.8.8.1', '8.8.8.2'])
        subprocess.check_call(['sleep', '3'])
        resolved = configparser.ConfigParser()
        resolved.read('/etc/systemd/resolved.conf')
        # Note: the tool emits the servers in sorted order, not input order.
        assert resolved.get('Resolve', 'DNS') == '8.8.4.4 8.8.8.1 8.8.8.2 8.8.8.8'

    def test_cli_configure_global_domain_server(self):
        """Global search domains given to nmctl must appear in resolved.conf."""
        subprocess.check_call(['nmctl', 'add-domain', 'global', 'test1', 'test2'])
        subprocess.check_call(['sleep', '3'])
        resolved = configparser.ConfigParser()
        resolved.read('/etc/systemd/resolved.conf')
        assert resolved.get('Resolve', 'Domains') == 'test1 test2'
class TestCLINetworkProxy:
    """Verify nmctl proxy configuration written to /etc/sysconfig/proxy."""

    def test_cli_configure_network_proxy(self):
        """Setting HTTP/HTTPS proxies via nmctl must update the proxy file."""
        # Seed a pristine proxy file so the test starts from a known state.
        if not os.path.exists("/etc/sysconfig/"):
            os.mkdir("/etc/sysconfig/")
        # Bug fix: the original wrote `HTTP_PROXY=""` inside a double-quoted
        # literal, so the inner "" concatenated away and no quotes were
        # written at all. A single-quoted literal keeps the quotes intact.
        with open("/etc/sysconfig/proxy", "w") as f:
            f.write('PROXY_ENABLED="no"\nHTTP_PROXY=""\nHTTPS_PROXY=""\nNO_PROXY="localhost, 127.0.0.1"\n')
        subprocess.check_call(['nmctl', 'set-proxy', 'enable', 'yes', 'http', 'http://test.com:123', 'https', 'https://test.com:123'])
        dictionary = {}
        # Context manager releases the handle (the original leaked it open).
        with open("/etc/sysconfig/proxy") as file:
            lines = file.read().split('\n')
        for line in lines:
            if line == '':
                continue
            # Split only on the first '=' so values containing '=' survive.
            key, _, value = line.partition('=')
            dictionary[key.strip('\'"')] = value.strip('\'"')
        assert dictionary["HTTP_PROXY"] == "http://test.com:123"
        assert dictionary["HTTPS_PROXY"] == "https://test.com:123"
        assert dictionary["PROXY_ENABLED"] == "yes"
        subprocess.check_call(['nmctl', 'set-proxy', 'enable', 'yes', 'http', 'http://test.com:123', 'ftp', 'https://test.com123'])
class TestWifiWPASupplicantConf:
    """Verify that applying Wi-Fi yaml configs produces networkd units and a
    matching wpa_supplicant configuration."""

    # All yaml fixtures this class may stage; used for cleanup.
    yaml_configs = [
        "name-password-wifi-dhcp.yaml",
        "name-password-wifi-static.yaml",
        "wpa-eap-tls-wifi.yaml",
        "wpa-eap-ttls.yaml",
    ]

    def copy_yaml_file_to_netmanager_yaml_path(self, config_file):
        """Stage one CI yaml fixture into the manager's yaml config directory."""
        shutil.copy(os.path.join(network_config_manager_ci_yaml_path, config_file), network_config_manager_yaml_config_path)

    def remove_units_from_netmanager_yaml_path(self):
        """Delete any staged yaml fixtures left over from a test."""
        for config_file in self.yaml_configs:
            path = os.path.join(network_config_manager_yaml_config_path, config_file)
            if os.path.exists(path):
                os.remove(path)

    def teardown_method(self):
        """Clean generated units and staged yaml files after every test."""
        remove_units_from_netword_unit_path()
        self.remove_units_from_netmanager_yaml_path()

    def test_wifi_wpa_supplicant_name_password_dhcp(self):
        """SSID/password DHCP yaml generates a DHCP unit and wpa config."""
        self.copy_yaml_file_to_netmanager_yaml_path('name-password-wifi-dhcp.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert unit_exits('10-wlan1.network') == True
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-wlan1.network'))
        assert parser.get('Match', 'Name') == 'wlan1'
        assert parser.get('Network', 'DHCP') == 'yes'
        assert wifi_wpa_supplilant_conf_exits() == True
        network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
        # Consistency fix: guard against a missing network block, like the
        # sibling tests already do.
        assert network is not None
        assert network["ssid"] == "network_ssid_name1"
        assert network["password"] == "test123"

    def test_wifi_wpa_supplicant_name_password_static(self):
        """SSID/password static-address yaml generates route and wpa config."""
        self.copy_yaml_file_to_netmanager_yaml_path('name-password-wifi-static.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert unit_exits('10-wlan1.network') == True
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-wlan1.network'))
        assert parser.get('Match', 'Name') == 'wlan1'
        assert parser.get('Route', 'Gateway') == '192.168.1.1/24'
        assert parser.get('Route', 'GatewayOnlink') == 'yes'
        assert wifi_wpa_supplilant_conf_exits() == True
        network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
        assert network is not None
        assert network["ssid"] == "network_ssid_name1"
        assert network["password"] == "test123"

    @pytest.mark.skip(reason="skipping")
    def test_wifi_wpa_supplicant_eap_tls_dhcp(self):
        """EAP-TLS yaml generates certificate-based wpa_supplicant settings."""
        self.copy_yaml_file_to_netmanager_yaml_path('wpa-eap-tls-wifi.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert unit_exits('10-wlan1.network') == True
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-wlan1.network'))
        assert parser.get('Match', 'Name') == 'wlan1'
        assert parser.get('Network', 'DHCP') == 'yes'
        assert wifi_wpa_supplilant_conf_exits() == True
        network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
        assert network is not None
        assert network["ssid"] == "network_ssid_name1"
        assert network["eap"] == "PEAP"
        assert network["identity"] == "cert-max@test.example.com"
        assert network["anonymous_identity"] == "@test.example.com"
        assert network["ca_cert"] == "/etc/ssl/cust-cacrt.pem"
        assert network["client_cert"] == "/etc/ssl/cust-crt.pem"
        assert network["private_key"] == "/etc/ssl/cust-key.pem"
        assert network["private_key_passwd"] == "QZTrSEtq:h_d.W7_"

    def test_wifi_wpa_supplicant_eap_ttls_dhcp(self):
        """EAP-TTLS yaml generates identity-based wpa_supplicant settings."""
        self.copy_yaml_file_to_netmanager_yaml_path('wpa-eap-ttls.yaml')
        subprocess.check_call(['nmctl', 'apply-yaml-config'])
        assert unit_exits('10-wlan0.network') == True
        parser = configparser.ConfigParser()
        parser.read(os.path.join(networkd_unit_file_path, '10-wlan0.network'))
        assert parser.get('Match', 'Name') == 'wlan0'
        assert parser.get('Network', 'DHCP') == 'yes'
        assert wifi_wpa_supplilant_conf_exits() == True
        network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
        assert network is not None
        assert network["ssid"] == "network_ssid_name1"
        assert network["identity"] == "max@internal.example.com"
        assert network["anonymous_identity"] == "@test.example.com"
        assert network["password"] == "test123"
class TestNFTable(unittest.TestCase):
    """Exercise nmctl's nftables table, chain and rule management.

    The original class repeated the same table/chain setup and `nft`
    verification in every test; the shared steps are factored into private
    helpers while keeping every public test method and its command
    sequence unchanged.
    """

    def tearDown(self):
        # Best effort: the table may already be gone, so ignore failures.
        subprocess.call(['nft', 'delete', 'table', 'testtable99'])

    def _nft_tables(self):
        """Return (and echo) the output of `nft list tables`."""
        output = subprocess.check_output(['nft', 'list', 'tables'], universal_newlines=True).rstrip()
        print(output)
        return output

    def _nft_table(self):
        """Return (and echo) the output of `nft list table testtable99`."""
        output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
        print(output)
        return output

    def _add_table(self):
        """Create testtable99 via nmctl."""
        subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])

    def _add_chain(self):
        """Create testchain99 inside testtable99 and assert nmctl lists both."""
        subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
        output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
        print(output)
        self.assertRegex(output, 'testtable99')
        self.assertRegex(output, 'testchain99')

    def _add_rule(self, proto, port_kind, verdict):
        """Add one rule via nmctl and assert `nft` shows the counter rule."""
        subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', proto, port_kind, '9999', verdict])
        output = self._nft_table()
        self.assertRegex(output, f'{proto} {port_kind} 9999 counter packets 0 bytes 0 {verdict}')

    def test_nmctl_add_table(self):
        self._add_table()
        self.assertRegex(self._nft_tables(), 'table ip testtable99')

    def test_nmctl_show_table(self):
        self._add_table()
        output = subprocess.check_output(['nmctl', 'show-nft-tables'], universal_newlines=True).rstrip()
        print(output)
        self.assertRegex(output, 'testtable99')

    def test_nmctl_delete_table(self):
        self._add_table()
        self.assertRegex(self._nft_tables(), 'table ip testtable99')
        subprocess.check_call(['nmctl', 'delete-nft-table', 'ipv4', 'testtable99'])
        self.assertNotRegex(self._nft_tables(), 'table ip testtable99')

    def test_nmctl_add_chain(self):
        self._add_table()
        subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
        output = self._nft_table()
        self.assertRegex(output, 'testtable99')
        self.assertRegex(output, 'testchain99')

    def test_nmctl_show_chain(self):
        self._add_table()
        self._add_chain()

    def test_nmctl_delete_chain(self):
        self._add_table()
        self._add_chain()
        subprocess.check_call(['nmctl', 'delete-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
        output = self._nft_table()
        self.assertRegex(output, 'testtable99')
        self.assertNotRegex(output, 'testchain99')

    def test_nmctl_add_rule_tcp_accept(self):
        self._add_table()
        self._add_chain()
        self._add_rule('tcp', 'dport', 'accept')

    def test_nmctl_add_rule_tcp_drop(self):
        self._add_table()
        self._add_chain()
        self._add_rule('tcp', 'dport', 'drop')

    def test_nmctl_add_rule_tcp_drop_sport(self):
        self._add_table()
        self._add_chain()
        self._add_rule('tcp', 'sport', 'drop')

    def test_nmctl_add_rule_tcp_drop_accept_sport(self):
        self._add_table()
        self._add_chain()
        self._add_rule('tcp', 'sport', 'accept')

    def test_nmctl_add_rule_udp_accept_sport(self):
        self._add_table()
        self._add_chain()
        self._add_rule('udp', 'sport', 'accept')

    def test_nmctl_add_rule_udp_drop_dport(self):
        self._add_table()
        self._add_chain()
        self._add_rule('udp', 'dport', 'drop')

    def test_nmctl_add_rule_udp_accept_dport(self):
        self._add_table()
        self._add_chain()
        self._add_rule('udp', 'dport', 'accept')

    def test_nmctl_delete_rule(self):
        self._add_table()
        self._add_chain()
        self._add_rule('udp', 'dport', 'accept')
        subprocess.check_call(['nmctl', 'delete-nft-rule', 'ipv4', 'testtable99', 'testchain99'])
        output = self._nft_table()
        self.assertNotRegex(output, 'udp dport 9999 counter packets 0 bytes 0 accept')
def setUpModule():
    """Create the yaml configuration directory before any test runs.

    Bug fix: the original repeated the same existence check and, in the
    unreachable second branch, called the nonexistent ``shutil.mkdirs``
    (an AttributeError had it ever run). ``os.makedirs(..., exist_ok=True)``
    covers both cases atomically.
    """
    os.makedirs(network_config_manager_yaml_config_path, exist_ok=True)
def tearDownModule():
    """Remove the CI scratch tree once the whole module has finished."""
    if not os.path.exists(network_config_manager_ci_path):
        return
    shutil.rmtree(network_config_manager_ci_path)
|
nilq/baby-python
|
python
|
from project.appliances.appliance import Appliance
class TV(Appliance):
    """A television appliance with a fixed running cost of 1.5."""

    def __init__(self):
        """Initialise the TV and pass its cost to the Appliance base class."""
        cost = 1.5
        self.cost = cost
        super().__init__(cost)
|
nilq/baby-python
|
python
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test module for tfq.python.optimizers.rotosolve_minimizer optimizer."""
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
from operator import mul
from functools import reduce
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
import cirq
import sympy
from tensorflow_quantum.python.layers.high_level import pqc
from tensorflow_quantum.python import util
from tensorflow_quantum.python.optimizers import rotosolve_minimizer
def loss_function_with_model_parameters(model, loss, train_x, train_y):
    """Create a new function that assigns flat parameters to the model
    and evaluates its loss.

    Args:
        model : an instance of `tf.keras.Model` or its subclasses.
        loss : a function with signature loss_value = loss(pred_y, true_y).
        train_x : the input part of training data.
        train_y : the output part of training data.

    Returns:
        A function that has a signature of:
            loss_value = f(model_parameters).
    """
    # Obtain the shapes of all trainable parameters in the model.
    shapes = tf.shape_n(model.trainable_variables)
    count = 0
    sizes = []

    # Record the element count of each parameter tensor.
    for shape in shapes:
        n = reduce(mul, shape)
        sizes.append(n)
        count += n

    # Function that accepts the flat parameter vector and evaluates the model.
    @tf.function
    def func(params):
        """A function that can be used by tfq.optimizer.rotosolve_minimize.

        Args:
            params [in]: a 1D tf.Tensor with `count` elements.

        Returns:
            Loss function value
        """
        # Update the parameters of the model, reshaping each flat slice back
        # to the matching variable's shape.  Bug fix: the original used the
        # leaked loop variable `shape` (i.e. the *last* variable's shape)
        # for every variable, which only works when all trainable variables
        # happen to share one shape; `shapes[i]` is the correct per-variable
        # shape.
        start = 0
        for i, size in enumerate(sizes):
            model.trainable_variables[i].assign(
                tf.reshape(params[start:start + size], shapes[i]))
            start += size
        # Evaluate the loss with the freshly assigned parameters.
        loss_value = loss(model(train_x, training=True), train_y)
        return loss_value

    return func
class RotosolveMinimizerTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for the rotosolve optimization algorithm."""

    def test_function_optimization(self):
        """Optimize a simple sinusoid function."""
        n = 10  # Number of parameters to be optimized
        coefficient = tf.random.uniform(shape=[n])
        # The global minimum of sum_i(c_i * sin(x_i)) is -sum_i(|c_i|),
        # reached when each sin(x_i) opposes the sign of c_i.
        min_value = -tf.math.reduce_sum(tf.abs(coefficient))
        func = lambda x: tf.math.reduce_sum(tf.sin(x) * coefficient)
        result = rotosolve_minimizer.minimize(func, np.random.random(n))
        self.assertAlmostEqual(func(result.position), min_value)
        self.assertAlmostEqual(result.objective_value, min_value)
        self.assertTrue(result.converged)
        self.assertLess(result.num_iterations,
                        50)  # 50 is the default max iteration

    def test_nonlinear_function_optimization(self):
        """Test to optimize a non-linear function.

        A non-linear function cannot be optimized by rotosolve,
        therefore the optimization must never converge.
        """
        func = lambda x: x[0]**2 + x[1]**2
        result = rotosolve_minimizer.minimize(func,
                                              tf.random.uniform(shape=[2]))
        self.assertFalse(result.converged)
        # The optimizer must exhaust its budget without converging.
        self.assertEqual(result.num_iterations,
                         50)  # 50 is the default max iteration

    def test_keras_model_optimization(self):
        """Optimizate a PQC based keras model."""
        # XOR-style truth table: inputs as bit pairs, labels in {-1, 1}.
        x = np.asarray([
            [0, 0],
            [0, 1],
            [1, 0],
            [1, 1],
        ], dtype=float)
        y = np.asarray([[-1], [1], [1], [-1]], dtype=np.float32)

        def convert_to_circuit(input_data):
            """Encode into quantum datapoint."""
            values = np.ndarray.flatten(input_data)
            qubits = cirq.GridQubit.rect(1, 2)
            circuit = cirq.Circuit()
            # Flip the qubit with an X gate wherever the input bit is set.
            for i, value in enumerate(values):
                if value:
                    circuit.append(cirq.X(qubits[i]))
            return circuit

        x_circ = util.convert_to_tensor([convert_to_circuit(x) for x in x])
        # Create two qubits
        q0, q1 = cirq.GridQubit.rect(1, 2)
        # Create an anzatz on these qubits.
        a, b = sympy.symbols('a b')  # parameters for the circuit
        circuit = cirq.Circuit(
            cirq.rx(a).on(q0),
            cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1))
        # Build the Keras model.
        model = tf.keras.Sequential([
            # The input is the data-circuit, encoded as a tf.string
            tf.keras.layers.Input(shape=(), dtype=tf.string),
            # The PQC layer returns the expected value of the
            # readout gate, range [-1,1].
            pqc.PQC(circuit, cirq.Z(q1)),
        ])
        # Initial guess of the parameter from random number
        result = rotosolve_minimizer.minimize(
            loss_function_with_model_parameters(model, tf.keras.losses.Hinge(),
                                                x_circ, y),
            tf.random.uniform(shape=[2]) * 2 * np.pi)
        # A hinge loss of zero means the model classifies all four points.
        self.assertAlmostEqual(result.objective_value, 0)
        self.assertTrue(result.converged)
# Run the test suite when this module is executed as a script.
if __name__ == "__main__":
    tf.test.main()
|
nilq/baby-python
|
python
|
from importlib import import_module
from importlib import resources
# Registry mapping plug-in names to their callables.
PLUGINS = {}


def register_plugin(func):
    """Decorator that records *func* in PLUGINS under its own name."""
    PLUGINS[func.__name__] = func
    return func
def __getattr__(name):
    """Return a named plugin, importing plugin modules on a first miss."""
    # Fast path: the plugin has already been registered.
    if name in PLUGINS:
        return PLUGINS[name]
    # Slow path: pull in all plugin modules, then retry once.
    _import_plugins()
    try:
        return PLUGINS[name]
    except KeyError:
        raise AttributeError(
            f"module {__name__!r} has no attribute {name!r}"
        ) from None
def __dir__():
    """List the names of all available plug-ins."""
    _import_plugins()
    return [*PLUGINS]
def _import_plugins():
    """Import every .py resource in this package so plug-ins self-register."""
    for resource in resources.contents(__name__):
        if resource.endswith(".py"):
            import_module(f"{__name__}.{resource[:-3]}")
|
nilq/baby-python
|
python
|
# import os
# os.environ["KIVY_WINDOW"] = "sdl2"
# uncomment the above lines to run on raspberrypi like it runs on windows.
import kivy
kivy.require('1.9.1')
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from time import sleep, time
from random import randint
from functools import partial
import threading
class SimonBoxLayout(BoxLayout):
""" Game logic goes inside this class."""
# when game is launched, start blinking new game button
def __init__(self):
super().__init__()
self.set_game_variables(init=True)
self.custom_animate_button(self.restart_button, "blink_loop")
# binded to newgame button
def start(self, *args):
''' start a new game thread '''
threading.Thread(target=self.setup, args=args).start()
# setup a new game
def setup(self, r, b, g, y):
''' Receives colored buttons objects.
Sets up all variables and starts game loop.'''
# blink once animation for start game button after clicked
self.custom_animate_button(self.restart_button, "down")
# handle player clicking "new game" before game is over
if not self.players_turn:
return
elif self.game_on:
self.aborted = True
else:
self.aborted = False
# init/reset variables for new game
self.set_game_variables(r, b, g, y)
# game starting animation
self.game_starting()
# setup game screen
self.update_current()
# start game loop
self.game_on = True
self.newgame()
# init/reset all game variables
def set_game_variables(self, *args, init=False):
''' information about the game is stored in these variables '''
# used to continue looping game
self.game_on = False
# kivy button objects for the colored squares
self.objcs = [i for i in args]
# starting lenght of the sequence
self.starting_size = 1
# random new sequence that player will try replicate
self.rand_list = [randint(0, 3) for i in range(self.starting_size - 1)]
# player current attempt to replicate sequence
self.player_moves = []
# current biggest successful sequence replicate
self.current_streak = 0
# current longest registered sequence replicate
self.longest_streak = self.load_record()
# in seconds, how long before next blinking square
self.speed = 1
# used to lock player input while showing sequence
self.players_turn = init
# if this game broke previous record
self.new_record_flag = False
# kill_thread_flag is used to kill python loops after game closes
self.kill_thread_flag = threading.Event()
# game loop
def newgame(self):
while self.game_on:
# check if program was closed
if self.kill_thread_flag.is_set():
# if yes kill loop
return
self.output_pattern()
self.intake_pattern()
self.update_current()
self.announce_gameover()
# schedule the sequence
def output_pattern(self):
# lock player input while sequence being shown
self.change_turn(turn="computer")
# add new value to sequence
self.rand_list.append(randint(0, 3))
# time buffer between events in order to not move too fast for humans:
buff = self.update_self_speed()
sleep(5 * buff)
# list of functions to blink (dim/turnon) each button in sequence
dim_list = []
turnon_list = []
for i in self.rand_list:
obj = self.objcs[i]
partial_func1 = partial(self.showpattern_dim, obj)
partial_func2 = partial(self.showpattern_high, obj)
dim_list.append(partial_func1)
turnon_list.append(partial_func2)
# scheduling the time of execution of each function,
# in order to create the sequence flow.
# the buffer is used to create the blink effect
for i in range(len(dim_list)):
# schedule turning button off
Clock.schedule_once(dim_list[i], i * (self.speed) + buff)
# schedule turning button back on
Clock.schedule_once(turnon_list[i], (i + 1) * (self.speed))
# allow player's input after entire sequence was shown
unlock_player = partial(self.change_turn, **{"turn": "player"})
Clock.schedule_once(unlock_player, (i + 1) * (self.speed))
# get player's input
def intake_pattern(self, *args):
# reset the players input from previous round
self.player_moves = []
# wait for players turn
while not self.players_turn:
# check if program was closed
if self.kill_thread_flag.is_set() or not self.game_on:
# if yes kill loop
return
# sleep and wait to check again
sleep(0.3)
# Player button clicks will append values to self.player_moves.
# This loop will check and make sure every click matches sequence.
# Will exit when player number of inputs equals the lenght of the
# sequence.
while True:
# check if program was closed or new game was pressed
if self.kill_thread_flag.is_set() or not self.game_on:
# if yes kill loop
return
# check if lists are equal
counter = 0
for x, y in zip(self.player_moves, self.rand_list):
if x != y:
# if different, declare game over
self.game_on = False
self.aborted = False
return
counter += 1
# return when player has reproduced the entire sequence
if counter == len(self.rand_list):
return
# wait a little before continuing loop
sleep(0.1)
# update screen after every turn
def update_current(self):
# define current streak
if not self.game_on:
self.current_streak = (len(self.rand_list) - 1 if
len(self.rand_list) > 0 else 0)
else:
self.current_streak = len(self.rand_list)
# if your streak is bigger than your record, update record
if self.current_streak > self.longest_streak:
self.new_record_flag = True
self.longest_streak = self.current_streak
# update the screen with your total streak and record
streak = 'Current streak: ' + str(self.current_streak)
record = 'All time best: ' + str(self.longest_streak)
self.streak.text = streak
self.record.text = record
# if game is over, announce it
def announce_gameover(self):
# if game was aborted skip announcing
if self.aborted:
return
# if there was a new record, update file, and congratulate
if self.new_record_flag:
with open("kivy.dll", mode="w") as f:
f.write(str(hex(self.current_streak)))
announce = "GAMEOVER\nCongratz!\nYour new record is "
announce += str(self.current_streak) + " repetitions."
else:
announce = "GAMEOVER\nYour record remains "
announce += str(self.longest_streak) + " repetitions."
self.turn.color = [1, 0, 0, 1]
self.turn.text = (announce)
# dim button (recieves *args because scheduling passes extra arg "dt")
def showpattern_dim(self, obj, *args):
obj.background_color[-1] = 0.2
# brighten button
def showpattern_high(self, obj, *args):
obj.background_color[-1] = 1
# update if it's player turn to play or not
def change_turn(self, *args, turn, **kwargs):
# make output message yellow
self.turn.color = [1, 1, 0, 1]
if turn == "player":
self.players_turn = True
self.turn.text = "YOUR TURN!"
elif turn == "computer":
self.players_turn = False
self.turn.text = ("REPEAT THIS SEQUENCE")
else:
raise ValueError("change turn error")
# load record from storage file
def load_record(self):
try:
with open("kivy.dll") as f:
data = f.readline()
return int(data, 16)
except FileNotFoundError:
with open("kivy.dll", mode="w") as f:
f.write("0")
return 0
# bound to colored buttons
def click_append(self, color_number):
    """Record a colored-button press, but only during the player's turn
    of a running game; nag the player when it is not their turn."""
    if self.players_turn:
        # Presses after the game has ended are ignored.
        if self.game_on:
            self.player_moves.append(color_number)
    else:
        self.turn.color = [0 / 255, 95 / 255, 249 / 255, 1]
        self.turn.text = "Not your turn yet!"
# increment speed with every move
def update_self_speed(self):
    '''Speed the game up by 10% per move, floored at 0.4 seconds.

    Returns the time buffer (speed / 10, rounded to 2 decimals) used
    between blinks and other scheduled events.
    '''
    self.speed = round(self.speed - self.speed / 10, 2)
    if self.speed < 0.4:
        self.speed = 0.4
    return round(self.speed / 10, 2)
# animate button so the user knows it was clicked
def custom_animate_button(self, button, high_or_low):
    """Give visual feedback on *button*: "down" (pressed), "up" (released
    after a 1 s delay) or "blink_loop" (repeating prompt while waiting)."""
    # turn button blue when pressed (0, 95, 249 is an RGB blue, not red)
    if high_or_low == "down":
        button.color = [0 / 255, 95 / 255, 249 / 255, 1]
    # turn yellow when released
    elif high_or_low == "up":
        def unpress(*args):
            button.color = [1, 1, 0, 1]
        Clock.schedule_once(unpress, 1)
    # blinking effect when waiting for player to click
    elif high_or_low == "blink_loop":
        def blink(*args):
            # Once the game restarts, pin the color to yellow; otherwise
            # toggle yellow <-> red on each tick.
            if self.game_on:
                button.color = [1, 1, 0, 1]
            elif button.color == [1, 1, 0, 1]:
                button.color = [1, 0, 0, 1]
            elif button.color == [1, 0, 0, 1]:
                button.color = [1, 1, 0, 1]
        # NOTE(review): schedules 3600 one-shot callbacks (~30 minutes of
        # blinking at 0.5 s spacing) instead of Clock.schedule_interval —
        # confirm this is intentional.
        for i in range(3600):
            Clock.schedule_once(blink, i * 0.5)
    else:
        raise ValueError("Button state not recognized")
# game starting animation
def game_starting(self):
    """Animate a 'Starting game' banner with a growing trail of dots."""
    self.turn.color = [0 / 255, 95 / 255, 249 / 255, 1]
    base = "Starting game "
    for dots in range(5):
        self.turn.text = base + ". " * dots
        sleep(0.2)
# .kv file must be <same name of this class without "App">.kv all lowercase
class SimonGameApp(App):
    """Kivy application wrapper; loads simongame.kv by naming convention."""
    def on_stop(self):
        # Signal the background worker thread to exit when the window closes.
        self.root.kill_thread_flag.set()
    def build(self):
        # Root widget of the app (class defined earlier in this file).
        return SimonBoxLayout()
# Instantiate and start the app (blocks in the Kivy event loop until exit).
myapp = SimonGameApp()
myapp.run()
|
nilq/baby-python
|
python
|
import torch
import unseal.transformers_util as tutil
from unseal.hooks import HookedModel
def test_load_model():
    """Loading a plain HF model name returns a non-None (model, tokenizer, config)."""
    model, tokenizer, config = tutil.load_from_pretrained('gpt2')
    assert model is not None
    assert tokenizer is not None
    assert config is not None
def test_load_model_with_dir():
    """An explicit model_dir (HF organization) is honored."""
    model, tokenizer, config = tutil.load_from_pretrained('gpt-neo-125M', model_dir='EleutherAI')
    assert model is not None
    assert tokenizer is not None
    assert config is not None
def test_load_model_eleuther_without_dir():
    """EleutherAI models resolve even without an explicit model_dir."""
    model, tokenizer, config = tutil.load_from_pretrained('gpt-neo-125M')
    assert model is not None
    assert tokenizer is not None
    assert config is not None
def test_load_model_with_low_mem():
    """low_cpu_mem_usage loading still yields a complete triple."""
    model, tokenizer, config = tutil.load_from_pretrained('gpt2', low_cpu_mem_usage=True)
    assert model is not None
    assert tokenizer is not None
    assert config is not None
def test_get_num_layers_gpt2():
    """GPT-2 small exposes 12 transformer blocks under transformer->h."""
    model, *_ = tutil.load_from_pretrained('gpt2')
    model = HookedModel(model)
    assert tutil.get_num_layers(model, 'transformer->h') == 12
def test_get_num_layers_transformer():
    """get_num_layers reports the configured number of decoder layers."""
    model = torch.nn.Transformer(d_model=10, nhead=2, num_encoder_layers=0, num_decoder_layers=10)
    model = HookedModel(model)
    # Compare against the configured count (10): the original bare
    # truthiness assert would pass for any non-zero result, unlike the
    # sibling GPT-2 test which pins the exact layer count.
    assert tutil.get_num_layers(model, 'decoder->layers') == 10
|
nilq/baby-python
|
python
|
import cv2
from image_manipulation import binarize_image, grayscale_image
class Camera(object):
    """
    Class to take pictures.
    :param width_size: camera's image width
    :type width_size: int
    :param height_size: camera's image height
    :type height_size: int
    :param input_cam_device: param to control camera's input
    :type input_cam_device: int
    :param height_param: cv2 capture property id used to set the height
    :type height_param: int
    :param width_param: cv2 capture property id used to set the width
    :type width_param: int
    :param mode: type of image: "pure", "green", "bin" or "gray"
    :type mode: str
    :param debug: when True, capture methods also return the raw frame
    :type debug: bool
    :param resize: scale factor applied to captured frames
    :type resize: float
    """
    def __init__(self,
                 width_size=160,
                 height_size=90,
                 input_cam_device=0,
                 height_param=4,
                 width_param=3,
                 mode="pure",
                 debug=False,
                 resize=1.0):
        self.cam = cv2.VideoCapture(input_cam_device)
        self.cam.set(width_param, width_size)
        self.cam.set(height_param, height_size)
        assert mode in ("pure", "green", "bin", "gray")
        self.mode = mode
        self.resize = resize
        self.debug = debug

    def save_image(self, path, img):
        """
        Save image in path "path".
        :param path: path to save image
        :type path: str
        :param img: image
        :type img: np.ndarray
        """
        cv2.imwrite(path, img)

    def take_picture(self):
        """
        Take picture according to the mode param.
        :rtype: np.ndarray
        """
        # mode is validated in __init__, so exactly one branch matches.
        if self.mode == "pure":
            return self.take_picture_rgb()
        elif self.mode == "green":
            return self.take_picture_green()
        elif self.mode == "bin":
            return self.take_picture_bin()
        elif self.mode == "gray":
            return self.take_picture_gray()

    def take_picture_rgb(self):
        """
        Take picture with no transformation.
        :return: resized image (plus the raw frame when debug is on)
        :rtype: np.ndarray
        """
        _, img = self.cam.read()
        res = cv2.resize(img, (0, 0), fx=self.resize, fy=self.resize)
        if self.debug:
            return res, img
        return res

    def take_picture_gray(self):
        """
        Take grayscale picture.
        :return: gray and resized image (plus the raw frame when debug is on)
        :rtype: np.ndarray
        """
        _, orig = self.cam.read()
        img = cv2.resize(orig, (0, 0), fx=self.resize, fy=self.resize)
        img = grayscale_image(img)
        if self.debug:
            return img, orig
        return img

    def take_picture_bin(self):
        """
        Take binarized picture.
        :return: binary and resized image (plus the raw frame when debug is on)
        :rtype: np.ndarray
        """
        _, orig = self.cam.read()
        img = cv2.resize(orig, (0, 0), fx=self.resize, fy=self.resize)
        img = binarize_image(img)
        if self.debug:
            return img, orig
        return img

    def take_picture_green(self):
        """
        Take picture with only the green channel.
        :return: green channel of the resized image (plus raw frame in debug)
        :rtype: np.ndarray
        """
        _, orig = self.cam.read()
        img = cv2.resize(orig, (0, 0), fx=self.resize, fy=self.resize)
        # BUGFIX: img[1] selected the second *row* of pixels. The green
        # channel of an OpenCV BGR frame is index 1 on the channel axis.
        green = img[:, :, 1]
        if self.debug:
            return green, orig
        return green
|
nilq/baby-python
|
python
|
# Passing a List
def greet(names):
    """Print a title-cased greeting for each name in *names*."""
    for name in names:
        print(f"Hello, {name.title()}")
# Demo: greet each user in the list.
username=['alice','beerus','cyrus']
greet(username)
|
nilq/baby-python
|
python
|
# coding:utf-8
# log utils
"""
Remember: never create the same logger object twice, or every record
will be printed repeatedly.
"""
# import os
from logging import (
    handlers,
    getLogger,)
from logging import Formatter as LoggingFormatter
from logging import StreamHandler as LoggingStreamHandler
from logging import FileHandler as LoggingFileHandler
from logging import ERROR as LOGGING_ERROR
from logging import DEBUG as LOGGING_DEBUG
__all__ = [
    'set_logger'
]
# Record formats: the console shows timestamp + level + message; the file
# format additionally records the source file, function and line number.
CONSOLE_FORMATTER = '%(asctime)s [%(levelname)-6s] ➞ %(message)s'
FILE_FORMATTER = '%(asctime)s [%(levelname)-6s] at %(filename)s 出错函数%(funcName)s.%(lineno)d ↴\n %(message)s\n'
def set_logger(log_file_name,
               console_log_level=LOGGING_DEBUG,
               file_log_level=LOGGING_ERROR,
               console_formatter='%(asctime)s [%(levelname)-6s] ➞ %(message)s',
               file_formatter='%(asctime)s [%(levelname)-6s] at %(filename)s 出错函数%(funcName)s.%(lineno)d ↴\n %(message)s\n',
               logger_name='my_logger'):
    """Create (or fetch) a named logger with console + rotating-file handlers.

    Always use a dedicated *logger_name* rather than the root logger, so
    related modules do not inherit root handlers and double-print records.

    :param log_file_name: path of the rotating log file (1 MB, 5 backups)
    :param console_log_level: level for terminal output
    :param file_log_level: level for file output
    :param console_formatter: format string for terminal records
        (default mirrors the module-level CONSOLE_FORMATTER)
    :param file_formatter: format string for file records
        (default mirrors the module-level FILE_FORMATTER)
    :param logger_name: unique logger name; reusing it returns the same logger
    :return: the configured logging.Logger
    """
    logger = getLogger(logger_name)
    # BUGFIX: calling this twice with the same name used to attach a second
    # pair of handlers, so every record was printed/written twice — exactly
    # the failure mode this module's docstring warns about. An already
    # configured logger is returned untouched.
    if logger.handlers:
        return logger
    logger.setLevel(LOGGING_DEBUG)
    # File handler: rotates at 1 MB keeping 5 backups; utf-8 so non-ASCII
    # (e.g. Chinese) messages are written correctly.
    file_handler = handlers.RotatingFileHandler(
        filename=log_file_name,
        maxBytes=1024 * 1024,
        backupCount=5,
        encoding='utf-8',)
    file_handler.setLevel(file_log_level)
    # Console handler.
    console_handler = LoggingStreamHandler()
    console_handler.setLevel(console_log_level)
    # Attach the output formats.
    console_handler.setFormatter(LoggingFormatter(console_formatter))
    file_handler.setFormatter(LoggingFormatter(file_formatter))
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    return logger
|
nilq/baby-python
|
python
|
# Adapted from https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/softmax.py
import triton.language as tl
import triton
import torch
from src.models.attention.blocksparse_utils import sparsify_broadcast_tensor
def next_power_of_2(n):
    """Round *n* up to the nearest power of two (bit-smearing trick).

    Smearing the high bit across all lower positions turns n-1 into a
    string of ones; adding 1 yields the next power of two. Returns n
    itself when n is already a power of two, and 0 for n == 0.
    """
    n -= 1
    for shift in (1, 2, 4, 8, 16):
        n |= n >> shift
    return n + 1
def num_warps(n):
    """Choose a warp count (4/8/16) that grows with the reduction width n."""
    if n < 512:
        return 4
    return 8 if n < 2048 else 16
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3] * meta['BLOCK'])})
@triton.jit
def _forward(
    X, OUT, LUT, sizemax, stride_zx, stride_zout, stride_hout, **meta
):
    # Row-sum over a block-sparse matrix: each program instance reduces one
    # row (rxm) of one block-row (rbm) of X into a dense OUT entry.
    TN = meta['TN']
    BLOCK = meta['BLOCK']
    pidhm = tl.program_id(0)
    pidz = tl.program_id(1)
    # create index ranges
    rxm = pidhm % BLOCK
    rbm = pidhm // BLOCK
    rxn = tl.arange(0, TN) % BLOCK
    rbn = tl.arange(0, TN) // BLOCK
    # extract information from LUT: per block-row header of (size, offset)
    header = LUT + rbm * 2
    size = tl.load(header + 0)
    offset = tl.load(header + 1)
    # lanes past the row's block count are clamped to the last valid block;
    # their loads are masked off below.
    check = rbn < size
    rbmn = tl.where(check, rbn, size - 1)
    # block id and column id — each LUT entry is 4 ints:
    # (blockid, colid, rowid, headid); see _sum.make_lut.
    blockid = tl.load(LUT + offset + rbmn * 4 + 0)
    rowid = tl.load(LUT + offset + rbmn * 4 + 2)
    headid = tl.load(LUT + offset + rbmn * 4 + 3)
    # pointers to X
    px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
    x = tl.load(px, mask=check, other=0)
    x = x.to(tl.float32)
    # computation: reduce the gathered row segment
    out = tl.sum(x, axis=0)
    # pointers to OUT
    pout = OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK + rxm
    tl.store(pout, out)
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
# NOTE(review): _forward computes TN as next_power_of_2(args[3] * BLOCK);
# here it is next_power_of_2(args[3]) * BLOCK. The two agree whenever
# BLOCK is a power of two — confirm that is always the case.
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3]) * meta['BLOCK']})
@triton.jit
def _backward(DX, DOUT, LUT, sizemax, stride_zdx, stride_zdout, stride_hdout, **meta):
    # Scatter the (dense) row-sum gradient DOUT back onto the block-sparse
    # layout: every element of a row receives that row's gradient.
    pidhm = tl.program_id(0)
    pidz = tl.program_id(1)
    TN = meta['TN']
    BLOCK = meta['BLOCK']
    # create index ranges
    rxm = pidhm % BLOCK
    rbm = pidhm // BLOCK
    rxn = tl.arange(0, TN) % BLOCK
    rbn = tl.arange(0, TN) // BLOCK
    # extract information from look-up table
    header = LUT + rbm * 2
    size = tl.load(header + 0)
    offset = tl.load(header + 1)
    # bounds checking on lut
    check = rbn < size
    rbmn = tl.where(check, rbn, size - 1)
    # initialize pointers to block-sparse input
    # (LUT entry layout: blockid, colid, rowid, headid — see _sum.make_lut)
    blockid = tl.load(LUT + offset + rbmn * 4)
    rowid = tl.load(LUT + offset + rbmn * 4 + 2)
    headid = tl.load(LUT + offset + rbmn * 4 + 3)
    pdx = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
    pdout = DOUT + pidz * stride_zdout + headid * stride_hdout + rowid * BLOCK + rxm
    # Load
    # [2021-09-14] TD: Triton's broadcasting is very buggy, I have to read from dx (which is all
    # zeros) just so that I can broadcast dout (a scalar).
    dx_zeros = tl.load(pdx, mask=check, other=0)
    dout = tl.load(pdout)
    # Computation (dx_zeros is all zeros, so this is just a broadcast of dout)
    dx = dout - dx_zeros
    tl.store(pdx, dx, mask=check)
class _sum(torch.autograd.Function):
    """Autograd op: per-row sums of a block-sparse tensor.

    forward reduces each (head, row) of the sparse input into a dense
    (batch, n_head, n_row) output via the _forward Triton kernel; backward
    broadcasts the incoming gradient back onto the sparse layout.
    """
    @staticmethod
    def make_lut(layout, block, device):
        """Build the kernel look-up table from a (head, row, col) 0/1 layout.

        The LUT begins with one (size, offset) header per block-row,
        followed by one (blockid, colid, rowid, headid) entry per nonzero
        block. Returns (lut, max nonzero blocks in any row, n_head, n_row).
        """
        _empty = torch.tensor([], dtype=torch.int64, device=layout.device)
        sizes = _empty.clone()
        # sizes along rows
        for h in range(layout.shape[0]):
            sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
        # offsets in block format
        offsets = torch.zeros_like(sizes)
        offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
        # block indices
        idx = torch.arange(layout.sum())
        head = layout.nonzero(as_tuple=False)[:, 0]
        rows = layout.nonzero(as_tuple=False)[:, 1]
        columns = layout.nonzero(as_tuple=False)[:, 2]
        core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
        # construct look-up table; offsets are rebased past the header
        # (4 ints per entry, 2 ints per header row)
        offsets = offsets * 4 + 2 * sizes.numel()
        header = torch.stack((sizes, offsets), dim=1).view(-1)
        lut = torch.cat((header, core)).type(torch.int32).to(device)
        n_head = layout.shape[0]
        n_row = layout.shape[1] * block
        return lut, int(sizes.max()), n_head, n_row
    @staticmethod
    def forward(ctx, x, spdims, block, lut, maxlut, n_head, n_row, layout, bench, time):
        """Launch the row-sum kernel; returns a (batch, n_head, n_row) tensor.

        `bench` and `time` are accepted for interface compatibility but unused.
        """
        out = torch.zeros((x.shape[0], n_head, n_row), dtype=x.dtype, device=x.device)
        # run kernel: one program per row of each block-row, per batch entry
        M = x.shape[0]
        meta = {'BLOCK': block}
        grid = lambda opt: [spdims[0] * spdims[1] * block, M]
        _forward[grid](x, out, lut, maxlut, x.stride(0), out.stride(0), out.stride(1),
                       force_nc_cache=True, **meta)
        # save to context
        ctx.save_for_backward(x, lut, layout)
        ctx.spdims = spdims
        ctx.block = block
        ctx.maxlut = maxlut
        return out
    @staticmethod
    def backward(ctx, dout):
        """Gradient of a row-sum: broadcast dout over each row's sparse blocks.

        Implemented in pure PyTorch (sparsify_broadcast_tensor + expand);
        the Triton _backward kernel below is kept for reference.
        """
        # retrieve from context
        x, lut, layout = ctx.saved_tensors
        block = x.shape[-1]
        dx = sparsify_broadcast_tensor(dout, layout, block).expand(-1, -1, -1, block)
        # dx = torch.zeros_like(x)
        # run kernel
        # M = x.shape[0]
        # grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
        # _backward[grid](dx, dout, lut, ctx.maxlut, dx.stride(0), dout.stride(0), dout.stride(1),
        #                 force_nc_cache=True, BLOCK=ctx.block)
        return dx, None, None, None, None, None, None, None, None, None
class blocksparse_sum:
    """Callable wrapper around the block-sparse row-sum autograd op,
    caching the per-device look-up table."""
    apply_sum = _sum.apply

    def __init__(self, layout, block, bench=False):
        self.spdims = layout.shape
        self.layout = layout
        self.block = block
        self.bench = bench
        self.lut_cache = {}

    def make_lut(self, device):
        """Return (lut, maxlut, n_head, n_row) for *device*, building lazily."""
        key = (device,)
        try:
            return self.lut_cache[key]
        except KeyError:
            entry = _sum.make_lut(self.layout, self.block, device)
            self.lut_cache[key] = entry
            return entry

    def __call__(self, x):
        """Sum x over its sparse columns; returns (batch, n_head, n_row)."""
        lut, maxlut, n_head, n_row = self.make_lut(x.device)
        return blocksparse_sum.apply_sum(
            x, self.spdims, self.block, lut, maxlut, n_head, n_row,
            self.layout, self.bench, [None]
        )
|
nilq/baby-python
|
python
|
import time
import rich
from hurry.filesize import size
from tabulate import tabulate
class Format:
    """Render API responses as plain-text tables on the console."""

    # Columns shown by both the history and progress views.
    _PROGRESS_COLUMNS = [
        "backup_id",
        "start_timestamp",
        "status",
        "operation",
        "id",
        "progress_in_percentage",
        "description",
        "backup_size",
    ]

    @staticmethod
    def cli(**kwargs):
        """
        Handle incoming CLI output style.

        Expects `style` (only "standard" styles are rendered), `source`
        ("dict", "list", "history" or "progress") and `data` kwargs.
        Returns the _default result, or "Error" for unknown styles.
        """
        if "standard" in kwargs["style"]:
            return Format._default(**kwargs)
        return "Error"

    @classmethod
    def _default(cls, **kwargs):
        """Build table rows for the requested source and print them.

        Returns True when a table was printed, False when there was
        nothing to show.
        """
        table = []
        columns = []
        rows = []
        if kwargs["source"] == "dict":
            columns, rows = cls.__dict(kwargs["data"])
            table.append(rows)
        elif kwargs["source"] == "list":
            for d in kwargs["data"]["response"]:
                columns, rows = cls.__list(d)
                table.append(rows)
        elif kwargs["source"] == "history":
            for d in kwargs["data"]["response"]:
                new_columns, new_rows = cls.__history(d)
                # PENDING entries come back empty; keep the previous
                # non-empty columns/rows instead of overwriting them.
                if len(new_columns) != 0:
                    columns = new_columns
                if len(new_rows) != 0:
                    rows = new_rows
                table.append(rows)
        elif kwargs["source"] == "progress":
            for d in kwargs["data"]["response"]:
                columns, rows = cls.__progress(d)
                table.append(rows)
        if len(rows) != 0:
            console = rich.get_console()
            console.print(
                tabulate(
                    table,
                    columns,
                    tablefmt="plain",
                    stralign="left",
                    disable_numparse=True,
                ),
                soft_wrap=True,
            )
            return True
        return False

    @classmethod
    def __format_value(cls, key, value):
        # Shared column formatting (previously duplicated in __list,
        # __history and __progress): timestamps, byte sizes and
        # percentages become human-readable strings.
        if "time" in key:
            value = str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(value)))
        if "size" in key:
            value = str(size(value))
        if "percentage" in key:
            value = "{}%".format(int(value))
        return value

    @classmethod
    def __dict(cls, data):
        # One row from a flat dict; the secret "token" field is never shown.
        columns = []
        rows = []
        for k, v in data.items():
            if k.lower() != "token":
                columns.append(k.upper())
                rows.append(v)
        return columns, rows

    @classmethod
    def __list(cls, d):
        # One row per list entry, minus noisy columns.
        excluded_columns = [
            "versions",
            "backup_services",
            "compatible_error",
        ]
        columns = []
        rows = []
        for k, v in d.items():
            if k not in excluded_columns:
                columns.append(k.upper())
                v = cls.__format_value(k, v)
                # Version fields are always rendered as strings.
                if "_version" in k:
                    v = str(v)
                rows.append(v)
        return columns, rows

    @classmethod
    def __history(cls, d):
        # PENDING entries yield no output at all.
        if "PENDING" in d["status"]:
            return [], []
        return cls.__row(d)

    @classmethod
    def __progress(cls, d):
        return cls.__row(d)

    @classmethod
    def __row(cls, d):
        # Shared row builder for the history/progress views.
        columns = []
        rows = []
        for k, v in d.items():
            if k in cls._PROGRESS_COLUMNS:
                columns.append(k.upper())
                rows.append(cls.__format_value(k, v))
        return columns, rows
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2.7
import os
import sys
sys.path.insert(0, os.path.realpath(os.path.join(__file__, '../../../lib')))
import exatest
from exatest.utils import chdir
from exatest import (
useData,
)
class TestParameterized(exatest.TestCase):
    """Checks that @useData generates one test case per data tuple."""
    @useData((x,) for x in range(10))
    def test_parameterized(self, x):
        # Each generated case runs the same trivial query; DUAL contains a
        # single row whose only value is NULL.
        self.assertRowsEqual([(None,)], self.query('select * from dual'))
    @useData((x,) for x in range(1000))
    def test_large_parameterized(self, x):
        # Same check, but with a large generated test-case count.
        self.assertRowsEqual([(None,)], self.query('select * from dual'))
class TestSetUp(exatest.TestCase):
    """Verifies setUp runs before each test (fresh schema every time)."""
    def setUp(self):
        # Drop-then-create so each test starts with an empty t1 schema;
        # the drop is best-effort because the schema may not exist yet.
        self.query('DROP SCHEMA t1 CASCADE', ignore_errors=True)
        self.query('CREATE SCHEMA t1')
    def test_1(self):
        self.query('select * from dual')
    def test_2(self):
        self.query('select * from dual')
class ODBCTest(exatest.TestCase):
    """odbc.ini must be resolved absolutely, not via the working directory."""
    def test_find_odbcini_after_chdir(self):
        self.assertTrue(os.path.exists('odbc.ini'))
        with chdir('/'):
            # The relative path no longer resolves, yet queries still work.
            self.assertFalse(os.path.exists('odbc.ini'))
            self.query('select * from dual')
if __name__ == '__main__':
    # Strip the wrapper-script-only --jdbc-path option (either
    # "--jdbc-path=bar" or "--jdbc-path bar") before exatest parses argv.
    # Popping inside range(len(...)) is safe because we break immediately.
    for i in range(len(sys.argv)):
        if sys.argv[i].startswith('--jdbc-path='):
            # --foo=bar
            sys.argv.pop(i)
            break
        if sys.argv[i].startswith('--jdbc-path'):
            # --foo bar
            sys.argv.pop(i)
            # BUGFIX: guard the second pop — the original raised IndexError
            # when --jdbc-path was the last argument (value missing).
            if i < len(sys.argv):
                sys.argv.pop(i)
            break
    exatest.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
|
nilq/baby-python
|
python
|
import sys, getopt
import run
def main(argv):
    """Parse the CBIR command-line options and launch a retrieval run.

    Recognized flags: -d database, -c trained CNN, -r retrieval folder,
    -f feature-extraction method, -s distance metric, -p searching method
    (parsed but currently unused downstream), -n image count,
    -m comma-separated parameter list. -h prints usage and exits.
    """
    usage = ('cbir_cl.py -d <path_database> -c <path_cnn_trained>'
             ' -r <path_folder_retrieval> -f <feature_extraction_method>'
             ' -s <distance-similarity metric> -n <number_of_images>'
             ' -m <list_of_parameters>')
    try:
        opts, _ = getopt.getopt(argv, "hd:c:r:f:s:p:n:m:")
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    path_database = ''
    path_cnn_trained = ''
    path_folder_retrieval = ''
    feature_extraction_method = ''
    distance = ''
    searching_method = ''
    number_of_images = 0
    list_of_parameters = []
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt == '-d':
            path_database = arg
        elif opt == '-c':
            path_cnn_trained = arg
        elif opt == '-r':
            path_folder_retrieval = arg
        elif opt == '-f':
            feature_extraction_method = arg
        elif opt == '-s':
            distance = arg
        elif opt == '-p':
            searching_method = arg
        elif opt == '-n':
            # accept "5" as well as "5.0"
            number_of_images = int(float(arg))
        elif opt == '-m':
            list_of_parameters.extend(arg.split(','))
    run.run_command_line(path_database, path_folder_retrieval,
                         path_cnn_trained, feature_extraction_method,
                         distance, number_of_images, list_of_parameters)
# Run only when invoked as a script, skipping the program name in argv.
if __name__ == "__main__":
    main(sys.argv[1:])
|
nilq/baby-python
|
python
|
import sys
import torch
sys.path.insert(0, "../")
from linformer_pytorch import Linformer, Visualizer
# Build a small 3-layer Linformer and visualize its P_bar (projection)
# attention matrices for a random input.
model = Linformer(
    input_size=512,
    channels=16,
    dim_k=128,
    dim_ff=32,
    nhead=4,
    depth=3,
    activation="relu",
    checkpoint_level="C0",
    parameter_sharing="layerwise",
    k_reduce_by_layer=1,
)
# Input shape matches (batch, input_size, channels).
x = torch.randn(1, 512, 16)
# visualize=True makes the forward pass record the attention matrices.
y = model(x, visualize=True)
vis = Visualizer(model)
vis.plot_all_heads(title="All P_bar matrices",
                   show=True,
                   save_file=None,
                   figsize=(8,6),
                   n_limit=256)
print(y)  # (1, 512, 16)
|
nilq/baby-python
|
python
|
from typing import List
class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Return the index of *target* in sorted *nums*, or the index at
        which it would be inserted to keep the list sorted.

        Classic O(log n) binary search over the whole list.
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] < target:
                lo = mid + 1
            else:
                hi = mid - 1
        # lo is the first position whose value exceeds target.
        return lo
# Demo: 0 is smaller than every element, so it inserts at index 0.
arr= [1,3,5,6]
target = 0
ans = Solution().searchInsert(arr, target)
print(ans)
|
nilq/baby-python
|
python
|
import heapq
import math
import numpy as np
import nltk.probability
from nltk.classify import SklearnClassifier
from sklearn.svm import SVC
import re
import sys
sys.path.insert(0, '..')
from definitions import *
sys.path.insert(0, '../Wrapper/')
from helper import *
def get_svm_classifier(parameters):
    """Train an sklearn SVC sentiment classifier (Python 2 module).

    Loads the GO training corpus, caps each class at `parameters.upto`
    tweets, keeps the `parameters.term_limit` highest-scoring terms (per
    get_score, printed as "mutual information"), builds binary
    bag-of-words feature dicts and fits an SVC with probability output.

    :param parameters: object with attributes `upto`, `term_limit`
        and `kernel` (SVC kernel name)
    :returns: (trained classifier, top_terms dict, stop_words dict)

    NOTE(review): the corpus and stop-word files are opened but never
    closed; `tweetclass`, `get_words` and `get_score` come from the
    helper modules imported at the top of this file — confirm their
    contracts there. `num_features`/`num_samples` are computed but unused.
    """
    print "Loading training data..."
    # A dictionary whose keys are strings (words) and values are tweetclass objects
    terms = {}
    # Load training data
    go_training_data = open(GO_TRAINING_DATA)
    go_tweets = []
    # Load stop words (one per line -> membership dict)
    sw = open(STOP_WORDS_DATA)
    stop_words = {}
    for line in sw:
        stop_words[line.strip()] = True
    # DEBUG
    debug_counter = 0
    positive_counter = 0
    negative_counter = 0
    # A debug limit for the number of positive and negative tweets
    upto = parameters.upto
    negative_counter = 0
    positive_counter = 0
    for line in go_training_data:
        # Parse the line for the classification and the tweet
        # (CSV: quoted score in column 0, tweet text in column 5)
        parts = line.split(",")
        score = float(parts[0].replace('"', ""))
        if score == 0:
            if negative_counter >= upto:
                continue
            negative_counter = negative_counter + 1
        else:
            if positive_counter >= upto:
                continue
            positive_counter = positive_counter + 1
        bag = get_words(parts[5], stop_words)
        go_tweets.append((score, bag))
        # Add all the words in the tweet to the list of all terms,
        # tracking per-class occurrence counts.
        for word in bag:
            if word not in terms:
                nt = tweetclass(word)
                if score == 0:
                    nt.negative = 1
                    nt.positive = 0
                else:
                    nt.positive = 1
                    nt.negative = 0
                terms[word] = nt
            else:
                if score == 0:
                    terms[word].negative = terms[word].negative + 1
                else:
                    terms[word].positive = terms[word].positive + 1
        # Debug
        debug_counter = debug_counter + 1
        if debug_counter % 1000 == 0:
            print "processed %d tweets" % debug_counter
    negative_classifications = 0
    for (score, bag) in go_tweets:
        if score == 0:
            negative_classifications = negative_classifications + 1
    positive_classifications = len(go_tweets) - negative_classifications
    print "Training data loaded!"
    # Get the top number of terms: a min-heap of (score, term) capped at
    # term_limit keeps the term_limit highest-scoring terms.
    print "Getting top terms from mutual information"
    scores = []
    top_terms = []
    term_limit = parameters.term_limit
    heap_terms_processed = 0
    for term in terms:
        score = get_score(term, positive_classifications, negative_classifications, terms)
        # Debug
        #print "score: %f\tterm: %s" % (score, term)
        if heap_terms_processed % 1000 == 0:
            print "heap terms processed: %d" % heap_terms_processed
        heapq.heappush(scores, (score, term))
        if len(scores) > term_limit:
            heapq.heappop(scores)
        assert len(scores) <= term_limit
        heap_terms_processed = heap_terms_processed + 1
    for item in scores:
        top_terms.append(item[1])
    # Convert the list of kept terms into a membership dict.
    tt = top_terms
    top_terms = {}
    for t in tt:
        top_terms[t] = True
    print "Top terms found"
    # Debug
    print "Total number of terms: %d" % len(terms)
    #assert False
    #TODO
    # Train: binary presence features over the kept terms.
    num_features = len(top_terms)
    num_samples = len(go_tweets)
    #X = np.zeros((num_samples, num_features))
    train = []
    #y = []
    for (score, bag) in go_tweets:
        fv = {}
        # feature vector for this tweet
        for word in bag:
            if word in top_terms:
                fv[word] = 1
        train.append( (fv, score) )
    print "Fitting data..."
    classifier = SklearnClassifier(SVC(kernel=parameters.kernel, probability=True)).train(train)
    return classifier, top_terms, stop_words
|
nilq/baby-python
|
python
|
# TASK 1: write a function that takes a list of strings and capitalizes
# the first letter of each string.
def katta_harf(ismlar):
    """Title-case every string in *ismlar* in place."""
    # enumerate replaces the index-only range(len(...)) loop; the unused
    # `names` accumulator from the original version is removed.
    for i, ism in enumerate(ismlar):
        ismlar[i] = ism.title()
# Demo: title-case the names in place and show the result.
ismlar = ['ali', 'vali', 'hasan', 'husan']
katta_harf(ismlar)
print(ismlar)
# TASK 2: change the function above so that it does not modify the
# original list and returns a new list instead.
def katta_harf(ismlar):
    """Return a new list with every name title-cased; *ismlar* is untouched.

    The previous pop()-based implementation emptied the argument and
    returned the names in reverse order, contradicting the task statement.
    """
    return [ism.title() for ism in ismlar]
# Demo: pass a copy so the caller's list is guaranteed untouched.
ismlar = ['ali', 'vali', 'hasan', 'husan']
yangi_ismlar = katta_harf(ismlar[:])
print(ismlar)
print(yangi_ismlar)
|
nilq/baby-python
|
python
|
import NNRequestHandler.Base
import NNRequestHandler.User
# Command number for the user-login request (see DispatchCenter.commandMap).
NN_REQUEST_CMD_USER_LOGIN = 1
class DispatchCenter(object):
    """Singleton registry mapping command numbers to request-handler classes."""
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton. BUGFIX: object.__new__ must not receive the
        # extra constructor arguments — forwarding them raises TypeError
        # on Python 3.
        if not cls._instance:
            cls._instance = super(DispatchCenter, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        self.cache = {}
        # Register every declared command -> handler pair.
        for item in self.commandMap():
            for command, handler_class in item.items():
                self.installHandler(command, handler_class)

    def commandMap(self):
        """Declarative command-number -> handler-class table."""
        return [
            {NN_REQUEST_CMD_USER_LOGIN: NNRequestHandler.User.LoginHandler}
        ]

    def installHandler(self, command, HandlerClass):
        """Register *HandlerClass* for a positive command number.

        BUGFIX: raising bare strings (``raise "..."``) is a TypeError on
        Python 3; real exception types are raised instead.
        """
        if command <= 0:
            raise ValueError("Command number less than 0")
        if not issubclass(HandlerClass, NNRequestHandler.Base.RequestHandler):
            raise TypeError("Request handler class must be subclass of RequestHandler")
        self.cache[command] = HandlerClass

    def dispatch(self, uin, command, body):
        """Instantiate the handler for *command*, run the request pipeline
        (checkParams -> proccess -> dump) and return (retCode, rspBody)."""
        if command not in self.cache:
            raise KeyError("Unknow Command %d" % (command))
        handlerClass = self.cache[command]
        handler = handlerClass(uin, body)
        handler.checkParams()
        handler.proccess()  # sic: the handler API spells it "proccess"
        handler.dump()
        return (handler.retCode, handler.rspBody)
|
nilq/baby-python
|
python
|
import unittest
from streamlink.plugins.canlitv import Canlitv, _m3u8_re
class TestPluginCanlitv(unittest.TestCase):
    """Unit tests for the Canlitv plugin: m3u8 regex and URL matching."""
    def test_m3u8_re(self):
        # _m3u8_re must capture a non-empty "url" group regardless of
        # spacing around the colon or quote style.
        def test_re(text):
            m = _m3u8_re.search(text)
            self.assertTrue(m and len(m.group("url")) > 0)
        test_re('file: "test" ')
        test_re('file:"test"')
        test_re('file : "test"')
        test_re('file : "test" ')
        test_re("file: 'test'")
        test_re("file :'test'")
        test_re("file : 'test'")
        test_re("file : 'test'")
    def test_can_handle_url(self):
        # should match: channel pages on every supported domain
        self.assertTrue(Canlitv.can_handle_url("http://www.canlitv.plus/channel"))
        self.assertTrue(Canlitv.can_handle_url("http://www.canlitv.com/channel"))
        self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.co/izle/channel.html"))
        self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.live/izle/channel.html"))
        self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.io/izle/channel.html"))
        self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.site/izle/channel.html"))
        self.assertTrue(Canlitv.can_handle_url("http://www.ecanlitvizle.net/channel/"))
        self.assertTrue(Canlitv.can_handle_url("http://www.ecanlitvizle.net/onizleme.php?kanal=channel"))
        self.assertTrue(Canlitv.can_handle_url("http://www.ecanlitvizle.net/tv.php?kanal=channel"))
        # shouldn't match: bare domain roots without a channel path
        self.assertFalse(Canlitv.can_handle_url("http://www.canlitv.com"))
        self.assertFalse(Canlitv.can_handle_url("http://www.canlitv.plus"))
        self.assertFalse(Canlitv.can_handle_url("http://www.ecanlitvizle.net"))
        self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.co"))
        self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.live"))
        self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.io"))
        self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.site"))
        self.assertFalse(Canlitv.can_handle_url("http://www.ecanlitvizle.net"))
|
nilq/baby-python
|
python
|
"""
Open Orchestrator Cloud Radio Access Network
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect, render
from operators.models import Operator
from pools.forms import PoolForm, AlertForm, SchedulerForm
from .models import Pool
from ns.models import Ns, Nvf
from bbus.models import Bbu
from scenarios.models import Scenario
from django.contrib.auth.decorators import login_required
from oocran.global_functions import paginator
from ues.models import Ue
from schedulers.models import Scheduler
from .tasks import celery_launch, celery_shut_down
from django.contrib.sites.shortcuts import get_current_site
import uuid
from alerts.models import Alert
@login_required(login_url='/login/')
def list(request):
    """Render the paginated list of the current operator's scenarios."""
    scenarios = paginator(
        request, Scenario.objects.filter(operator__user=request.user))
    return render(request, "pools/list.html", {
        "user": request.user,
        "object_list": scenarios,
    })
@login_required(login_url='/login/')
def create(request, id=None):
    """Create a new pool/NS inside scenario *id* from the submitted form."""
    scenario = get_object_or_404(Scenario, id=id)
    form = PoolForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        try:
            # Name must be unique per operator.
            Ns.objects.get(operator__user=request.user, name=form.cleaned_data['name'])
            messages.success(request, "Name repeated!", extra_tags="alert alert-danger")
        except Ns.DoesNotExist:
            # BUGFIX: was a bare ``except:``, which also swallowed
            # programming errors inside the creation path; only the
            # expected "no such NS" outcome proceeds to create one.
            ns = form.save(commit=False)
            ns.operator = get_object_or_404(Operator, user=request.user)
            ns.scenario = scenario
            [reply, tag] = ns.create()
            messages.success(request, reply, extra_tags=tag)
        return redirect("scenarios:scenario", id=id)
    if form.errors:
        messages.success(request, form.errors, extra_tags="alert alert-danger")
        return redirect("scenarios:scenario", id=id)
    context = {
        "user": request.user,
        "form": form,
        "scenario": scenario,
    }
    return render(request, "pools/form.html", context)
@login_required(login_url='/login/')
def delete(request, id=None):
    """Delete pool *id* (shutting it down first if running) and return to
    its scenario page. (Python 2 module: print statements.)"""
    utran = get_object_or_404(Pool, id=id)
    id = utran.scenario.id  # from here on, *id* is the scenario id
    try:
        utran.delete_influxdb_database()
    except:
        # Best-effort: the metrics database may never have been created.
        print "database does not exist!"
    utran.scenario.total_infras -= 1
    utran.scenario.save()
    if utran.status == "Running":
        # Asynchronous teardown; the task also removes the Pool row.
        celery_shut_down.delay(id, action="delete")
        utran.scenario.active_infras -= 1
        # NOTE(review): this decrement happens after save(), so it is not
        # persisted here — confirm the celery task saves the scenario.
    else:
        print "delete"
        utran.delete()
    messages.success(request, "Pool successfully deleted!", extra_tags="alert alert-success")
    return redirect("scenarios:scenario", id=id)
@login_required(login_url='/login/')
def launch(request, id=None):
    """Queue an asynchronous launch of pool *id*, then go to its details page."""
    scenario_id = get_object_or_404(Pool, id=id).scenario.id
    celery_launch.delay(id)
    messages.success(request, "Pool successfully Launched!", extra_tags="alert alert-success")
    return redirect("pools:details", id=scenario_id)
@login_required(login_url='/login/')
def shut_down(request, id=None):
    """Shut pool *id* down asynchronously and decrement the scenario's
    running-infrastructure counter."""
    pool = get_object_or_404(Pool, id=id)
    scenario = pool.scenario
    scenario.active_infras -= 1
    scenario.save()
    celery_shut_down.delay(id)
    messages.success(request, "Pool shut down!", extra_tags="alert alert-success")
    return redirect("pools:details", id=scenario.id)
@login_required(login_url='/login/')
def details(request, id=None):
    """Detail page for pool *id*: its BBUs, the scenario's UEs, and the
    pool's (paginated) schedulers and alerts."""
    pool = get_object_or_404(Pool, id=id)
    context = {
        "user": request.user,
        "utran": pool,
        "ues": Ue.objects.filter(scenario=pool.scenario),
        "alerts": paginator(request, Alert.objects.filter(ns=pool)),
        "bbus": Bbu.objects.filter(ns=pool),
        "schedulers": paginator(request, Scheduler.objects.filter(ns=pool)),
        # Site host without the port, used to build monitoring links.
        "url": get_current_site(request).domain.split(':')[0],
    }
    return render(request, "pools/detail.html", context)
@login_required(login_url='/login/')
def alert(request, id=None):
    """Attach a new Alert to pool *id*, targeting the selected BBU NVFs."""
    utran = get_object_or_404(Pool, id=id)
    form = AlertForm(request.POST or None, nvfs=Bbu.objects.filter(ns=utran))
    if form.is_valid():
        try:
            # Alert names must be unique per operator.
            Alert.objects.get(operator__user=request.user, name=form.cleaned_data['name'])
            messages.success(request, "Name repeated!", extra_tags="alert alert-danger")
        except Alert.DoesNotExist:
            # BUGFIX: was a bare ``except:`` that swallowed all errors in
            # the creation path, not just the expected missing-alert case.
            alert = form.save(commit=False)
            alert.operator = get_object_or_404(Operator, user=request.user)
            alert.scenario = utran.scenario
            alert.ns = utran
            alert.uuid = uuid.uuid4().hex
            alert.save()
            # Link the chosen NVFs once the alert has a primary key.
            # (Loop variable renamed: it used to shadow the *id* parameter.)
            for nvf_id in form.cleaned_data['nvfs']:
                alert.nvfs.add(get_object_or_404(Nvf, id=nvf_id))
            messages.success(request, "Alert created successfully!", extra_tags="alert alert-success")
        return redirect("pools:details", id=utran.id)
    if form.errors:
        messages.success(request, form.errors, extra_tags="alert alert-danger")
        return redirect("pools:details", id=utran.id)
    context = {
        "user": request.user,
        "utran": utran,
        "form": form,
    }
    return render(request, "pools/alert.html", context)
@login_required(login_url='/login/')
def scheduler(request, id=None):
    """Attach a new Scheduler to pool *id*, targeting the selected BBU NVFs."""
    utran = get_object_or_404(Pool, id=id)
    form = SchedulerForm(request.POST or None, nvfs=Bbu.objects.filter(ns=utran))
    if form.is_valid():
        try:
            # Scheduler names must be unique per operator.
            Scheduler.objects.get(operator__user=request.user, name=form.cleaned_data['name'])
            messages.success(request, "Name repeated!", extra_tags="alert alert-danger")
        except Scheduler.DoesNotExist:
            # BUGFIX: was a bare ``except:`` that swallowed all errors in
            # the creation path, not just the expected missing-scheduler case.
            scheduler = form.save(commit=False)
            scheduler.operator = get_object_or_404(Operator, user=request.user)
            scheduler.scenario = utran.scenario
            scheduler.type = "nvf"
            scheduler.ns = utran
            scheduler.save()
            # Link the chosen NVFs once the scheduler has a primary key.
            for nvf_id in form.cleaned_data['nvfs']:
                scheduler.nvfs.add(get_object_or_404(Nvf, id=nvf_id))
            messages.success(request, "Scheduler created successfully!", extra_tags="alert alert-success")
        return redirect("pools:details", id=utran.id)
    if form.errors:
        messages.success(request, form.errors, extra_tags="alert alert-danger")
        return redirect("pools:details", id=utran.id)
    context = {
        "user": request.user,
        "utran": utran,
        "form": form,
    }
    return render(request, "pools/scheduler.html", context)
|
nilq/baby-python
|
python
|
from greent.rosetta import Rosetta
import json
from collections import defaultdict
def setup():
    """Build a Rosetta instance and return the neo4j driver behind its type graph."""
    return Rosetta().type_graph.driver
def dumpem(dtype = 'gene'):
    """Dump every edge touching a `dtype` node into genedump.json.

    The output maps each node id to a list of {'predicate', 'node'} dicts
    describing its neighbours.
    """
    driver = setup()
    query = f'MATCH (a:{dtype})-[r]-(b) return a,r,b'
    with driver.session() as session:
        # materialise the records before the session closes
        records = list(session.run(query))
    neighbours = defaultdict(list)
    for record in records:
        source_id = record['a']['id']
        neighbours[source_id].append({'predicate': record['r'].type, 'node': record['b']['id']})
    with open('genedump.json','w') as outf:
        json.dump(neighbours,outf,indent=4)
if __name__ == '__main__':
    dumpem()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
GeometryWrapper
A QGIS plugin
Converts geometry longitude from [-180,180] to [0,360]
-------------------
begin : 2017-03-16
git sha : $Format:%H$
copyright : (C) 2017 by Jonah
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QFileInfo
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction, QFileDialog, QMessageBox
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .geometry_wrapper_dialog import GeometryWrapperDialog
import os
from .utils import process_raster_file, process_vector_file
from .utils import process_vector_layer
from qgis.core import QgsRasterLayer, QgsVectorLayer, QgsProject, QgsVectorFileWriter
try:
from qgis.core import QgsMapLayerType
except ImportError:
from qgis.core import QgsMapLayer
QgsMapLayerType = QgsMapLayer.LayerType
class GeometryWrapper:
    """QGIS Plugin Implementation.

    Converts the longitude range of a raster/vector dataset or loaded layer
    between [-180, 180] and [0, 360] via the helpers in .utils.
    """
    def __init__(self, iface):
        """Constructor.
        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # Declare instance attributes
        self.actions = []
        self.menu = u'&Geometry Wrapper'
        self.toolbar = self.iface.addToolBar(u'GeometryWrapper')
        self.toolbar.setObjectName(u'GeometryWrapper')
        # listen for browse button
        self.dlg = GeometryWrapperDialog()
        self.dlg.input_button.clicked.connect(self.set_in_dataset)
        # initialise other variables
        self.selected_tab = None        # "file" or "layer", set in run()
        self.input_dataset = None       # absolute path chosen in the file dialog
        self.input_layer = None         # layer chosen in the layer combobox
        self.data_type = None           # 'raster' or 'vector'
        self.longitude_range = None     # '180' or '360' (see run())
        self.output_file = None
        self.output_layer = None
    def add_action(
        self,
        icon_path,
        text,
        callback,
        enabled_flag=True,
        add_to_menu=True,
        add_to_toolbar=True,
        status_tip=None,
        whats_this=None,
        parent=None):
        """Add a toolbar icon to the toolbar.
        :param icon_path: Path to the icon for this action. Can be a resource
            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
        :type icon_path: str
        :param text: Text that should be shown in menu items for this action.
        :type text: str
        :param callback: Function to be called when the action is triggered.
        :type callback: function
        :param enabled_flag: A flag indicating if the action should be enabled
            by default. Defaults to True.
        :type enabled_flag: bool
        :param add_to_menu: Flag indicating whether the action should also
            be added to the menu. Defaults to True.
        :type add_to_menu: bool
        :param add_to_toolbar: Flag indicating whether the action should also
            be added to the toolbar. Defaults to True.
        :type add_to_toolbar: bool
        :param status_tip: Optional text to show in a popup when mouse pointer
            hovers over the action.
        :type status_tip: str
        :param parent: Parent widget for the new action. Defaults None.
        :type parent: QWidget
        :param whats_this: Optional text to show in the status bar when the
            mouse pointer hovers over the action.
        :returns: The action that was created. Note that the action is also
            added to self.actions list.
        :rtype: QAction
        """
        # Create the dialog (after translation) and keep reference
        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)
        if status_tip is not None:
            action.setStatusTip(status_tip)
        if whats_this is not None:
            action.setWhatsThis(whats_this)
        if add_to_toolbar:
            self.toolbar.addAction(action)
        if add_to_menu:
            self.iface.addPluginToMenu(
                self.menu,
                action)
        self.actions.append(action)
        return action
    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        icon_path = os.path.join(self.plugin_dir, "icon.png")
        self.add_action(
            icon_path,
            text=u'Geometry Wrapper',
            callback=self.run,
            parent=self.iface.mainWindow())
    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""
        for action in self.actions:
            self.iface.removePluginMenu(
                u'&Geometry Wrapper',
                action)
            self.iface.removeToolBarIcon(action)
        # remove the toolbar
        del self.toolbar
    # display file dialog to select input dataset
    def set_in_dataset(self):
        """Let the user browse for a .shp/.tif input and echo the chosen path in the dialog."""
        input_name = QFileDialog.getOpenFileName(None,
                                                 'Select input dataset',
                                                 '',
                                                 "raster or vector (*.shp *.tif)",
                                                 )
        if input_name:
            self.input_dataset = QFileInfo(input_name[0]).absoluteFilePath()
            self.dlg.input_dataset.setText(QFileInfo(input_name[0]).absoluteFilePath())
    def run(self):
        """Run method that performs all the real work"""
        # clear the input_dataset field
        self.dlg.input_dataset.clear()
        # show the dialog
        self.dlg.show()
        # set up an empty message box
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Warning)
        msg.setWindowTitle("Geometry Wrapper")
        msg.setStandardButtons(QMessageBox.Ok)
        # Run the dialog event loop
        result = self.dlg.exec_()
        # See if OK was pressed
        if result:
            # set output longitude range
            # NOTE(review): initialised to int 0 but reassigned the *strings*
            # '180'/'360' below; downstream code only formats it into names,
            # so the mixed types look harmless — confirm one radio button is
            # always checked, otherwise '0' ends up in output file names.
            self.longitude_range = 0
            if self.dlg.radio_button180.isChecked():
                self.longitude_range = '180'
            elif self.dlg.radio_button360.isChecked():
                self.longitude_range = '360'
            # check whether file or layer tab is selected
            if self.dlg.file_layer_tab_widget.currentIndex() == 1:
                self.selected_tab = "file"
            else:
                self.selected_tab = "layer"
            if self.selected_tab == "file":
                # process file
                self.data_type = ''
                file_name = self.input_dataset
                file_info = QFileInfo(self.input_dataset)
                # probe the file as raster first, then vector
                raster_layer = QgsRasterLayer(file_name)
                vector_layer = QgsVectorLayer(file_name, "ogr")
                if raster_layer.isValid():
                    self.data_type = 'raster'
                    if not raster_layer.crs().isGeographic():
                        msg.setText("Input dataset must have geographic coordinate system (such as WGS84)")
                        msg.exec_()
                        # NOTE(review): re-entering run() recursively re-opens
                        # the dialog instead of returning; repeated errors nest
                        # event loops — confirm this is acceptable.
                        self.run()
                elif vector_layer.isValid():
                    self.data_type = 'vector'
                    if not vector_layer.crs().isGeographic():
                        msg.setText("Input dataset must have geographic coordinate system (such as WGS84)")
                        msg.exec_()
                        self.run()
                # send data for processing
                if self.data_type == 'vector':
                    # NOTE(review): split(os.extsep)[0] truncates at the first
                    # dot anywhere in the path — dotted directory names or
                    # multi-dot file names would be mangled; verify inputs.
                    self.output_file = self.input_dataset.split(os.extsep)[0] + "_" + str(self.longitude_range) + ".shp"
                    if os.path.exists(self.output_file):
                        msg.setText("Cannot overwrite existing file " + os.path.basename(self.output_file))
                        msg.exec_()
                        self.run()
                    else:
                        vector_layer = process_vector_file(self.input_dataset, self.longitude_range)
                        writer = QgsVectorFileWriter.writeAsVectorFormat(vector_layer,
                                                                         self.output_file,
                                                                         "utf-8",
                                                                         vector_layer.crs(),
                                                                         "ESRI Shapefile")
                        base_name = file_info.baseName() + "_" + str(self.longitude_range)
                        if self.dlg.add_to_toc.isChecked():
                            self.output_layer = QgsVectorLayer(self.output_file, base_name, "ogr")
                            if self.output_layer.isValid():
                                QgsProject.instance().addMapLayer(self.output_layer)
                elif self.data_type == 'raster':
                    self.output_file = self.input_dataset.split(os.extsep)[0] + "_" + str(self.longitude_range) + ".tif"
                    if os.path.exists(self.output_file):
                        msg.setText("Cannot overwrite existing file " + os.path.basename(self.output_file))
                        msg.exec_()
                        self.run()
                    else:
                        process_raster_file(self.input_dataset, self.longitude_range, self.output_file)
                        file_info = QFileInfo(self.output_file)
                        base_name = file_info.baseName()
                        if self.dlg.add_to_toc.isChecked():
                            self.output_layer = QgsRasterLayer(self.output_file, base_name)
                            if self.output_layer.isValid():
                                QgsProject.instance().addMapLayer(self.output_layer)
            elif self.selected_tab == "layer":
                # process layer
                self.input_layer = self.dlg.layer_combobox.currentLayer()
                if self.input_layer.type() == QgsMapLayerType.VectorLayer:
                    self.data_type = "vector"
                elif self.input_layer.type() == QgsMapLayerType.RasterLayer:
                    self.data_type = "raster"
                else:
                    msg.setText("Input dataset must be vector or raster")
                    msg.exec_()
                if not self.input_layer.crs().isGeographic():
                    msg.setText("Input dataset must have geographic coordinate system (such as WGS84)")
                    msg.exec_()
                else:
                    if self.input_layer.isValid():
                        if self.data_type == "vector":
                            self.output_layer = process_vector_layer(self.input_layer, self.longitude_range)
                        else:
                            # rasters are processed through their on-disk source file
                            raster_in_file = self.input_layer.dataProvider().dataSourceUri()
                            if os.path.exists(raster_in_file):
                                raster_out_file = os.path.join(raster_in_file.split(os.extsep)[0] + "_" + str(self.longitude_range) + os.extsep + raster_in_file.split(os.extsep)[1])
                                self.output_layer = process_raster_file(raster_in_file, self.longitude_range, raster_out_file)
                    else:
                        msg.setText("Input layer is not valid for some reason")
                        msg.exec_()
                    QgsProject.instance().addMapLayer(self.output_layer)
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.5 on 2020-04-06 15:06
from django.db import migrations
class Migration(migrations.Migration):
    """Set a human-friendly plural name ("entries") for the Entry model."""
    dependencies = [
        ('shortner', '0002_auto_20200331_0717'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='entry',
            options={'verbose_name_plural': 'entries'},
        ),
    ]
|
nilq/baby-python
|
python
|
from numpy import diff
def check_sorted(nu___):
    """Return True if the flattened array is monotonically non-decreasing
    or non-increasing (constant arrays count as both)."""
    steps = diff(nu___.ravel())
    non_increasing = (steps <= 0).all()
    non_decreasing = (0 <= steps).all()
    return non_increasing or non_decreasing
|
nilq/baby-python
|
python
|
'''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
import torch
__all__ = ['vgg19_bn_para']
class VGG(nn.Module):
    """Model-parallel VGG: the feature stack is partitioned across `gpu_num`
    GPUs and micro-batches of `split_size` rows are pipelined through the
    partitions. FC layers are reduced to a single Linear head.
    """
    def __init__(self, features, gpu_num = 2, num_classes=1000, split_size=64):
        super(VGG, self).__init__()
        self.split_size = split_size  # micro-batch size used for pipelining
        self.gpu_para = gpu_num       # partitioning scheme selector (0/2/4/5/10)
        # NOTE(review): called before any submodule is registered, so
        # self.modules() is empty here and the custom init below never touches
        # the conv/BN/linear layers — confirm whether this is intentional.
        self._initialize_weights()
        if gpu_num == 0:
            # single-GPU baseline: no partitioning, no pipelining
            self.features = features.cuda("cuda:0")
            self.classifier = nn.Linear(512, num_classes).cuda("cuda:0")
        if gpu_num == 2:
            self.features_1 = features[0:27].cuda("cuda:0")
            self.features_2 = features[27:].cuda("cuda:1")
            self.classifier = nn.Linear(512, num_classes).cuda("cuda:1")
        elif gpu_num == 4:
            self.features_1 = features[0:14].cuda("cuda:0")
            self.features_2 = features[14:27].cuda("cuda:1")
            self.features_3 = features[27:40].cuda("cuda:2")
            self.features_4 = features[40:].cuda("cuda:3")
            self.classifier = nn.Linear(512, num_classes).cuda("cuda:3")
        elif gpu_num == 10:
            self.features_1 = features[0:3].cuda("cuda:0")
            self.features_2 = features[3:7].cuda("cuda:1")
            self.features_3 = features[7:10].cuda("cuda:2")
            self.features_4 = features[10:14].cuda("cuda:3")
            self.features_5 = features[14:20].cuda("cuda:4")
            self.features_6 = features[20:27].cuda("cuda:5")
            self.features_7 = features[27:33].cuda("cuda:6")
            self.features_8 = features[33:40].cuda("cuda:7")
            self.features_9 = features[40:49].cuda("cuda:8")
            self.features_10 = features[49:].cuda("cuda:9")
            self.classifier = nn.Linear(512, num_classes).cuda("cuda:9")
        elif gpu_num == 5:
            self.features_1 = features[0:10].cuda("cuda:0")
            self.features_2 = features[10:20].cuda("cuda:1")
            self.features_3 = features[20:30].cuda("cuda:2")
            self.features_4 = features[30:40].cuda("cuda:3")
            self.features_5 = features[40:].cuda("cuda:4")
            self.classifier = nn.Linear(512, num_classes).cuda("cuda:4")
    def forward(self, x):
        # Each multi-GPU branch implements the same pipeline: feed micro-batch
        # i into stage 1 while micro-batch i-1 advances through the later
        # stages; the last micro-batch is flushed after the loop, and the
        # per-micro-batch logits are concatenated back into one batch.
        if self.gpu_para == 4:
            splits = iter(x.split(self.split_size, dim=0))
            s_next = next(splits)
            s_prev = self.features_1(s_next).cuda("cuda:1")
            ret = []
            for s_next in splits:
                s_prev = self.features_2(s_prev).cuda("cuda:2")
                s_prev = self.features_3(s_prev).cuda("cuda:3")
                s_prev = self.features_4(s_prev)
                s_prev = s_prev.view(s_prev.size(0), -1)
                ret.append(self.classifier(s_prev))
                s_prev = self.features_1(s_next).cuda("cuda:1")
            # flush the final micro-batch through the remaining stages
            s_prev = self.features_2(s_prev).cuda("cuda:2")
            s_prev = self.features_3(s_prev).cuda("cuda:3")
            s_prev = self.features_4(s_prev)
            s_prev = s_prev.view(s_prev.size(0), -1)
            ret.append(self.classifier(s_prev))
            x = torch.cat(ret)
        elif self.gpu_para == 0:
            x = self.features(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
        elif self.gpu_para == 2:
            splits = iter(x.split(self.split_size, dim=0))
            s_next = next(splits)
            s_prev = self.features_1(s_next).cuda("cuda:1")
            ret = []
            for s_next in splits:
                s_prev = self.features_2(s_prev)
                s_prev = s_prev.view(s_prev.size(0), -1)
                ret.append(self.classifier(s_prev))
                s_prev = self.features_1(s_next).cuda("cuda:1")
            s_prev = self.features_2(s_prev)
            s_prev = s_prev.view(s_prev.size(0), -1)
            ret.append(self.classifier(s_prev))
            x = torch.cat(ret)
        elif self.gpu_para == 10:
            splits = iter(x.split(self.split_size, dim=0))
            s_next = next(splits)
            s_prev = self.features_1(s_next).cuda("cuda:1")
            ret = []
            for s_next in splits:
                s_prev = self.features_2(s_prev).cuda("cuda:2")
                s_prev = self.features_3(s_prev).cuda("cuda:3")
                s_prev = self.features_4(s_prev).cuda("cuda:4")
                s_prev = self.features_5(s_prev).cuda("cuda:5")
                s_prev = self.features_6(s_prev).cuda("cuda:6")
                s_prev = self.features_7(s_prev).cuda("cuda:7")
                s_prev = self.features_8(s_prev).cuda("cuda:8")
                s_prev = self.features_9(s_prev).cuda("cuda:9")
                s_prev = self.features_10(s_prev)
                s_prev = s_prev.view(s_prev.size(0), -1)
                ret.append(self.classifier(s_prev))
                s_prev = self.features_1(s_next).cuda("cuda:1")
            s_prev = self.features_2(s_prev).cuda("cuda:2")
            s_prev = self.features_3(s_prev).cuda("cuda:3")
            s_prev = self.features_4(s_prev).cuda("cuda:4")
            s_prev = self.features_5(s_prev).cuda("cuda:5")
            s_prev = self.features_6(s_prev).cuda("cuda:6")
            s_prev = self.features_7(s_prev).cuda("cuda:7")
            s_prev = self.features_8(s_prev).cuda("cuda:8")
            s_prev = self.features_9(s_prev).cuda("cuda:9")
            s_prev = self.features_10(s_prev)
            s_prev = s_prev.view(s_prev.size(0), -1)
            ret.append(self.classifier(s_prev))
            x = torch.cat(ret)
        elif self.gpu_para == 5:
            splits = iter(x.split(self.split_size, dim=0))
            s_next = next(splits)
            s_prev = self.features_1(s_next).cuda("cuda:1")
            ret = []
            for s_next in splits:
                s_prev = self.features_2(s_prev).cuda("cuda:2")
                s_prev = self.features_3(s_prev).cuda("cuda:3")
                s_prev = self.features_4(s_prev).cuda("cuda:4")
                s_prev = self.features_5(s_prev)
                s_prev = s_prev.view(s_prev.size(0), -1)
                ret.append(self.classifier(s_prev))
                s_prev = self.features_1(s_next).cuda("cuda:1")
            s_prev = self.features_2(s_prev).cuda("cuda:2")
            s_prev = self.features_3(s_prev).cuda("cuda:3")
            s_prev = self.features_4(s_prev).cuda("cuda:4")
            s_prev = self.features_5(s_prev)
            s_prev = s_prev.view(s_prev.size(0), -1)
            ret.append(self.classifier(s_prev))
            x = torch.cat(ret)
        else:
            # fallback: any other gpu_para value assumes the 2-way split exists
            x = self.features_1(x)
            x = self.features_2(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
        return x
    def _initialize_weights(self):
        # He-style init for convs, unit/zero for BN, small-normal for linear
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, gpu_para, batch_norm=False):
    """Build the VGG feature extractor described by ``cfg``.

    Args:
        cfg: list of layer specs; an int is the output-channel count of a
            3x3 conv (padding 1), 'M' inserts a 2x2/stride-2 max-pool.
        gpu_para: kept for backward compatibility; unused here (device
            placement happens in VGG.__init__).
        batch_norm: insert a BatchNorm2d after every convolution when True.

    Returns:
        nn.Sequential containing the configured layers (input is 3-channel).
    """
    layers = []
    in_channels = 3
    for v in cfg:  # the enumerate() index in the original was unused
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
# Layer configurations for the VGG variants: ints are conv output-channel
# counts, 'M' marks a 2x2 max-pool ('A'~VGG11, 'B'~VGG13, 'D'~VGG16, 'E'~VGG19).
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg19_bn_para(num_classes=10, gpu_num = 2, split_size=64):
    """Build a model-parallel VGG-19 (configuration 'E') with batch normalization."""
    features = make_layers(cfg['E'], 2, batch_norm=True)
    return VGG(features, gpu_num=gpu_num, num_classes=num_classes, split_size=split_size)
|
nilq/baby-python
|
python
|
# different ways to import modules with longer names
import mape.helper as helper
# import a specific function so it can be used as if it were defined locally
#from helper import ievadiSkaitli
def main():
    """Read two numbers via the helper module, print their sum and append it to summa.dat."""
    sk1 = helper.ievadiSkaitli()
    sk2 = helper.ievadiSkaitli()
    print("Ievadīto skaitļu summa ir", sk1 + sk2)
    helper.pievienotFailam("summa.dat", sk1 + sk2)
#print(__name__)
if __name__ == '__main__':
    main()
else:
    # warns (in Latvian) that this script is not meant to be imported
    print("Šī programma nav domāta importam")
|
nilq/baby-python
|
python
|
import six
import time
from collections import defaultdict
import ujson as json
import pandas as pd
from oct.results.models import db, Result, Turret
class ReportResults(object):
    """Represent a report containing all tests results
    :param int run_time: the run_time of the script
    :param int interval: the time interval between each group of results
    """
    def __init__(self, run_time, interval):
        # NOTE(review): run_time is accepted but never stored or used in this
        # class — confirm whether callers still rely on the parameter.
        self.total_transactions = 0
        # count rows carrying a non-empty, non-null error message
        self.total_errors = Result.select(Result.id).where(Result.error != "", Result.error != None).count()
        self.total_timers = 0
        self.timers_results = {}
        # custom timer name -> list of (epoch, value) pairs, filled lazily
        self._timers_values = defaultdict(list)
        self.turrets = []
        self.main_results = {}
        self.interval = interval
        self._init_turrets()
    def _init_dates(self):
        """Initialize all dates properties
        """
        if self.total_transactions == 0:
            return None
        # first/last result epochs bound the test run
        self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch
        self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch
        self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))
        self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish))
    def _init_dataframes(self):
        """Initialise the main dataframe for the results and the custom timers dataframes
        """
        df = pd.read_sql_query("SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC",
                               db.get_conn())
        self._get_all_timers(df)
        self.main_results = self._get_processed_dataframe(df)
        # create all custom timers dataframes
        for key, value in six.iteritems(self._timers_values):
            df = pd.DataFrame(value, columns=['epoch', 'scriptrun_time'])
            df.index = pd.to_datetime(df['epoch'], unit='s')
            timer_results = self._get_processed_dataframe(df)
            self.timers_results[key] = timer_results
        # clear memory
        del self._timers_values
    def _get_all_timers(self, dataframe):
        """Get all timers and set them in the _timers_values property
        :param pandas.DataFrame dataframe: the main dataframe with row results
        """
        # custom_timers is stored as a JSON object per row
        s = dataframe['custom_timers'].apply(json.loads)
        s.index = dataframe['epoch']
        for index, value in s.iteritems():
            if not value:
                continue
            # NOTE(review): the inner loop rebinds `value`, shadowing the
            # outer loop variable; harmless as written, but fragile.
            for key, value in six.iteritems(value):
                self._timers_values[key].append((index, value))
                self.total_timers += 1
        del dataframe['custom_timers']
        del s
    def _get_processed_dataframe(self, dataframe):
        """Generate required dataframe for results from raw dataframe
        :param pandas.DataFrame dataframe: the raw dataframe
        :return: a dict containing raw, compiled, and summary dataframes from original dataframe
        :rtype: dict
        """
        dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)
        del dataframe['epoch']
        summary = dataframe.describe(percentiles=[.80, .90, .95]).transpose().loc['scriptrun_time']
        # NOTE(review): pd.TimeGrouper was removed in pandas 0.25+;
        # pd.Grouper(freq=...) is the modern replacement — confirm the
        # pinned pandas version before upgrading.
        df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))
        df_final = df_grp.apply(lambda x: x.describe(percentiles=[.80, .90, .95])['scriptrun_time'])
        return {
            "raw": dataframe.round(2),
            "compiled": df_final.round(2),
            "summary": summary.round(2)
        }
    def _init_turrets(self):
        """Setup data from database
        """
        for turret in Turret.select():
            self.turrets.append(turret.to_dict())
    def compile_results(self):
        """Compile all results for the current test
        """
        self._init_dataframes()
        self.total_transactions = len(self.main_results['raw'])
        self._init_dates()
|
nilq/baby-python
|
python
|
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1SubjectRulesReviewStatusDict generated type."""
from typing import TypedDict, List
from kubernetes_typed.client import V1NonResourceRuleDict, V1ResourceRuleDict
# All keys are optional (total=False), matching the generated model for
# Kubernetes SubjectRulesReviewStatus.
V1SubjectRulesReviewStatusDict = TypedDict(
    "V1SubjectRulesReviewStatusDict",
    {
        "evaluationError": str,
        "incomplete": bool,
        "nonResourceRules": List[V1NonResourceRuleDict],
        "resourceRules": List[V1ResourceRuleDict],
    },
    total=False,
)
|
nilq/baby-python
|
python
|
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the Server class.
Classes:
Server - provides methods to start/stop the Proteus server
"""
import os
import subprocess
from typing import Optional
import shutil
from .exceptions import InvalidArgument
class Server:
    """
    The Server class provides methods to control the Proteus server.
    """
    def __init__(self, executable: Optional[str] = None, http_port: int = 8998):
        """
        Construct a server object
        Args:
            executable (str, optional): path to the proteus-server binary;
                if None, it is derived from $PROTEUS_ROOT's build tree or,
                failing that, looked up on the PATH.
            http_port (int): HTTP port to use for the server
        """
        self.pid: int = 0  # 0 means "not started"
        self.http_port = http_port
        self.executable = None
        if executable is None:
            root = os.getenv("PROTEUS_ROOT")
            if root is not None:
                try:
                    # the first token of config.txt names the active build flavour
                    with open(root + "/build/config.txt", "r") as f:
                        build = f.read().replace("\n", "").split(" ")[0]
                except FileNotFoundError:
                    build = "Debug"  # try for Debug by default
                local_server_path = f"{root}/build/{build}/src/proteus/proteus-server"
                if os.path.exists(local_server_path):
                    self.executable = local_server_path
            if self.executable is None:
                if shutil.which("proteus-server") is None:
                    raise InvalidArgument(
                        "Path to proteus-server cannot be derived. Specify the path explicitly or add it to the PATH"
                    )
                else:
                    # use the proteus-server that exists on the PATH
                    self.executable = "proteus-server"
        else:
            self.executable = executable
    def start(self, quiet=False):
        """
        Start the proteus server
        Args:
            quiet (bool, optional): Suppress all output if True. Defaults to False.
        """
        proteus_command = [self.executable, "--http_port", str(self.http_port)]
        if quiet:
            # discard the server's stdout and stderr entirely
            p = subprocess.Popen(
                proteus_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
            )
        else:
            p = subprocess.Popen(proteus_command)
        self.pid = p.pid
    def stop(self, kill=False):
        """
        Stop the proteus server
        Args:
            kill (bool, optional): Use signal 9 to kill. Defaults to False.
        """
        # NOTE(review): shells out to `kill`, so this is POSIX-only;
        # -2 (SIGINT) requests a graceful shutdown, -9 (SIGKILL) forces it.
        signal = "-9" if kill else "-2"
        if self.pid:
            subprocess.call(["kill", signal, str(self.pid)])
            self.pid = 0
|
nilq/baby-python
|
python
|
import os
from abc import abstractmethod
from collections import defaultdict
import PIL
import cv2
import math
import numpy as np
import sldc
import torch
from PIL import Image
from cytomine.models import Annotation
from rasterio.features import rasterize
from shapely import wkt
from shapely.affinity import translate, affine_transform
from shapely.geometry import box
from shapely.geometry.base import BaseGeometry
from sldc import TileTopology
from sldc.image import FixedSizeTileTopology, DefaultTileBuilder
from sldc_cytomine import CytomineTileBuilder, CytomineSlide
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision import transforms
class PilImage(sldc.Image):
    """sldc Image backed by an RGB numpy array loaded from disk with OpenCV."""
    def __init__(self, filepath):
        self._filepath = filepath
        # cv2 loads BGR; reversing the channel axis yields RGB
        self._image = cv2.imread(self._filepath)[:, :, ::-1]
    @property
    def image(self):
        """Raw RGB pixel array."""
        return self._image
    @property
    def height(self):
        return self.image.shape[0]
    @property
    def width(self):
        return self.image.shape[1]
    @property
    def channels(self):
        return self.image.shape[-1]
    @property
    def np_image(self):
        # guard against files that failed to decode into pixels
        if self.image.ndim == 0:
            raise ValueError("image empty '{}'".format(self._filepath))
        return self.image
def powdiv(v, p):
    """Divide v by 2**p (used to rescale coordinates across zoom levels)."""
    scale = 2 ** p
    return v / scale
def convert_poly(p, zoom, im_height):
    """Move a polygon to the correct zoom level and referential"""
    factor = powdiv(1, zoom)
    scaled = affine_transform(p, [factor, 0, 0, factor, 0, 0])
    # flip the y-axis: image origin is top-left, annotations use bottom-left
    return affine_transform(scaled, [1, 0, 0, -1, 0, im_height])
class BaseAnnotationCrop(object):
    """Interface for objects producing (crop, mask) pairs for an annotation."""
    @abstractmethod
    def random_crop_and_mask(self):
        """Return a random fixed-size (location, crop, mask) triple."""
    @abstractmethod
    def crop_and_mask(self):
        """Return the full (crop, mask) pair."""
class AnnotationCrop(BaseAnnotationCrop):
    """Crop/mask extraction for one Cytomine annotation of a whole-slide image.

    The bounding window around the annotation (padded to at least tile_size on
    each side) is downloaded once and cached on disk as a PNG; masks are
    rasterized from the annotation polygon plus any intersecting polygons.
    """
    def __init__(self, wsi, annotation, working_path, tile_size=512, zoom_level=0, n_jobs=0, intersecting=None):
        self._annotation = annotation
        self._tile_size = tile_size
        self._wsi = CytomineSlide(wsi, zoom_level=zoom_level)
        self._builder = CytomineTileBuilder(working_path, n_jobs=n_jobs)
        self._working_path = working_path
        self._zoom_level = zoom_level
        self._other_annotations = [] if intersecting is None else intersecting
        self._other_polygons = [self._annot2poly(a) for a in self._other_annotations]
    @property
    def tile_size(self):
        return self._tile_size
    @property
    def wsi(self):
        return self._wsi
    @property
    def image_instance(self):
        return self._wsi.image_instance
    @property
    def annotation(self):
        return self._annotation
    @property
    def polygon(self):
        return self._polygon()
    @property
    def image_box(self):
        return self._extract_image_box()
    def _get_start_and_size_over_dimension(self, crop_start, crop_size, wsi_size):
        """Pad the crop to tile_size along one axis, clamped inside the slide."""
        start = crop_start
        size = crop_size
        if crop_size < self._tile_size:
            # center the tile-sized window on the (smaller) crop
            start = crop_start + (crop_size - self._tile_size) // 2
            size = self._tile_size
        # make sure that the tile is in the image
        start = max(0, start)
        start = min(start, wsi_size - size)
        if start < 0:
            raise ValueError("image is smaller than the tile size")
        return start, size
    def _extract_image_box(self):
        """Return ((x, y), width, height) of the padded annotation window."""
        crop_width, crop_height = self._crop_dims()
        crop_x_min, crop_y_min, crop_x_max, crop_y_max = self._crop_bounds()
        image_x_min, image_width = self._get_start_and_size_over_dimension(crop_x_min, crop_width, self._wsi.width)
        image_y_min, image_height = self._get_start_and_size_over_dimension(crop_y_min, crop_height, self._wsi.height)
        return (image_x_min, image_y_min), image_width, image_height
    def _get_image_filepath(self):
        # cache key encodes zoom level, image id and the window geometry
        (x, y), width, height = self._extract_image_box()
        return os.path.join(self._working_path, "{}-{}-{}-{}-{}-{}.png").format(self._zoom_level, self.image_instance.id, x, y, width, height)
    def _download_image(self):
        filepath = self._get_image_filepath()
        if not os.path.isfile(filepath):
            (x, y), width, height = self._extract_image_box()
            tile = self._wsi.tile(self._builder, (x, y), width, height)
            image = PIL.Image.fromarray(tile.np_image)
            image.save(filepath)
        return filepath
    def download(self, verbose=False):
        """Ensure the annotation window is cached on disk; return its path."""
        if verbose:
            print("download '{}'".format(self._get_image_filepath()))
        return self._download_image()
    def _polygon(self):
        return self._annot2poly(self._annotation)
    def _annot2poly(self, annot):
        """Parse an annotation's WKT and map it into this zoom level's referential."""
        polygon = wkt.loads(annot.location)
        return convert_poly(polygon, self._zoom_level, self.wsi.height)
    def _crop_bounds(self):
        """at the specified zoom level"""
        x_min, y_min, x_max, y_max = self._polygon().bounds
        return int(x_min), int(y_min), math.ceil(x_max), math.ceil(y_max)
    def _crop_dims(self):
        x_min, y_min, x_max, y_max = self._crop_bounds()
        return x_max - x_min, y_max - y_min
    def _robust_load_crop(self, x, y):
        """Load a tile-sized crop, re-downloading a corrupt cached PNG up to 3 times."""
        attempts = 0
        filepath = self._get_image_filepath()
        while True:
            try:
                return Image.open(filepath).crop([x, y, x + self._tile_size, y + self._tile_size])
            except OSError as e:
                # bug fix: the counter was never incremented, so the guard
                # below could never trigger and a persistently corrupt file
                # caused an infinite retry loop
                attempts += 1
                if attempts > 3:
                    raise e
                print("recreate '{}'".format(filepath))
                os.remove(filepath)
                self.download()
    def _robust_load_image(self):
        """Load the full cached window, re-downloading a corrupt PNG up to 3 times."""
        attempts = 0
        filepath = self._get_image_filepath()
        while True:
            try:
                return Image.open(filepath)
            except OSError as e:
                attempts += 1  # bug fix: counter was never incremented (see above)
                if attempts > 3:
                    raise e
                print("recreate '{}'".format(filepath))
                os.remove(filepath)
                self.download()
    def random_crop_and_mask(self):
        """in image coordinate system"""
        (x_min, y_min), width, height = self._extract_image_box()
        x = np.random.randint(0, width - self._tile_size + 1)
        y = np.random.randint(0, height - self._tile_size + 1)
        crop = self._robust_load_crop(x, y)
        mask = self._mask(x, y, self._tile_size, self._tile_size)
        return (x, y, self._tile_size, self._tile_size), crop, Image.fromarray(mask.astype(np.uint8))
    def crop_and_mask(self):
        """in image coordinates system, get full crop and mask"""
        _, width, height = self._extract_image_box()
        image = self._robust_load_image()
        mask = self._mask(0, 0, width, height)
        return image, Image.fromarray(mask.astype(np.uint8))
    def _mask(self, window_x, window_y, window_width, window_height):
        """Rasterize the annotation (+ intersecting) polygons inside the given window."""
        (crop_x, crop_y), crop_width, crop_height = self.image_box
        ground_truth = [self._polygon()] + self._other_polygons
        window = box(0, 0, window_width, window_height)
        # shift polygons into window coordinates and clip to the window
        fg = [translate(g, xoff=-(window_x + crop_x), yoff=-(window_y + crop_y)).intersection(window)
              for g in ground_truth]
        fg = [p for p in fg if not p.is_empty]
        if len(fg) > 0:
            mask = rasterize(fg, out_shape=(window_height, window_width), fill=0, dtype=np.uint8) * 255
        else:
            mask = np.zeros([window_height, window_width])
        return mask
    @property
    def intersecting(self):
        return self._other_annotations
    @property
    def sldc_image(self):
        return PilImage(self._get_image_filepath())
    @property
    def sldc_window(self):
        xmin, ymin, _, _ = self._crop_bounds()
        width, height = self._crop_dims()
        return self._wsi.window((xmin, ymin), width, height)
    def topology(self, width, height, overlap=0):
        base_topology = TileTopology(self.sldc_image, tile_builder=self.tile_builder, max_width=width, max_height=height, overlap=overlap)
        return FixedSizeTileTopology(base_topology)
    @property
    def tile_builder(self):
        return DefaultTileBuilder()
class AnnotationCropWithCue(BaseAnnotationCrop):
    """Wraps a crop so its masks are merged with a precomputed cue probability map."""
    def __init__(self, crop: BaseAnnotationCrop, cue):
        """
        Parameters
        ----------
        crop: BaseAnnotationCrop
        cue: ndarray
            Probability map for the cue np.array of float in [0, 1]
        """
        self._crop = crop
        self._cue = (cue * 255)
    def random_crop_and_mask(self):
        """Random crop whose mask is the cue window overridden by the true mask."""
        crop_location, crop, mask = self._crop.random_crop_and_mask()
        x, y, w, h = crop_location
        # bug fix: the slice is a *view* into self._cue; writing 255 into it
        # below permanently corrupted the stored cue map across calls
        final_mask = self._cue[y:(y+h), x:(x+w)].copy()
        final_mask[np.asarray(mask) > 0] = 255
        return crop_location, crop, Image.fromarray(final_mask.astype(np.uint8), "L")
    def crop_and_mask(self):
        """Full crop whose mask is the whole cue overridden by the true mask."""
        crop, mask = self._crop.crop_and_mask()
        # same aliasing fix as above: never mutate the cached cue in place
        final_mask = self._cue.copy()
        final_mask[np.asarray(mask) > 0] = 255
        return crop, Image.fromarray(final_mask)
    @property
    def cue(self):
        return self._cue
    @property
    def crop(self):
        return self._crop
class RemoteAnnotationCropTrainDataset(Dataset):
    """Training dataset yielding random (image, mask) pairs from annotation crops.

    Optional transforms: ``both_trans`` receives ``[image, mask]`` jointly,
    then ``image_trans`` and ``mask_trans`` are applied individually.
    """
    def __init__(self, crops, image_trans=None, both_trans=None, mask_trans=None):
        self._crops = crops
        self._both_trans = both_trans
        self._image_trans = image_trans
        self._mask_trans = mask_trans
    def __getitem__(self, item):
        _, image, mask = self._crops[item].random_crop_and_mask()
        if self._both_trans is not None:
            image, mask = self._both_trans([image, mask])
        if self._image_trans is not None:
            image = self._image_trans(image)
        if self._mask_trans is not None:
            mask = self._mask_trans(mask)
        return image, mask
    def __len__(self):
        return len(self._crops)
class TileTopologyDataset(Dataset):
    """Dataset over the tiles of a topology (tile identifiers are 1-based)."""
    def __init__(self, topology, trans=None):
        self._topology = topology
        self._trans = trans
    @property
    def topology(self):
        """Underlying tile topology."""
        return self._topology
    @property
    def trans(self):
        """Transform applied to each tile image (``None`` for identity)."""
        return self._trans
    def __getitem__(self, item):
        tile_identifier = item + 1  # topology tiles are numbered from 1
        tile_image = Image.fromarray(self._topology.tile(tile_identifier).np_image)
        if self._trans is not None:
            tile_image = self._trans(tile_image)
        return tile_identifier, tile_image
    def __len__(self):
        return len(self._topology)
def predict_roi(roi, ground_truth, model, device, in_trans=None, batch_size=1, tile_size=256, overlap=0, n_jobs=1, zoom_level=0):
    """
    Parameters
    ----------
    roi: AnnotationCrop
        The polygon representing the roi to process
    ground_truth: iterable of Annotation|Polygon
        The ground truth annotations
    model: nn.Module
        Segmentation network. Takes a batch of _images as input and outputs the foreground probability for all pixels
    device:
        A torch device to transfer data to
    in_trans: transforms.Transform
        A transform to apply before forwarding _images into the network
    batch_size: int
        Batch size
    tile_size: int
        Tile size
    overlap: int
        Tile tile_overlap
    n_jobs: int
        Number of jobs available
    zoom_level: int
        Zoom level
    Returns
    -------
    y_pred: ndarray
        Foreground probability map (float) for the whole roi
    y_true: ndarray
        Binary (0/1 uint8) ground truth mask for the whole roi
    """
    # topology
    tile_topology = roi.topology(width=tile_size, height=tile_size, overlap=overlap)
    (x_min, y_min), width, height = roi.image_box
    mask_dims = (height, width)
    # build ground truth mask in roi coordinates
    roi_poly = roi.polygon
    ground_truth = [(wkt.loads(g.location) if isinstance(g, Annotation) else g) for g in ground_truth]
    ground_truth = [convert_poly(g, zoom_level, roi.wsi.height) for g in ground_truth]
    translated_gt = [translate(g.intersection(roi_poly), xoff=-x_min, yoff=-y_min) for g in ground_truth]
    # rasterize rejects empty geometry lists / empty geometries
    translated_gt = [g for g in translated_gt if not g.is_empty]
    if len(translated_gt) > 0:
        y_true = rasterize(translated_gt, out_shape=mask_dims, fill=0, dtype=np.uint8)
    else:
        y_true = np.zeros(mask_dims, dtype=np.uint8)
    y_pred = np.zeros(y_true.shape, dtype=np.double)
    y_acc = np.zeros(y_true.shape, dtype=np.int64)  # np.int alias removed in numpy>=1.24
    # dataset and loader
    dataset = TileTopologyDataset(tile_topology, trans=in_trans)
    dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=n_jobs)
    for ids, x in dataloader:
        x = x.to(device)
        y = model.forward(x, sigmoid=True)
        # accumulate predictions (overlapping tiles are summed, then averaged)
        for i, identifier in enumerate(ids):
            x_off, y_off = tile_topology.tile_offset(identifier.item())
            y_pred[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += y[i].detach().cpu().squeeze().numpy()
            y_acc[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += 1
    # average multiple predictions; FixedSizeTileTopology covers every pixel,
    # so y_acc is strictly positive everywhere
    y_pred /= y_acc
    return y_pred, y_true
def datasets_size_cumsum(datasets):
    """Return per-dataset sizes and the exclusive cumulative sum of those sizes.

    ``cumsum[i]`` is the global index of the first sample of ``datasets[i]``.
    """
    sizes = np.array([len(d) for d in datasets])
    # np.int64 instead of the np.int alias (removed in numpy>=1.24)
    cumsum = np.concatenate([np.array([0]), np.cumsum(sizes[:-1], dtype=np.int64)])
    return sizes, cumsum
def get_sample_indexes(index, cumsum):
    """Map a global sample index to ``(dataset index, index within dataset)``.

    ``cumsum`` is the exclusive cumulative sum produced by ``datasets_size_cumsum``.
    """
    which = np.searchsorted(cumsum, index, side="right") - 1
    return which, index - cumsum[which]
class AnnotationCropTopoplogyDataset(Dataset):
    """Dataset over the tiles of one annotation crop, including tile offsets.

    Each sample is ``(tile_id, x_offset, y_offset, tile_image)``.
    """
    def __init__(self, crop, overlap=0, in_trans=None):
        topology = crop.topology(crop.tile_size, crop.tile_size, overlap=overlap)
        self._dataset = TileTopologyDataset(topology, trans=in_trans)
        self._crop = crop
    def __getitem__(self, item):
        identifier, tile = self._dataset[item]
        x_off, y_off = self._dataset.topology.tile_offset(identifier)
        return identifier, x_off, y_off, tile
    def __len__(self):
        return len(self._dataset)
class MultiCropsSet(Dataset):
    """Concatenation dataset over the tile topologies of several annotation crops.

    Each sample is ``(annotation_id, tile_id, x_offset, y_offset, tile_image)``.
    """
    def __init__(self, crops, in_trans, overlap=0):
        """
        Parameters
        ----------
        crops: iterable of AnnotationCrop
            Crops whose tiles are concatenated into a single dataset.
        in_trans: callable
            Transform applied to every tile image.
        overlap: int
            Tile overlap (in pixels) used when building each crop topology.
        """
        super().__init__()
        self._datasets = [
            AnnotationCropTopoplogyDataset(crop, overlap=overlap, in_trans=in_trans)
            for crop in crops]
        self._sizes, self._cumsum_sizes = datasets_size_cumsum(self._datasets)
    def __getitem__(self, index):
        dataset_index, relative_index = get_sample_indexes(index, self._cumsum_sizes)
        dataset = self._datasets[dataset_index]
        # prepend the annotation id so samples from different crops can be told apart
        return (dataset._crop.annotation.id,) + dataset[relative_index]
    def __len__(self):
        # offset of the last dataset plus its size
        return self._cumsum_sizes[-1] + len(self._datasets[-1])
def sizeof_fmt(num, suffix='B'):
    """Format a byte count as a human-readable string with binary prefixes."""
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    return "%.1f%s%s" % (value, 'Yi', suffix)
def predict_annotation_crops_with_cues(net, crops, device, in_trans=None, overlap=0, batch_size=8, n_jobs=1):
    """Run the network over every crop's tiles and attach the averaged
    probability map ("cue") to each crop.

    Parameters
    ----------
    net: nn.Module
        Segmentation network; its (sigmoid-ed) output is the per-pixel cue.
    crops: list of AnnotationCrop
    device: torch.device
    in_trans: callable
        Transform applied to every tile before inference.
    overlap: int
        Tile overlap (in pixels).
    batch_size: int
    n_jobs: int
        Number of data-loading workers.

    Returns
    -------
    list of AnnotationCropWithCue (or 0 when ``crops`` is empty).
    """
    if len(crops) == 0:
        # NOTE(review): kept as 0 for backward compatibility; an empty list
        # would be more consistent with the normal return type
        return 0
    dataset = MultiCropsSet(crops, in_trans=in_trans, overlap=overlap)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False,
                        num_workers=n_jobs, pin_memory=True, drop_last=False)
    tile_size = crops[0].tile_size
    # rough estimate: one float32 per pixel of every tile
    n_bytes = len(dataset) * tile_size * tile_size * 4
    print("> annot with cues needs approx {} of memory".format(sizeof_fmt(n_bytes)), flush=True)
    all_ys = defaultdict(list)
    net.eval()
    for annot_ids, tile_ids, xs, ys, tiles in loader:
        t = tiles.to(device)
        y = torch.sigmoid(net.forward(t))
        detached = y.detach().cpu().numpy()
        # group tile predictions (with their offsets) per annotation
        for i, (annot_id, tile_id, x_off, y_off) in enumerate(zip(annot_ids, tile_ids, xs, ys)):
            all_ys[annot_id.item()].append((tile_id.item(), (x_off.item(), y_off.item()), detached[i].squeeze()))
    awcues = list()
    for crop in crops:
        _, w, h = crop.image_box
        # np.float64 / np.int64: the bare np.float / np.int aliases were
        # removed in numpy>=1.24
        cue = np.zeros([h, w], dtype=np.float64)
        acc = np.zeros([h, w], dtype=np.int64)
        for tile_id, (x_off, y_off), y_pred in all_ys[crop.annotation.id]:
            cue[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += y_pred
            acc[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += 1
        # average overlapping tile predictions
        cue /= acc
        awcues.append(AnnotationCropWithCue(crop, cue=cue))
        del all_ys[crop.annotation.id]  # free per-crop predictions early
    return awcues
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2019 UCT Prague.
#
# ec5cbec43ec8_initial_layout.py is part of Invenio Explicit ACLs
# (see https://github.com/oarepo/invenio-explicit-acls).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Initial layout."""
import sqlalchemy as sa
from alembic import op
try:
    # prefer native PostgreSQL types when psycopg2 is installed
    from psycopg2 import apilevel
    from sqlalchemy.dialects.postgresql import JSONB as JSON
    from sqlalchemy.dialects.postgresql import ARRAY
    from invenio_explicit_acls.utils import ArrayType as FallbackArrayType
    fallback_StringArray = FallbackArrayType(sa.String())
    StringArray = ARRAY(sa.String).with_variant(fallback_StringArray, 'sqlite')
except ImportError:
    # psycopg2 not available: fall back to generic JSON and an emulated array type
    # (catch ImportError only — a bare except would hide unrelated errors)
    from sqlalchemy.types import JSON
    from invenio_explicit_acls.utils import ArrayType as ARRAY
    StringArray = ARRAY(sa.String())
# revision identifiers, used by Alembic.
revision = 'ec5cbec43ec8'
down_revision = None
branch_labels = ('invenio_explicit_acls',)
depends_on = None
def upgrade():
    """Upgrade database."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Base ACL table; the concrete ACL tables below share its "id" primary key
    # (joined-table inheritance discriminated by the "type" column).
    op.create_table('explicit_acls_acl',
    sa.Column('created', sa.DateTime(), nullable=False),
    sa.Column('updated', sa.DateTime(), nullable=False),
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('priority', sa.Integer(), nullable=True),
    sa.Column('schemas', StringArray, nullable=True),
    sa.Column('originator_id', sa.Integer(), nullable=False),
    sa.Column('type', sa.String(length=50), nullable=True),
    sa.Column('operation', sa.String(length=50), nullable=True),
    sa.ForeignKeyConstraint(['originator_id'], ['accounts_user.id'], name=op.f('fk_explicit_acls_acl_originator_id_accounts_user'), ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_acl'))
    )
    op.create_index(op.f('ix_explicit_acls_acl_originator_id'), 'explicit_acls_acl', ['originator_id'], unique=False)
    # Base actor table; the concrete actor tables below share its "id" primary key.
    op.create_table('explicit_acls_actor',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('type', sa.String(length=50), nullable=True),
    sa.Column('acl_id', sa.String(length=36), nullable=True),
    sa.ForeignKeyConstraint(['acl_id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_actor_acl_id_explicit_acls_acl')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_actor'))
    )
    # Concrete ACL subtypes (one row per ACL, keyed to explicit_acls_acl.id).
    op.create_table('explicit_acls_defaultacl',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_defaultacl_id_explicit_acls_acl')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_defaultacl'))
    )
    op.create_table('explicit_acls_elasticsearchacl',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('record_selector', JSON(astext_type=sa.Text()), nullable=True),
    sa.ForeignKeyConstraint(['id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_elasticsearchacl_id_explicit_acls_acl')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_elasticsearchacl'))
    )
    op.create_table('explicit_acls_idacl',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('record_id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_idacl_id_explicit_acls_acl')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_idacl'))
    )
    # Concrete actor subtypes (one row per actor, keyed to explicit_acls_actor.id).
    op.create_table('explicit_acls_system_role',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.Column('authenticated', sa.Boolean(), nullable=True),
    sa.Column('anonymous', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['id'], ['explicit_acls_actor.id'], name=op.f('fk_explicit_acls_system_role_id_explicit_acls_actor')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_system_role'))
    )
    op.create_table('explicit_acls_roleactor',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['id'], ['explicit_acls_actor.id'], name=op.f('fk_explicit_acls_roleactor_id_explicit_acls_actor')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_roleactor'))
    )
    op.create_table('explicit_acls_useractor',
    sa.Column('id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['id'], ['explicit_acls_actor.id'], name=op.f('fk_explicit_acls_useractor_id_explicit_acls_actor')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_useractor'))
    )
    # Many-to-many link tables between invenio-accounts roles/users and actors.
    op.create_table('explicit_acls_roles_roleactors',
    sa.Column('role_id', sa.Integer(), nullable=False),
    sa.Column('actor_id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['actor_id'], ['explicit_acls_roleactor.id'], name=op.f('fk_explicit_acls_roles_roleactors_actor_id_explicit_acls_roleactor')),
    sa.ForeignKeyConstraint(['role_id'], ['accounts_role.id'], name=op.f('fk_explicit_acls_roles_roleactors_role_id_accounts_role')),
    sa.PrimaryKeyConstraint('role_id', 'actor_id', name=op.f('pk_explicit_acls_roles_roleactors'))
    )
    op.create_table('explicit_acls_users_useractors',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('actor_id', sa.String(length=36), nullable=False),
    sa.ForeignKeyConstraint(['actor_id'], ['explicit_acls_useractor.id'], name=op.f('fk_explicit_acls_users_useractors_actor_id_explicit_acls_useractor')),
    sa.ForeignKeyConstraint(['user_id'], ['accounts_user.id'], name=op.f('fk_explicit_acls_users_useractors_user_id_accounts_user')),
    sa.PrimaryKeyConstraint('user_id', 'actor_id', name=op.f('pk_explicit_acls_users_useractors'))
    )
    # ### end Alembic commands ###
def downgrade():
    """Downgrade database."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are dropped in reverse creation order so that foreign key
    # constraints are removed before the tables they reference.
    op.drop_table('explicit_acls_users_useractors')
    op.drop_table('explicit_acls_roles_roleactors')
    op.drop_table('explicit_acls_useractor')
    op.drop_table('explicit_acls_roleactor')
    op.drop_table('explicit_acls_system_role')
    op.drop_table('explicit_acls_idacl')
    op.drop_table('explicit_acls_elasticsearchacl')
    op.drop_table('explicit_acls_defaultacl')
    op.drop_table('explicit_acls_actor')
    op.drop_index(op.f('ix_explicit_acls_acl_originator_id'), table_name='explicit_acls_acl')
    op.drop_table('explicit_acls_acl')
    # ### end Alembic commands ###
|
nilq/baby-python
|
python
|
# Bootstrap script: build the learner application with environment-specific
# secrets and start serving.
from language_learner_env import secret_settings
import learner
# listen() starts the server — presumably blocking until interrupted;
# confirm against the learner package's documentation.
app = learner.App(secret_settings)
app.listen()
|
nilq/baby-python
|
python
|
from maya import cmds as mc
from maya.api import OpenMaya as om
from dcc.maya.libs import transformutils
from . import transformmixin
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class ConstraintMixin(transformmixin.TransformMixin):
    """
    Overload of TransformMixin class used to interface with constraint nodes.
    """
    # Maya API types accepted by this mixin (native and plug-in constraints).
    __apitype__ = (om.MFn.kConstraint, om.MFn.kPluginConstraintNode)
    # Maps a target transform's plug names to the constraint's per-target
    # input plug names (connected for each element of the "target" array).
    __targets__ = {
        'translateX': 'targetTranslateX',
        'translateY': 'targetTranslateY',
        'translateZ': 'targetTranslateZ',
        'rotatePivotX': 'targetRotatePivotX',
        'rotatePivotY': 'targetRotatePivotY',
        'rotatePivotZ': 'targetRotatePivotZ',
        'rotatePivotTranslateX': 'targetRotateTranslateX',
        'rotatePivotTranslateY': 'targetRotateTranslateY',
        'rotatePivotTranslateZ': 'targetRotateTranslateZ',
        'scalePivotX': 'targetScalePivotX',
        'scalePivotY': 'targetScalePivotY',
        'scalePivotZ': 'targetScalePivotZ',
        'scalePivotTranslateX': 'targetScaleTranslateX',
        'scalePivotTranslateY': 'targetScaleTranslateY',
        'scalePivotTranslateZ': 'targetScaleTranslateZ',
        'rotateX': 'targetRotateX',
        'rotateY': 'targetRotateY',
        'rotateZ': 'targetRotateZ',
        'rotateOrder': 'targetRotateOrder',
        'jointOrientX': 'targetJointOrientX',
        'jointOrientY': 'targetJointOrientY',
        'jointOrientZ': 'targetJointOrientZ',
        'scaleX': 'targetScaleX',
        'scaleY': 'targetScaleY',
        'scaleZ': 'targetScaleZ',
        'inverseScale': 'targetInverseScale',
        'segmentScaleCompensate': 'targetScaleCompensate'
    }
    # Maps the constraint's output plug names to the driven transform's
    # plug names (connected in setConstraintObject).
    __outputs__ = {
        'constraintTranslateX': 'translateX',
        'constraintTranslateY': 'translateY',
        'constraintTranslateZ': 'translateZ',
        'constraintRotateX': 'rotateX',
        'constraintRotateY': 'rotateY',
        'constraintRotateZ': 'rotateZ',
        'constraintScaleX': 'scaleX',
        'constraintScaleY': 'scaleY',
        'constraintScaleZ': 'scaleZ'
    }
    def __init__(self, *args, **kwargs):
        """
        Private method called after a new instance has been created.
        """
        # Call parent method
        #
        super(ConstraintMixin, self).__init__(*args, **kwargs)
    def constraintObject(self):
        """
        Returns the object being driven by this constraint node.
        The constraint parent inverse matrix plug is usually the common denominator in all constraint nodes.
        It should be fairly safe to query the connection to find this object.
        :rtype: mpynode.MPyNode
        """
        # Check if plug has a connection
        #
        plug = self.findPlug('constraintParentInverseMatrix')
        source = plug.source()
        if not source.isNull:
            return self.pyFactory(source.node())
        else:
            # No incoming connection: the constraint is not attached to an object
            return None
    def setConstraintObject(self, constraintObject, **kwargs):
        """
        Updates the constraint object for this instance.
        :type constraintObject: mpy.mpynode.MPyNode
        :key maintainOffset: bool
        :key skipTranslateX: bool
        :key skipTranslateY: bool
        :key skipTranslateZ: bool
        :key skipRotateX: bool
        :key skipRotateY: bool
        :key skipRotateZ: bool
        :key skipScaleX: bool
        :key skipScaleY: bool
        :key skipScaleZ: bool
        :rtype: None
        """
        # Check for redundancy
        #
        if constraintObject == self.constraintObject():
            return
        # Re-parent this constraint
        #
        self.setParent(constraintObject)
        # Update constraint name
        #
        constraintName = '{nodeName}_{typeName}1'.format(nodeName=constraintObject.displayName(), typeName=self.typeName)
        self.setName(constraintName)
        # Update rest matrix
        #
        restMatrix = constraintObject.getAttr('matrix')
        self.setRestMatrix(restMatrix)
        # Connect output attributes
        #
        for (sourceName, destinationName) in self.__outputs__.items():
            # Check if attribute should be skipped
            # e.g. "translateX" -> kwarg "skipTranslateX"
            #
            attributeName = destinationName[0].upper() + destinationName[1:]
            key = 'skip{attributeName}'.format(attributeName=attributeName)
            skipAttribute = kwargs.get(key, False)
            if skipAttribute:
                log.info('Skipping constraint attribute: %s' % destinationName)
                continue
            # Check if attributes exist
            #
            if not self.hasAttr(sourceName) or not constraintObject.hasAttr(destinationName):
                log.info('Unable to locate constraint attributes: %s and %s' % (sourceName, destinationName))
                continue
            # Get associated plugs
            #
            source = self.findPlug(sourceName)
            destination = constraintObject.findPlug(destinationName)
            # Connect plugs
            # Break any previous outgoing connection first so force=True takes over cleanly
            #
            self.breakConnections(source, source=False, destination=True)
            self.connectPlugs(source, destination, force=True)
        # Update constraint parent inverse matrix
        #
        source = constraintObject.findPlug('parentInverseMatrix[%s]' % constraintObject.instanceNumber())
        destination = self.findPlug('constraintParentInverseMatrix')
        constraintObject.connectPlugs(source, destination, force=True)
        # Check if constraint supports rotation order
        # This is only seen in orient and transform constraints
        #
        if self.hasAttr('constraintRotateOrder'):
            constraintObject.connectPlugs('rotateOrder', self.findPlug('constraintRotateOrder'), force=True)
        # Check if constraint supports joint orient
        # This is only seen in orient and transform constraints
        #
        if self.hasAttr('constraintJointOrient') and constraintObject.hasAttr('jointOrient'):
            # Connect child plugs
            #
            source = constraintObject.findPlug('jointOrient')
            destination = self.findPlug('constraintJointOrient')
            for i in range(source.numChildren()):
                constraintObject.connectPlugs(source.child(i), destination.child(i), force=True)
    def interpolationType(self):
        """
        Getter method used to retrieve the interpolation type for this constraint.
        :rtype: int
        """
        return om.MPlug(self.object(), self.attribute('interpType')).asInt()
    def setInterpolationType(self, interpolationType):
        """
        Setter method used to update the interpolation type for this constraint.
        :type interpolationType: int
        :rtype: None
        """
        om.MPlug(self.object(), self.attribute('interpType')).setInt(interpolationType)
    def offset(self):
        """
        Getter method used to retrieve the offset for this constraint.
        Only a few constraints support this method such as point and orient constraints!
        :rtype: om.MVector
        """
        return om.MVector(
            om.MPlug(self.object(), self.attribute('offsetX')).asFloat(),
            om.MPlug(self.object(), self.attribute('offsetY')).asFloat(),
            om.MPlug(self.object(), self.attribute('offsetZ')).asFloat()
        )
    def setOffset(self, offset):
        """
        Setter method used to update the offset for this constraint.
        Only a few constraints support this method such as point and orient constraints!
        :type offset: om.MVector
        :rtype: None
        """
        om.MPlug(self.object(), self.attribute('offsetX')).setFloat(offset.x)
        om.MPlug(self.object(), self.attribute('offsetY')).setFloat(offset.y)
        # NOTE(review): the trailing comma below turns this statement into a
        # one-element tuple expression — harmless at runtime but likely a typo.
        om.MPlug(self.object(), self.attribute('offsetZ')).setFloat(offset.z),
    def targets(self):
        """
        Collects all of the available constraint targets.
        :rtype: list[ConstraintTarget]
        """
        return list(self.iterTargets())
    def targetObjects(self):
        """
        Retrieves the target objects driving this constraint.
        :rtype: list[mpynode.MPyNode]
        """
        return [x.targetObject() for x in self.iterTargets()]
    def iterTargets(self):
        """
        Generator method used to iterate through all available constraint targets.
        :rtype: iter
        """
        # Iterate through target indices
        #
        for i in range(self.targetCount()):
            yield ConstraintTarget(self, index=i)
    def targetCount(self):
        """
        Evaluates the number of active target elements available.
        :rtype: int
        """
        return om.MPlug(self.object(), self.attribute('target')).evaluateNumElements()
    def addTarget(self, target, maintainOffset=True):
        """
        Adds a new target to this constraint.
        :type target: mpynode.MPyNode
        :type maintainOffset: bool
        :rtype: int
        """
        # NOTE(review): the maintainOffset argument is currently unused by
        # this implementation — confirm whether offsets should be computed here.
        # Iterate through required target attributes
        #
        plug = self.findPlug('target')
        index = plug.evaluateNumElements()
        for (sourceName, destinationName) in self.__targets__.items():
            # Check if constraint has attribute
            #
            if not target.hasAttr(sourceName) or not self.hasAttr(destinationName):
                log.info('Unable to locate constraint attributes: %s and %s' % (sourceName, destinationName))
                continue
            # Connect plugs
            #
            source = target.findPlug(sourceName)
            destination = self.findPlug('target[%s].%s' % (index, destinationName))
            self.connectPlugs(source, destination)
        # Connect parent matrix attribute
        #
        source = target.findPlug('parentMatrix[%s]' % target.instanceNumber())
        destination = self.findPlug('target[%s].targetParentMatrix' % index)
        self.connectPlugs(source, destination)
        # Connect weight attributes
        # A user attribute "<targetName>W<index>" is created to drive the target weight
        #
        nodeName = target.displayName()
        attribute = self.addAttr(
            longName='{nodeName}W{index}'.format(nodeName=nodeName, index=index),
            attributeType='float',
            min=0.0, max=1.0
        )
        source = om.MPlug(self.object(), attribute)
        destination = self.findPlug('target[%s].targetWeight' % index)
        self.connectPlugs(source, destination)
        # Enable weight attribute
        #
        source.setFloat(1.0)
        # Return new target index
        #
        return index
    def addTargets(self, targets, maintainOffset=False):
        """
        Adds a list of new targets to this constraint.
        :type targets: list[mpynode.MPyNode]
        :type maintainOffset: bool
        :rtype: int
        """
        for target in targets:
            self.addTarget(target, maintainOffset=maintainOffset)
    def removeTarget(self, index):
        # TODO: not implemented yet
        pass
    def restTranslate(self, context=om.MDGContext.kNormal):
        """
        Returns the rest translate component from this constraint.
        This value is used when there are no target weights.
        :type context: om.MDGContext
        :rtype: om.MVector
        """
        return om.MVector(
            self.findPlug('restTranslateX').asFloat(context=context),
            self.findPlug('restTranslateY').asFloat(context=context),
            self.findPlug('restTranslateZ').asFloat(context=context)
        )
    def setRestTranslate(self, restTranslate):
        """
        Updates the rest translate for this constraint.
        :type restTranslate: om.MVector
        :rtype: None
        """
        # Assign translation to plug
        #
        self.findPlug('restTranslateX').setFloat(restTranslate.x)
        self.findPlug('restTranslateY').setFloat(restTranslate.y)
        self.findPlug('restTranslateZ').setFloat(restTranslate.z)
    def restRotate(self, context=om.MDGContext.kNormal):
        """
        Returns the rest rotation component from this constraint.
        This value is used when there are no target weights.
        :type context: om.MDGContext
        :rtype: om.MEulerRotation
        """
        return om.MEulerRotation(
            self.findPlug('restRotateX').asFloat(context=context),
            self.findPlug('restRotateY').asFloat(context=context),
            self.findPlug('restRotateZ').asFloat(context=context),
            order=self.rotateOrder(context=context)
        )
    def setRestRotate(self, restRotation):
        """
        Updates the rest rotate for this constraint.
        :type restRotation: om.MEulerRotation
        :rtype: None
        """
        # Check if rotation needs reordering
        #
        rotateOrder = self.rotateOrder()
        if restRotation.order != rotateOrder:
            restRotation = restRotation.reorder(rotateOrder)
        # Assign rotation to plugs
        #
        self.findPlug('restRotateX').setFloat(restRotation.x)
        self.findPlug('restRotateY').setFloat(restRotation.y)
        self.findPlug('restRotateZ').setFloat(restRotation.z)
    def restScale(self, context=om.MDGContext.kNormal):
        """
        Returns the rest translate component from this constraint.
        This value is used when there are no target weights.
        :type context: om.MDGContext
        :rtype: list[float, float, float]
        """
        return [
            self.findPlug('restScaleX').asFloat(context=context),
            self.findPlug('restScaleY').asFloat(context=context),
            self.findPlug('restScaleZ').asFloat(context=context)
        ]
    def setRestScale(self, restScale):
        """
        Updates the rest translate for this constraint.
        :type restScale: list[float, float, float]
        :rtype: None
        """
        # Assign scale to plugs
        #
        self.findPlug('restScaleX').setFloat(restScale[0])
        self.findPlug('restScaleY').setFloat(restScale[1])
        self.findPlug('restScaleZ').setFloat(restScale[2])
    def restMatrix(self):
        """
        Computes a transform matrix based off the rest components.
        :rtype: om.MMatrix
        """
        # Compose rest matrix
        # NOTE(review): scale is composed as identity (1.0) rather than from
        # restScale() — confirm whether rest scale should be included here.
        #
        translateMatrix = transformutils.createTranslateMatrix(self.restTranslate())
        rotateMatrix = transformutils.createRotationMatrix(self.restRotate())
        scaleMatrix = transformutils.createScaleMatrix(1.0)
        return scaleMatrix * rotateMatrix * translateMatrix
    def setRestMatrix(self, restMatrix):
        """
        Updates the rest matrix for this constraint by changing the rest components.
        :type restMatrix: om.MMatrix
        :rtype: None
        """
        # Decompose rest matrix
        #
        translate, rotate, scale = transformutils.decomposeTransformMatrix(restMatrix, rotateOrder=self.rotateOrder())
        # Check if constraint has rest translate
        #
        if self.hasAttr('restTranslate'):
            self.setRestTranslate(translate)
        # Check if constraint has rest rotate
        #
        if self.hasAttr('restRotate'):
            self.setRestRotate(rotate)
        # Check if constraint has rest scale
        #
        if self.hasAttr('restScale'):
            self.setRestScale(scale)
    def restInverseMatrix(self):
        """
        Retrieves the inverse rest matrix.
        :rtype: om.MMatrix
        """
        return self.restMatrix().inverse()
    def worldRestMatrix(self):
        """
        Computes the world rest matrix for this constraint.
        :rtype: om.MMatrix
        """
        return self.restMatrix() * self.exclusiveMatrix()
    def worldRestInverseMatrix(self):
        """
        Retrieves the inverse world rest matrix for this constraint.
        :rtype: om.MMatrix
        """
        return self.worldRestMatrix().inverse()
class ConstraintTarget(object):
    """
    Base class used to interface with constraint targets.
    """
    # Slots keep instances lightweight; the constraint is held as a weakref.
    __slots__ = ('_constraint', '_index')
    def __init__(self, constraint, **kwargs):
        """
        Private method called after a new instance has been created.
        :type constraint: ConstraintMixin
        :key index: int  # element index into the constraint's "target" array
        :rtype: None
        """
        # Call parent method
        #
        super(ConstraintTarget, self).__init__()
        # Declare class variables
        # The constraint is stored as a weak reference and dereferenced by the
        # "constraint" property.
        #
        self._constraint = constraint.weakReference()
        self._index = kwargs.get('index', 0)
    @property
    def constraint(self):
        """
        Getter method used to retrieve the associated constraint for this target.
        :rtype: ConstraintMixin
        """
        return self._constraint()
    @property
    def index(self):
        """
        Getter method used to retrieve the index for this constraint target.
        :rtype: int
        """
        return self._index
    def targetPlug(self):
        """
        Returns the element associated with this constraint target.
        :rtype: om.MPlug
        """
        return self.constraint.findPlug('target[{index}]'.format(index=self.index))
    def targetChildPlug(self, name):
        """
        Search method used to locate the child plug derived from this constraint target.
        :type name: str
        :rtype: om.MPlug
        """
        return self.targetPlug().child(self.constraint.attribute(name))
    def name(self):
        """
        Returns the alias name for this constraint target.
        The name is taken from the user attribute connected to the weight plug.
        :rtype: str
        """
        return self.targetChildPlug('targetWeight').source().partialName(useLongNames=True)
    def setName(self, name):
        """
        Method used to change the alias name on the indexed weight attribute.
        :type name: str
        :rtype: bool
        """
        # Get source connection from target weight plug
        #
        plug = self.targetChildPlug('targetWeight')
        otherPlug = plug.source()
        if otherPlug.isNull:
            return
        # Rename user attribute
        # NOTE(review): the short name is renamed first, then the long name —
        # verify the second renameAttr still resolves after the first rename.
        #
        fullPathName = self.constraint.fullPathName()
        fnAttribute = om.MFnAttribute(otherPlug.attribute())
        mc.renameAttr('%s.%s' % (fullPathName, fnAttribute.shortName), name)
        mc.renameAttr('%s.%s' % (fullPathName, fnAttribute.name), name)
    def weight(self):
        """
        Returns the weight for this constraint target.
        :rtype: float
        """
        return self.targetChildPlug('targetWeight').asFloat()
    def targetObject(self):
        """
        Retrieves the target object driving this constraint channel.
        If no source connection is found then none will be returned!
        :rtype: mpynode.MPyNode
        """
        plug = self.targetChildPlug('targetParentMatrix')
        source = plug.source()
        if not source.isNull:
            return self.constraint.pyFactory(source.node())
        else:
            return None
    def targetRotateOrder(self):
        """
        Retrieves the rotate order for this constraint target.
        :rtype: int
        """
        return self.targetChildPlug('targetRotateOrder').asInt()
    def targetOffsetTranslate(self):
        """
        Retrieves the offset translation for this constraint target.
        This method is only supported by parent constraints!
        :rtype: om.MVector
        """
        return om.MVector(
            self.targetChildPlug('targetOffsetTranslateX').asFloat(),
            self.targetChildPlug('targetOffsetTranslateY').asFloat(),
            self.targetChildPlug('targetOffsetTranslateZ').asFloat()
        )
    def setTargetOffsetTranslate(self, translation):
        """
        Updates the offset translation for this constraint target.
        :type translation: om.MVector
        :rtype: None
        """
        self.targetChildPlug('targetOffsetTranslateX').setFloat(translation.x)
        self.targetChildPlug('targetOffsetTranslateY').setFloat(translation.y)
        self.targetChildPlug('targetOffsetTranslateZ').setFloat(translation.z)
    def targetOffsetRotate(self):
        """
        Retrieves the offset rotation for this constraint target.
        This method is only supported by parent constraints!
        :rtype: om.MEulerRotation
        """
        return om.MEulerRotation(
            self.targetChildPlug('targetOffsetRotateX').asFloat(),
            self.targetChildPlug('targetOffsetRotateY').asFloat(),
            self.targetChildPlug('targetOffsetRotateZ').asFloat(),
            order=self.targetRotateOrder()
        )
    def setTargetOffsetRotate(self, rotation):
        """
        Updates the offset rotation for this constraint target.
        :type rotation: om.MEulerRotation
        :rtype: None
        """
        # Check if rotation needs reordering
        #
        rotateOrder = self.targetRotateOrder()
        if rotation.order != rotateOrder:
            rotation = rotation.reorder(rotateOrder)
        # Assign rotation to plugs
        #
        self.targetChildPlug('targetOffsetRotateX').setFloat(rotation.x)
        self.targetChildPlug('targetOffsetRotateY').setFloat(rotation.y)
        self.targetChildPlug('targetOffsetRotateZ').setFloat(rotation.z)
    def resetOffsetTransform(self):
        # TODO: not implemented yet
        pass
|
nilq/baby-python
|
python
|
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import numpy as np
from NiaPy.task import StoppingTask, OptimizationType
from NiaPy.algorithms import BasicStatistics
from NiaPy.algorithms.basic import DifferentialEvolution
from NiaPy.benchmarks import Sphere
NUM_RUNS = 10  # define number of runs

# Run the optimizer NUM_RUNS times and collect the best fitness of each run.
run_bests = []
for run_number in range(NUM_RUNS):
    task = StoppingTask(D=10, nFES=10000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    print("Working on run: " + str(run_number + 1))
    algorithm = DifferentialEvolution(NP=40, CR=0.9, F=0.5)
    best_solution = algorithm.run(task)
    run_bests.append(best_solution[1])  # save best fitness
stats = np.array(run_bests)

stat = BasicStatistics(stats)
print(stat.generate_standard_report())  # generate report
|
nilq/baby-python
|
python
|
# Copyright (c) 2014-2017, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Migrate jails to the latest format (python-iocage)."""
import typing

import click

import libioc.errors
import libioc.events
import libioc.helpers
import libioc.Host
import libioc.Jail
import libioc.Jails
import libioc.Logger
import libioc.ZFS

from .shared.click import IocClickContext
__rootcmd__ = True
class JailMigrationEvent(libioc.events.IocEvent):
    """CLI event that occurs when a jail is migrated from legacy format."""

    def __init__(
        self,
        jail: 'libioc.Jail.JailGenerator'
    ) -> None:
        # The migrated jail's full name identifies this event instance.
        self.identifier = jail.full_name
        super().__init__()
@click.command(name="migrate", help="Migrate jails to the latest format.")
@click.pass_context
@click.argument("jails", nargs=-1)
def cli(
    ctx: IocClickContext,
    jails: typing.Tuple[str, ...]
) -> None:
    """Start one or many jails."""
    logger = ctx.parent.logger
    zfs = libioc.ZFS.get_zfs(logger=logger)
    host = libioc.Host.HostGenerator(logger=logger, zfs=zfs)

    # Templates are never migrated; the trailing filter excludes them.
    ioc_jails = libioc.Jails.JailsGenerator(
        jails + ("template=no,-",),
        logger=logger,
        host=host,
        zfs=zfs
    )
    if len(ioc_jails) == 0:
        logger.error(f"No jails started your input: {jails}")
        exit(1)

    migration_events = _migrate_jails(
        ioc_jails,
        logger=logger,
        zfs=zfs,
        host=host
    )
    ctx.parent.print_events(migration_events)
def _migrate_jails(
    jails: 'libioc.Jails.JailsGenerator',
    logger: 'libioc.Logger.Logger',
    host: 'libioc.Host.HostGenerator',
    zfs: 'libioc.ZFS.ZFS'
) -> typing.Generator['libioc.events.IocEvent', None, None]:
    """Migrate each legacy jail to the current format, yielding progress events.

    For every jail in *jails*: non-legacy jails are skipped, running jails
    fail (migration clones datasets and must not touch a live jail), and
    legacy jails are cloned into a newly created jail, after which the
    original is destroyed. When the old tag is not a valid jail name the
    clone is created under a hash-derived temporary name and renamed at
    the end.
    """
    for jail in jails:
        event = JailMigrationEvent(jail=jail)
        yield event.begin()
        if jail.config.legacy is False:
            # Already in the current format — nothing to do.
            yield event.skip()
            continue
        if jail.running is True:
            yield event.fail(libioc.errors.JailAlreadyRunning(
                jail=jail,
                logger=logger
            ))
            continue
        if libioc.helpers.validate_name(jail.config["tag"]):
            # The tag is a usable jail name; clone directly under it.
            name = jail.config["tag"]
            temporary_name = name
        else:
            # Invalid tag: clone under a deterministic placeholder name and
            # rename to the human readable name after the migration succeeds.
            name = jail.humanreadable_name
            temporary_name = "import-" + str(hash(name) % (1 << 32))
        try:
            new_jail = libioc.Jail.JailGenerator(
                dict(name=temporary_name),
                root_datasets_name=jail.root_datasets_name,
                new=True,
                logger=logger,
                zfs=zfs,
                host=host
            )
            if new_jail.exists is True:
                raise libioc.errors.JailAlreadyExists(
                    jail=new_jail,
                    logger=logger
                )
            def _destroy_unclean_migration() -> typing.Generator[
                'libioc.events.IocEvent',
                None,
                None
            ]:
                # Rollback step: remove the half-built migration target.
                _name = new_jail.humanreadable_name
                logger.verbose(
                    f"Destroying unfinished migration target jail {_name}"
                )
                yield from new_jail.destroy(
                    force=True,
                    event_scope=event.scope
                )
            event.add_rollback_step(_destroy_unclean_migration)
            yield from new_jail.clone_from_jail(jail, event_scope=event.scope)
            new_jail.save()
            new_jail.promote()
            # Clone succeeded; the legacy jail can now be removed.
            yield from jail.destroy(
                force=True,
                force_stop=True,
                event_scope=event.scope
            )
        except libioc.errors.IocException as e:
            yield event.fail(e)
            continue
        if name != temporary_name:
            # the new jail takes the old jail's name
            yield from new_jail.rename(name, event_scope=event.scope)
        yield event.end()
|
nilq/baby-python
|
python
|
"""Anvil is a tool for automating the rigging process in a given DCC."""
from six import itervalues
import config
import utils
import colors
import meta_data
import log
import version
import interfaces
import runtime
import objects
import grouping
import node_types
import sub_rig_templates
import rig_templates
class AnvilLog(log.LogMixin):
    """Module-level log mixin; the class itself is used (via LOG) as the logger."""
    LOG = log.obtain_logger(__name__)


# The class (not an instance) serves as the module's logger handle.
LOG = AnvilLog
LOG.info('Auto-Loaded DCC %s', runtime.dcc)
LOG.info('Loaded logger config file %s successfully, writing to: %s',
         log.LogInitializer.CFG_FILE, log.LogInitializer.LOG_DIR)
LOG.info('Anvil environment has been set to %s', config.ENV)
LOG.info('Successfully initiated Anvil %s.', version.__version__)

# Registry of all node encapsulations created so far, keyed by insertion order
# (see register_encapsulation / check_for_encapsulation below).
EXISTING_ENCAPSULATIONS = {}
def check_for_encapsulation(dag_path):
    """Helper for the factory method to check for a previously existing encapsulation."""
    # Return the first registered encapsulation whose DCC id matches, else None.
    return next(
        (encapsulation for encapsulation in itervalues(EXISTING_ENCAPSULATIONS)
         if dag_path == encapsulation._dcc_id),
        None,
    )
def factory(dag_path, **kwargs):
    """Factory method that checks for previous encapsulations to reduce memory footprint and encourages reuse."""
    if dag_path is None:
        raise IOError('Tried to factory encapsulate None.')
    if is_anvil(dag_path):
        return dag_path

    persistent_id = runtime.dcc.scene.get_persistent_id(str(dag_path))
    existing = check_for_encapsulation(persistent_id)
    if existing is not None:
        return existing

    node_type = runtime.dcc.scene.get_type(dag_path)
    # First-match lookup, in the same order as before; anything unrecognized
    # falls back to a plain Transform encapsulation.
    type_to_class = (
        (config.TRANSFORM_TYPE, objects.Transform),
        (config.CURVE_TYPE, objects.Curve),
        (config.JOINT_TYPE, objects.Joint),
    )
    encapsulation_class = objects.Transform
    for type_key, candidate_class in type_to_class:
        if node_type in config.DCC_TYPES[type_key]:
            encapsulation_class = candidate_class
            break

    encapsulation = encapsulation_class(dag_path, **kwargs)
    register_encapsulation(encapsulation)
    return encapsulation
def factory_list(dag_nodes):
    """Factory method that iterates over a list and returns a list."""
    encapsulated = []
    for dag_node in dag_nodes:
        encapsulated.append(factory(dag_node))
    return encapsulated
def register_encapsulation(anvil_class_instance):
    """Helper to register a given encapsulation with the encapsulation registry."""
    # Keys are sequential integers: the current registry size is the next key.
    next_key = len(EXISTING_ENCAPSULATIONS)
    EXISTING_ENCAPSULATIONS[next_key] = anvil_class_instance
def is_achunk(node):
    """Return True when *node* is an anvil collection (BaseCollection subclass)."""
    # BUG FIX: the original evaluated issubclass(...) but never returned it, so
    # is_achunk always returned None (falsy) and collections were never detected
    # by is_aiter/is_anvil.
    return issubclass(type(node), node_types.BaseCollection)
def is_agrouping(node):
    """True when the node is an anvil grouping (AbstractGrouping subclass)."""
    node_cls = type(node)
    return issubclass(node_cls, node_types.AbstractGrouping)
def is_aobject(node):
    """True when the node is an anvil object encapsulation (UnicodeDelegate subclass)."""
    node_cls = type(node)
    return issubclass(node_cls, node_types.UnicodeDelegate)
def is_aiter(node):
    """True for iterable anvil encapsulations (groupings and collections)."""
    if is_agrouping(node):
        return True
    return is_achunk(node)
def is_anvil(node):
    """True for any anvil encapsulation (iterable or plain object)."""
    if is_aiter(node):
        return True
    return is_aobject(node)
__all__ = ['config',
'meta_data',
'interfaces',
'log',
'version',
'node_types',
'runtime',
'objects',
'grouping',
'sub_rig_templates',
'rig_templates',
'utils',
'colors']
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import os
from transformers import AutoModel
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
def init():
    """Load the TF-exported transformer model from the AzureML model directory."""
    global model
    model_dir = os.getenv("AZUREML_MODEL_DIR")
    model = AutoModel.from_pretrained(model_dir, from_tf=True)
# Sample payloads used by inference-schema to derive the service contract.
input_sample = pd.DataFrame(data=[{'query': "AzureML is quite good."}])
output_sample = np.array([np.array(["POSITIVE", 0.95])])


@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    """Score the incoming frame; on failure return the error text instead of raising."""
    try:
        sentiment = model(data['query'])
        return {'sentiment': sentiment}
    except Exception as e:
        # AzureML convention: surface the failure as a string response.
        return str(e)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Struct
from magma.common.rpc_utils import grpc_wrapper
from orc8r.protos import common_pb2, magmad_pb2, magmad_pb2_grpc
@grpc_wrapper
def start_services(client, args):
client.StartServices(common_pb2.Void())
@grpc_wrapper
def stop_services(client, args):
client.StopServices(common_pb2.Void())
@grpc_wrapper
def reboot(client, args):
client.Reboot(common_pb2.Void())
@grpc_wrapper
def restart_services(client, args):
client.RestartServices(
magmad_pb2.RestartServicesRequest(services=args.services)
)
@grpc_wrapper
def ping(client, args):
response = client.RunNetworkTests(
magmad_pb2.NetworkTestRequest(
pings=[
magmad_pb2.PingParams(
host_or_ip=host,
num_packets=args.packets,
) for host in args.hosts
]
)
)
print(response)
@grpc_wrapper
def traceroute(client, args):
response = client.RunNetworkTests(
magmad_pb2.NetworkTestRequest(
traceroutes=[
magmad_pb2.TracerouteParams(
host_or_ip=host,
max_hops=args.max_hops,
bytes_per_packet=args.bytes,
) for host in args.hosts
]
)
)
print(response)
@grpc_wrapper
def get_gateway_id(client, args):
response = client.GetGatewayId(common_pb2.Void())
print(response)
@grpc_wrapper
def generic_command(client, args):
params = json_format.Parse(args.params, Struct())
response = client.GenericCommand(
magmad_pb2.GenericCommandParams(command=args.command, params=params)
)
print(response)
@grpc_wrapper
def tail_logs(client, args):
stream = client.TailLogs(magmad_pb2.TailLogsRequest(service=args.service))
for log_line in stream:
print(log_line.line, end='')
def create_parser():
"""
Creates the argparse parser with all the arguments.
"""
parser = argparse.ArgumentParser(
description='Management CLI for Magmad',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Add subcommands
subparsers = parser.add_subparsers(title='subcommands', dest='cmd')
parser_start = subparsers.add_parser('start_services',
help='Start all magma services')
parser_stop = subparsers.add_parser('stop_services',
help='Stop all magma services')
parser_reboot = subparsers.add_parser('reboot',
help='Reboot the gateway device')
parser_restart = subparsers.add_parser('restart_services',
help='Restart specified magma services')
parser_ping = subparsers.add_parser(
'ping',
help='Ping a host from the gateway')
parser_traceroute = subparsers.add_parser(
'traceroute',
help='traceroute a host from the gateway')
parser_get_id = subparsers.add_parser('get_gateway_id',
help='Get gateway hardware ID')
parser_generic_command = subparsers.add_parser('generic_command',
help='Execute generic command')
parser_tail_logs = subparsers.add_parser('tail_logs',
help='Tail logs')
parser_ping.add_argument('hosts', nargs='+', type=str,
help='Hosts (URLs or IPs) to ping')
parser_ping.add_argument('--packets', type=int, default=4,
help='Number of packets to send with each ping')
parser_traceroute.add_argument('hosts', nargs='+', type=str,
help='Hosts (URLs or IPs) to traceroute')
parser_traceroute.add_argument('--max-hops', type=int, default=30,
help='Max TTL for packets, defaults to 30')
parser_traceroute.add_argument('--bytes', type=int, default=60,
help='Bytes per packet, defaults to 60')
parser_restart.add_argument('services', nargs='*', type=str,
help='Services to restart')
parser_generic_command.add_argument('command', type=str,
help='Command name')
parser_generic_command.add_argument('params', type=str,
help='Params (string)')
parser_tail_logs.add_argument('service', type=str, nargs='?',
help='Service')
# Add function callbacks
parser_start.set_defaults(func=start_services)
parser_stop.set_defaults(func=stop_services)
parser_reboot.set_defaults(func=reboot)
parser_restart.set_defaults(func=restart_services)
parser_ping.set_defaults(func=ping)
parser_traceroute.set_defaults(func=traceroute)
parser_get_id.set_defaults(func=get_gateway_id)
parser_generic_command.set_defaults(func=generic_command)
parser_tail_logs.set_defaults(func=tail_logs)
return parser
def main():
parser = create_parser()
# Parse the args
args = parser.parse_args()
if not args.cmd:
parser.print_usage()
exit(1)
# Execute the subcommand function
args.func(args, magmad_pb2_grpc.MagmadStub, 'magmad')
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
# Functions for handling pool presence table analysis
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
from kosudoku.utils import gSeparatorString, UpdateLogFileData
from kosudoku.output import generateOutputMatrixWithHeaders, writeOutputMatrix
from kosudoku.grid import SudokuGenomicCoord, CalculateSudokuGridOccupancyTaxonomy, \
PrintSudokuGridOccupancyTaxonomy
# ------------------------------------------------------------------------------------------------ #
def GenerateDTypeArrayForPoolPresenceTableImport(poolColumns):
    """Build a numpy structured-dtype spec with one int32 field per pool column.

    poolColumns: iterable of column names, in the desired field order.
    Returns a list of (name, numpy.int32) tuples usable as a dtype argument.

    Cleanup: removed a leftover debug `import pdb` and replaced the manual
    index-counting while loop with a comprehension.
    """
    from numpy import int32
    return [(column, int32) for column in poolColumns]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateDTypeArrayForPoolPresenceDict(indexLookupTable):
    """Structured-dtype spec: a readAlignmentCoord field plus one int32 field per pool.

    indexLookupTable: maps barcode index -> pool name; the *values* become the
    pool fields, sorted so the field order is stable.

    Cleanup: removed a leftover debug `import pdb` and replaced the manual
    while loop with a for loop over the sorted pool names.
    """
    from numpy import int32
    dtypeArray = [('readAlignmentCoord', int32)]
    for poolName in sorted(indexLookupTable.values()):
        dtypeArray.append((poolName, int32))
    return dtypeArray
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GeneratePoolPresenceTable(uniqueCoords, sortedValidGenomeArray, indexLookupTable, dtypeDict):
    """Build the pool presence table: per-coordinate read counts for each pool.

    uniqueCoords: unique read alignment coordinates, one output row each,
        in the same sorted order as sortedValidGenomeArray.
    sortedValidGenomeArray: reads sorted by 'readAlignmentCoord', each row
        carrying an 'index' field identifying the pool it came from.
    indexLookupTable: maps str(index) -> pool column name.
    dtypeDict: numpy structured dtype for the output table.
    """
    import numpy
    import pdb
    try:
        poolPresenceTable = numpy.zeros(len(uniqueCoords), dtype=dtypeDict)
    except:
        # NOTE(review): bare except that drops into the debugger — a debugging
        # leftover; in a non-interactive run this would hang the process.
        pdb.set_trace()
    i = 0
    while i < len(uniqueCoords):
        # Seed each row with its coordinate.
        # NOTE(review): this assigns a scalar to a whole structured record;
        # verify the intended numpy semantics (it may fill every field, not
        # just 'readAlignmentCoord') — TODO confirm against the numpy version
        # this was written for.
        poolPresenceTable[i] = uniqueCoords[i]
        i += 1
    # Two-pointer merge: i walks the sorted reads, j walks the table rows;
    # every read whose coordinate matches row j increments that read's pool
    # column on row j.
    i = 0
    j = 0
    while j < len(uniqueCoords):
        while(i < len(sortedValidGenomeArray) and \
        sortedValidGenomeArray[i]['readAlignmentCoord'] == poolPresenceTable[j]['readAlignmentCoord']):
            index = str(sortedValidGenomeArray[i]['index'])
            column = indexLookupTable[index]
            poolPresenceTable[j][column] += 1
            i += 1
        j += 1
    return poolPresenceTable
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportPoolPresenceTable(poolPresenceTableFileName, poolColumns):
    """Read a pool presence table CSV back into a numpy structured array.

    Makes two passes over the file: the first counts rows and maps each
    requested pool column to its position in the CSV header; the second
    fills the structured array. Only the columns named in poolColumns are
    imported.
    """
    import numpy
    import gc
    from pdb import set_trace
    import csv
    # First pass: locate the wanted columns in the header and count lines.
    fHandle = open(poolPresenceTableFileName, 'r')
    poolColumnToHeaderIndexDict = {}
    i = 0
    datareader = csv.reader(fHandle)
    for row in datareader:
        if i == 0:
            headers = row
            poolColumnToHeaderIndexDict = {}
            for col in poolColumns:
                poolColumnToHeaderIndexDict[col] = headers.index(col)
        i += 1
    fHandle.close()
    dtypeArray = GenerateDTypeArrayForPoolPresenceTableImport(poolColumns)
    # i is now the total line count, so i-1 is the number of data rows.
    poolPresenceTable = numpy.zeros(i-1, dtype=dtypeArray)
    i = 0
    colKeys = list(poolColumnToHeaderIndexDict.keys())
    colIndices = []
    while i < len(colKeys):
        colIndices.append(poolColumnToHeaderIndexDict[colKeys[i]])
        i += 1
    # Second pass: copy each data row's selected columns into the table
    # (row 0 is the header, hence the i > 0 guard and the i-1 offset).
    fHandle = open(poolPresenceTableFileName, 'r')
    i = 0
    datareader = csv.reader(fHandle)
    for row in datareader:
        if i > 0:
            j = 0
            while j < len(colKeys):
                poolPresenceTable[i-1][colKeys[j]] = row[colIndices[j]]
                j += 1
        i += 1
    fHandle.close()
    return poolPresenceTable
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def WritePoolPresenceTable3(filename, poolPresenceTable, poolColumns):
    """Write the selected columns of the pool presence table to a CSV file.

    filename: output path (overwritten).
    poolPresenceTable: numpy structured array (or similar) indexable by row
        then by column name.
    poolColumns: the field names to write, in order; also the header line.

    Improvements over the original: O(n) string assembly with str.join
    instead of quadratic '+=' accumulation, and a context manager so the
    file handle is closed even on error. The emitted bytes are unchanged.
    """
    headerLine = ','.join(poolColumns) + '\n'
    rowLines = []
    for row in poolPresenceTable:
        rowLines.append(','.join(str(row[column]) for column in poolColumns) + '\n')
    with open(filename, 'w') as fhandle:
        fhandle.write(headerLine + ''.join(rowLines))
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateBlankPhysicalAddressDict(poolPresenceTable):
    """Map each read alignment coordinate to an (initially empty) address list."""
    coords = poolPresenceTable['readAlignmentCoord']
    return {int(coord): [] for coord in coords}
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindAddressCoords(poolPresenceTableLine, axisPools, threshold=5):
    """Return [pool, readCount] pairs for pools at or above the read threshold."""
    return [
        [pool, poolPresenceTableLine[pool]]
        for pool in axisPools
        if poolPresenceTableLine[pool] >= threshold
    ]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindAddressCoords2(poolPresenceTableLine, axisPools, threshold=5):
    """Like FindAddressCoords, but split into a pool list plus a score dict.

    Returns [axisAddresses, axisAddressScoreDict]: the pools whose read count
    meets the threshold, and a dict mapping each such pool to its read count.
    """
    aboveThreshold = [
        pool for pool in axisPools if poolPresenceTableLine[pool] >= threshold
    ]
    scores = {pool: poolPresenceTableLine[pool] for pool in aboveThreshold}
    return [aboveThreshold, scores]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePoolCoordsForLine(poolPresenceTableLine, rowPools, colPools, prPools,
                               pcPools, controlPools, threshold=5):
    """Find above-threshold pool coords on every axis, including control pools.

    Like CalculatePhysicalAddressCoordsForLine but also reports the contents
    of the control pools; used when generating the pool presence table
    taxonomy. Each returned element is the FindAddressCoords2 pair
    [pools, pool->read-count dict]; the control element is None when no
    control pools were supplied.

    Idiom fix: `controlPools != None` replaced with `is not None` (PEP 8).
    """
    addresses_r = FindAddressCoords2(poolPresenceTableLine, rowPools, threshold=threshold)
    addresses_c = FindAddressCoords2(poolPresenceTableLine, colPools, threshold=threshold)
    addresses_pr = FindAddressCoords2(poolPresenceTableLine, prPools, threshold=threshold)
    addresses_pc = FindAddressCoords2(poolPresenceTableLine, pcPools, threshold=threshold)
    if controlPools is not None:
        addresses_control = FindAddressCoords2(poolPresenceTableLine, controlPools,
                                               threshold=threshold)
    else:
        addresses_control = None
    return [addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateNumberOfPoolAxesThatHaveMoreThanOneEntry(addresses_r, addresses_c, \
addresses_pr, addresses_pc, addresses_control):
    """Count how many of the four address axes carry two or more pool entries."""
    axisEntryCounts = (
        len(addresses_r[0]),
        len(addresses_c[0]),
        len(addresses_pr[0]),
        len(addresses_pc[0]),
    )
    return sum(1 for count in axisEntryCounts if count > 1)
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateNumberOfPoolAxesThatHaveEntries(addresses_r, addresses_c, addresses_pr, addresses_pc, \
addresses_control):
    """Count axes with at least one entry and the occupied control pools.

    Used when generating the pool presence table taxonomy to decide how many
    lines can yield library addresses. Returns
    [nAddressPoolAxesWithEntries, nControlPoolsWithEntries].
    """
    axisEntryCounts = (
        len(addresses_r[0]),
        len(addresses_c[0]),
        len(addresses_pr[0]),
        len(addresses_pc[0]),
    )
    nAddressPoolAxesWithEntries = sum(1 for count in axisEntryCounts if count > 0)
    if addresses_control is None:
        nControlPoolsWithEntries = 0
    else:
        nControlPoolsWithEntries = len(addresses_control[0])
    return [nAddressPoolAxesWithEntries, nControlPoolsWithEntries]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePossibleAddresses(addresses_r, addresses_c, addresses_pr, addresses_pc):
    """Enumerate every row/col/plate-row/plate-col combination with its score.

    Each axis argument holds [poolName, readCount] pairs. Every combination
    yields [address, score] where the address joins the four pool names with
    underscores and the score is the sum of their read counts.
    """
    possibleAddresses = []
    for row, row_score in addresses_r:
        for col, col_score in addresses_c:
            for pr, pr_score in addresses_pr:
                for pc, pc_score in addresses_pc:
                    address = '_'.join((row, col, pr, pc))
                    score = row_score + col_score + pr_score + pc_score
                    possibleAddresses.append([address, score])
    return possibleAddresses
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateLibraryAddressesForPoolPresenceTableLine(poolPresenceTableLine, rowPools, colPools, \
prPools, pcPools, threshold=5):
    """Calculate candidate library addresses for one pool presence table line.

    Finds the pools on each axis whose read count meets the threshold and
    returns every row/col/plate-row/plate-col combination with its summed
    read-count score (see CalculatePossibleAddresses). Used when generating
    the pool presence table taxonomy.

    Cleanup: the original computed an unused local from
    poolPresenceTableLine['readAlignmentCoord']; dropped as dead code.
    """
    addresses_r = FindAddressCoords(poolPresenceTableLine, rowPools, threshold=threshold)
    addresses_c = FindAddressCoords(poolPresenceTableLine, colPools, threshold=threshold)
    addresses_pr = FindAddressCoords(poolPresenceTableLine, prPools, threshold=threshold)
    addresses_pc = FindAddressCoords(poolPresenceTableLine, pcPools, threshold=threshold)
    possibleAddresses = CalculatePossibleAddresses(addresses_r, addresses_c, addresses_pr, \
    addresses_pc)
    return possibleAddresses
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateLibraryAddressesForPoolPresenceTableLine2(poolPresenceTableLine, rowPools, colPools, \
prPools, pcPools, logReadNumberRatioHistogramFitDict, logReadNumberRatioHistogramIntegralDict, \
threshold):
    """Calculate candidate library addresses with Voigt-based ratio scores.

    Like CalculateLibraryAddressesForPoolPresenceTableLine, but scores every
    combination via CalculatePossibleAddresses2 using the supplied Voigt fit
    parameters and normalization integrals for the pairwise read-count
    ratios. Used when generating the pool presence table taxonomy.

    Cleanup: the original computed an unused local from
    poolPresenceTableLine['readAlignmentCoord']; dropped as dead code.
    """
    addresses_r = FindAddressCoords(poolPresenceTableLine, rowPools, threshold=threshold)
    addresses_c = FindAddressCoords(poolPresenceTableLine, colPools, threshold=threshold)
    addresses_pr = FindAddressCoords(poolPresenceTableLine, prPools, threshold=threshold)
    addresses_pc = FindAddressCoords(poolPresenceTableLine, pcPools, threshold=threshold)
    possibleAddressesAndScores = \
    CalculatePossibleAddresses2(addresses_r, addresses_c, addresses_pr, addresses_pc, \
    logReadNumberRatioHistogramFitDict, logReadNumberRatioHistogramIntegralDict)
    return possibleAddressesAndScores
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateNumberOfEntriesInPoolAxes(addresses_r, addresses_c, \
addresses_pr, addresses_pc):
    """Return the entry counts [row, col, plate-row, plate-col] for the four axes.

    Each axis argument is a FindAddressCoords2-style pair whose first element
    is the list of above-threshold pools.

    Cleanup: removed a leftover debug `import pdb` and a commented-out
    set_trace() call.
    """
    return [
        len(axis[0])
        for axis in (addresses_r, addresses_c, addresses_pr, addresses_pc)
    ]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateMaxNumberOfEntriesInSinglePoolAxis(addresses_r, addresses_c, \
addresses_pr, addresses_pc):
    """Largest entry count found on any single pool axis.

    This is the minimum number of library addresses an ambiguous line could
    map to, so it is used to guess the line's ambiguity.
    """
    return max(
        len(axis[0])
        for axis in (addresses_r, addresses_c, addresses_pr, addresses_pc)
    )
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# Functions for calculating a Voigt function to a read ratio histogram
def voigtFunction(x, p):
    """Evaluate a Voigt profile at x.

    p = [a, c, delta, sigma]: amplitude, center, Lorentzian half-width and
    Gaussian width. Computed via erfc of complex arguments; the two complex
    terms are conjugate, so the sum is real.
    """
    from scipy.special import erfc
    from numpy import exp
    from numpy import sqrt
    from numpy import pi, float64
    a = p[0]
    c = p[1]
    delta = p[2]
    sigma = p[3]
    firstArg = ((-1.j)*(-c + x) + delta)/(sqrt(2)*sigma)
    secondArg = ((1.j)*(-c + x) + delta)/(sqrt(2)*sigma)
    voigtEquation = a\
    *(exp(firstArg**2)*erfc(firstArg) \
    + exp(secondArg**2)*erfc(secondArg) ) \
    / (2*sqrt(2*pi)*sigma)
    # Cast back to real; NOTE(review): float64() of a complex value relies on
    # numpy discarding the (numerically tiny) imaginary part — confirm this
    # does not raise/warn on the numpy version in use.
    voigtEquation = float64(voigtEquation)
    return voigtEquation
def voigtResiduals(p, y, x):
    """Residual (data minus model) in the form scipy.optimize.leastsq expects."""
    return y - voigtFunction(x, p)
def voigtFit(x, y, p0):
    """Least-squares fit of a Voigt profile to (x, y).

    p0: initial [a, c, delta, sigma] guess.
    Returns [fittedParams, fittedCurve] where fittedCurve is the profile
    evaluated at x with the fitted parameters.

    Cleanup: removed the redundant `import scipy` (only
    scipy.optimize.leastsq is used, and it is imported explicitly).
    """
    from scipy.optimize import leastsq
    plsq = leastsq(voigtResiduals, p0, args=(y, x), maxfev=2000)
    return [plsq[0], voigtFunction(x, plsq[0])]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateVoigtScore(logNRatio, plsq, normalizationFactor):
    """Voigt profile value at logNRatio, scaled by the normalization factor."""
    return voigtFunction(logNRatio, plsq) / normalizationFactor
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePossibleAddresses2(addresses_r, addresses_c, addresses_pr, addresses_pc, plsqDict, \
normalizationFactorDict):
    """Enumerate candidate library addresses with Voigt-profile ratio scores.

    Like CalculatePossibleAddresses, but instead of summing read counts each
    combination is scored by how typical its six pairwise log read-count
    ratios (row/col, row/pr, row/pc, col/pr, col/pc, pr/pc) are under the
    per-ratio Voigt fits in plsqDict, normalized by
    normalizationFactorDict. Returns a list of
    [address, per-ratio score dict, combined score, total reads,
    log read-count ratio dict] entries.
    """
    import numpy
    import pdb
    possibleAddressesAndScores = []
    for address_r in addresses_r:
        for address_c in addresses_c:
            for address_pr in addresses_pr:
                for address_pc in addresses_pc:
                    # Each address_* entry is a [poolName, readCount] pair.
                    row = address_r[0]
                    col = address_c[0]
                    pr = address_pr[0]
                    pc = address_pc[0]
                    possibleAddress = row + '_' + col + '_' + pr + '_' + pc
                    nRowReads = address_r[1]
                    nColReads = address_c[1]
                    nPRReads = address_pr[1]
                    nPCReads = address_pc[1]
                    totalReads = nRowReads + nColReads + nPRReads + nPCReads
                    # Pairwise read-count ratios.
                    # NOTE(review): a zero read count would divide by zero
                    # here; presumably callers only pass above-threshold
                    # counts — confirm threshold >= 1 upstream.
                    nr2nc = nRowReads/nColReads
                    nr2npr = nRowReads/nPRReads
                    nr2npc = nRowReads/nPCReads
                    nc2npr = nColReads/nPRReads
                    nc2npc = nColReads/nPCReads
                    npr2npc = nPRReads/nPCReads
                    logNr2nc = numpy.log(nr2nc)
                    logNr2npr = numpy.log(nr2npr)
                    logNr2npc = numpy.log(nr2npc)
                    logNc2npr = numpy.log(nc2npr)
                    logNc2npc = numpy.log(nc2npc)
                    logNpr2npc = numpy.log(npr2npc)
                    # Score each log-ratio against its fitted Voigt profile.
                    voigtScoreNr2nc = CalculateVoigtScore(logNr2nc, plsqDict['nr2nc'], \
                    normalizationFactorDict['nr2nc'])
                    voigtScoreNr2npr = CalculateVoigtScore(logNr2npr, plsqDict['nr2npr'], \
                    normalizationFactorDict['nr2npr'])
                    voigtScoreNr2npc = CalculateVoigtScore(logNr2npc, plsqDict['nr2npc'], \
                    normalizationFactorDict['nr2npc'])
                    voigtScoreNc2npr = CalculateVoigtScore(logNc2npr, plsqDict['nc2npr'], \
                    normalizationFactorDict['nc2npr'])
                    voigtScoreNc2npc = CalculateVoigtScore(logNc2npc, plsqDict['nc2npc'], \
                    normalizationFactorDict['nc2npc'])
                    voigtScoreNpr2npc = CalculateVoigtScore(logNpr2npc, plsqDict['npr2npc'], \
                    normalizationFactorDict['npr2npc'])
                    scoreDict = {'nr2nc':voigtScoreNr2nc, 'nr2npr':voigtScoreNr2npr, \
                    'nr2npc':voigtScoreNr2npc, 'nc2npr':voigtScoreNc2npr, \
                    'nc2npc':voigtScoreNc2npc, 'npr2npc':voigtScoreNpr2npc}
                    logReadCountRatioDict = {'logNr2nc':logNr2nc, 'logNr2npr':logNr2npr, \
                    'logNr2npc':logNr2npc, 'logNc2npr':logNc2npr, 'logNc2npc':logNc2npc, \
                    'logNpr2npc':logNpr2npc}
                    # Combined score is the product of the six ratio scores.
                    score = voigtScoreNr2nc * voigtScoreNr2npr * voigtScoreNr2npc \
                    * voigtScoreNc2npr * voigtScoreNc2npc * voigtScoreNpr2npc
                    possibleAddressesAndScores.append([possibleAddress, scoreDict, score, \
                    totalReads, logReadCountRatioDict])
    return possibleAddressesAndScores
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePhysicalAddresses(poolPresenceTable, rowPools, colPools, prPools, pcPools, \
threshold=5):
    """Map every read alignment coordinate to its candidate library addresses.

    For each table line, the above-threshold pools on each axis are combined
    into every possible row/col/plate-row/plate-col address with a summed
    read-count score (see CalculatePossibleAddresses).

    Cleanup: removed the unused local `entry` and replaced the manual
    index-counting while loop with iteration over the table rows.
    """
    physicalAddressDict = GenerateBlankPhysicalAddressDict(poolPresenceTable)
    for line in poolPresenceTable:
        coord = int(line['readAlignmentCoord'])
        addresses_r = FindAddressCoords(line, rowPools, threshold=threshold)
        addresses_c = FindAddressCoords(line, colPools, threshold=threshold)
        addresses_pr = FindAddressCoords(line, prPools, threshold=threshold)
        addresses_pc = FindAddressCoords(line, pcPools, threshold=threshold)
        physicalAddressDict[coord] = CalculatePossibleAddresses(
            addresses_r, addresses_c, addresses_pr, addresses_pc)
    return physicalAddressDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePhysicalAddressCoordsForLine(poolPresenceTableLine, rowPools, colPools, prPools, \
pcPools, threshold=5):
    """Above-threshold pool coords for each of the four axes of one line.

    Each element of the returned list is a FindAddressCoords2 pair: the pool
    list followed by the pool -> read-count dict.
    """
    axes = (rowPools, colPools, prPools, pcPools)
    return [
        FindAddressCoords2(poolPresenceTableLine, axisPools, threshold=threshold)
        for axisPools in axes
    ]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateAverageReadAlignmentCoordinate(group, rowPools, colPools, prPools, pcPools, \
averagingType='median'):
    """Read-weighted average alignment coordinate for a group of table lines.

    Each coordinate is repeated once per location-pool read before averaging,
    so heavily covered coordinates dominate. averagingType selects
    median/mode/mean; any other value falls back to median. Returns
    [averageReadAlignmentCoord, includeInSummedTable]; the flag is False only
    when the group has no reads at all.
    """
    import numpy
    from pdb import set_trace
    import scipy.stats
    # Calculate the median and mean read alignment coordinate
    [readAlignmentCoords, totalReads] = \
    GenerateHistogramOfReadsVersusReadAlignmentCoord(group, rowPools, colPools, prPools, pcPools)
    # Expand the histogram: repeat each coordinate once per read.
    readAlignmentCoordList = []
    i = 0
    while i < len(totalReads):
        j = 0
        while j < totalReads[i]:
            readAlignmentCoordList.append(readAlignmentCoords[i])
            j += 1
        i += 1
    # set_trace()
    # print(str(readAlignmentCoordList))
    if len(readAlignmentCoordList) == 0:
        averageReadAlignmentCoord = 0
        includeInSummedTable = False
    elif averagingType == 'median':
        averageReadAlignmentCoord = int(numpy.median(readAlignmentCoordList))
        includeInSummedTable = True
    elif averagingType == 'mode':
        # NOTE(review): scipy.stats.mode returns a ModeResult object in modern
        # SciPy; int() of it would raise — TODO confirm against the SciPy
        # version this was written for.
        averageReadAlignmentCoord = int(scipy.stats.mode(readAlignmentCoordList))
        includeInSummedTable = True
    elif averagingType == 'mean':
        averageReadAlignmentCoord = int(numpy.mean(readAlignmentCoordList))
        includeInSummedTable = True
    else:
        # Unknown averaging type: default to the median.
        averageReadAlignmentCoord = int(numpy.median(readAlignmentCoordList))
        includeInSummedTable = True
    return [averageReadAlignmentCoord, includeInSummedTable]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateHistogramOfReadsVersusReadAlignmentCoord(groupedPoolPresenceTableGroup, \
rowPools, colPools, prPools, pcPools):
    """Build a histogram of total reads per read alignment coordinate.

    Returns [readAlignmentCoords, totalReads]: parallel lists with one entry
    per line in the group.
    """
    readAlignmentCoords = []
    totalReads = []
    for groupLine in groupedPoolPresenceTableGroup:
        readAlignmentCoords.append(groupLine['readAlignmentCoord'])
        totalReads.append(\
            CountReadsAssociatedWithCoordinateThatAreInLocationPools(groupLine, \
            rowPools, colPools, prPools, pcPools))
    return [readAlignmentCoords, totalReads]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CountReadsAssociatedWithCoordinateThatAreInLocationPools(groupedPoolPresenceTableGroupLine,\
rowPools, colPools, prPools, pcPools):
    """Sum the reads of a grouped pool presence table line over the four
    location pool axes (rows, columns, plate rows, plate columns).

    Args:
        groupedPoolPresenceTableGroupLine: mapping/record indexable by pool name.
        rowPools, colPools, prPools, pcPools: pool name lists.

    Returns:
        Total read count across all listed location pools.
    """
    from itertools import chain

    # One loop over the chained pool lists replaces four identical loops.
    totalReads = 0
    for pool in chain(rowPools, colPools, prPools, pcPools):
        totalReads += groupedPoolPresenceTableGroupLine[pool]
    return totalReads
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CountReadsAssociatedWithLineBooleanOperation(axisCoordIntersections, nextLineCoords, \
currentLineCoords):
    """Sum the reads for every intersection coordinate that appears in the
    next and/or current line.

    nextLineCoords / currentLineCoords are [coordCollection, coord->reads map]
    pairs; a coordinate present in both contributes from both.
    """
    nextCoords, nextReadsByCoord = nextLineCoords[0], nextLineCoords[1]
    currentCoords, currentReadsByCoord = currentLineCoords[0], currentLineCoords[1]

    readsCount = 0
    for intersectionCoord in axisCoordIntersections:
        if intersectionCoord in nextCoords:
            readsCount += nextReadsByCoord[intersectionCoord]
        if intersectionCoord in currentCoords:
            readsCount += currentReadsByCoord[intersectionCoord]
    return readsCount
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateTotalReadsInLine(poolPresenceTableLine, rowPools, colPools, prPools, pcPools, \
controlPools):
    """Sum the read counts of a pool presence table line across every pool.

    Args:
        poolPresenceTableLine: mapping/record indexable by pool name.
        rowPools, colPools, prPools, pcPools: address pool name lists.
        controlPools: control pool name list, or None when the run has no
            control pools.

    Returns:
        Total number of reads in the line across all listed pools.
    """
    from itertools import chain

    # Five duplicated while-loops collapsed into one; controlPools may
    # legitimately be None, meaning "no control pools".
    allPools = chain(rowPools, colPools, prPools, pcPools, \
        controlPools if controlPools is not None else [])
    totalReads = 0
    for pool in allPools:
        totalReads += poolPresenceTableLine[pool]
    return totalReads
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePoolPresenceTableTaxonomyDict(poolPresenceTable, rowPools, colPools, prPools, \
pcPools, controlPools, threshold):
    """Classify every pool presence table line and tally taxonomy counters.

    For each line, computes the total reads, the possible library addresses
    at the given read count threshold, and the per-axis pool coordinates,
    then increments the matching counters (single/multiple/ambiguous
    addresses, axis occupancy classes, etc.). Prints a summary via
    PrintPoolPresenceTableTaxonomyDict and returns the counter dict.
    """
    # Keys double as the print order for the summary report.
    keysInPrintOrder = [\
    'totalLines',\
    'linesThatMapToLibraryAddresses',\
    'linesThatMapToSingleLibraryAddresses',\
    'linesThatMapToMultipleLibraryAddresses',\
    'linesThatMapToUnambiguousLibraryAddresses',\
    'linesThatMapToAmbiguousLibraryAddresses', \
    'linesThatDoNotMapToLibraryAddresses',\
    'linesThatHaveNoReadsAboveThresholdInAnyPool',\
    'linesThatMapToControlIndexesOnly',\
    'linesThatHaveCoordinatesInNoPoolAxis',\
    'linesThatHaveCoordinatesInOnlyOnePoolAxis',\
    'linesThatHaveCoordinatesInOnlyTwoPoolAxes',\
    'linesThatHaveCoordinatesInOnlyThreePoolAxes',\
    'totalReads']

    # Define the pool presence line taxonomy dict, all counters zeroed.
    poolPresenceTableTaxonomyDict = {}
    for key in keysInPrintOrder:
        poolPresenceTableTaxonomyDict[key] = 0

    # NOTE(review): the following four dicts are never read or written again
    # in this function — they appear to be vestigial.
    poolPresenceTablePoolsCoordsList = {}
    possibleAddressesDict = {}
    numberPoolAxesWithEntriesForLine = {}
    numberPoolAxesWithMoreThanOneEntry = {}

    i = 0
    while i < len(poolPresenceTable):
        readsInLine = CalculateTotalReadsInLine(poolPresenceTable[i], \
        rowPools, colPools, prPools, pcPools, controlPools)
        poolPresenceTableTaxonomyDict['totalReads'] += readsInLine

        coord = int(poolPresenceTable[i]['readAlignmentCoord'])
        poolPresenceTableTaxonomyDict['totalLines'] += 1

        possibleAddresses = CalculateLibraryAddressesForPoolPresenceTableLine(\
        poolPresenceTable[i], rowPools, colPools, prPools, pcPools, threshold=threshold)
        lenPossibleAddresses = len(possibleAddresses)

        [addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control] = \
        CalculatePoolCoordsForLine(\
        poolPresenceTable[i], rowPools, colPools, prPools, pcPools, controlPools, threshold=threshold)

        # NOTE(review): poolCoordsForLine is assigned but never used below.
        poolCoordsForLine = [addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control]

        [nAddressPoolAxesWithEntries, nControlPoolsWithEntries] = \
        CalculateNumberOfPoolAxesThatHaveEntries(addresses_r, addresses_c, addresses_pr, addresses_pc, \
        addresses_control)

        # Calculate if there will be an ambiguous library address calculation by calculating the number
        # of pool axes with more than one entry. One axis with multiple (even if possible coords are
        # filled) is fine, but more axis with more than one entry leads to cross terms that are
        # ambiguous.
        nAddressPoolAxesWithMoreThanOneEntry = \
        CalculateNumberOfPoolAxesThatHaveMoreThanOneEntry(addresses_r, addresses_c, addresses_pr, \
        addresses_pc, addresses_control)

        # The print(str(coord)) calls below flag anomalies: a line whose axis
        # occupancy should make a library address impossible, yet one was
        # computed anyway.
        if (nAddressPoolAxesWithEntries == 0) and (nControlPoolsWithEntries == 0):
            poolPresenceTableTaxonomyDict['linesThatHaveNoReadsAboveThresholdInAnyPool'] += 1
            if lenPossibleAddresses != 0:
                print(str(coord))

        if (nAddressPoolAxesWithEntries == 0) and (nControlPoolsWithEntries > 0):
            poolPresenceTableTaxonomyDict['linesThatMapToControlIndexesOnly'] += 1
            if lenPossibleAddresses != 0:
                print(str(coord))

        if (nAddressPoolAxesWithEntries == 1):
            poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInOnlyOnePoolAxis'] += 1
            if lenPossibleAddresses != 0:
                print(str(coord))

        if (nAddressPoolAxesWithEntries == 2):
            poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInOnlyTwoPoolAxes'] += 1
            if lenPossibleAddresses != 0:
                print(str(coord))

        if (nAddressPoolAxesWithEntries == 3):
            poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInOnlyThreePoolAxes'] += 1
            if lenPossibleAddresses != 0:
                print(str(coord))

        if (nAddressPoolAxesWithEntries == 0):
            poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInNoPoolAxis'] += 1
            if lenPossibleAddresses != 0:
                print(str(coord))

        # More than one axis with multiple entries means cross terms, i.e.
        # ambiguous addresses (see comment above).
        if nAddressPoolAxesWithMoreThanOneEntry > 1 and lenPossibleAddresses >= 1:
            poolPresenceTableTaxonomyDict['linesThatMapToAmbiguousLibraryAddresses'] += 1

        if nAddressPoolAxesWithMoreThanOneEntry <= 1 and lenPossibleAddresses >= 1:
            poolPresenceTableTaxonomyDict['linesThatMapToUnambiguousLibraryAddresses'] += 1

        if lenPossibleAddresses == 0:
            poolPresenceTableTaxonomyDict['linesThatDoNotMapToLibraryAddresses'] += 1
        elif lenPossibleAddresses == 1:
            poolPresenceTableTaxonomyDict['linesThatMapToSingleLibraryAddresses'] += 1
            poolPresenceTableTaxonomyDict['linesThatMapToLibraryAddresses'] += 1
        elif lenPossibleAddresses > 1:
            poolPresenceTableTaxonomyDict['linesThatMapToMultipleLibraryAddresses'] += 1
            poolPresenceTableTaxonomyDict['linesThatMapToLibraryAddresses'] += 1

        i += 1

    PrintPoolPresenceTableTaxonomyDict(threshold, poolPresenceTableTaxonomyDict, keysInPrintOrder)

    return poolPresenceTableTaxonomyDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def PrintPoolPresenceTableTaxonomyDict(threshold, poolPresenceTableTaxonomyDict, keysInPrintOrder):
    """Print the taxonomy counters, one 'key: value' line per key, preceded
    by the read count threshold that produced them."""
    reportLines = ['threshold: ' + str(threshold)]
    for key in keysInPrintOrder:
        reportLines.append(key + ': ' + str(poolPresenceTableTaxonomyDict[key]))
    # Trailing '\n' matches the original string-accumulation output exactly.
    print('\n'.join(reportLines) + '\n')
    return
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
# Step 6: Functions for initially populating the pool presence table
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
def GenerateUniqueCoordsList(genomeArray):
    """Return the sorted unique read alignment coordinates in genomeArray.

    Bug fix: `from scipy import unique` relied on a deprecated SciPy alias of
    numpy.unique that recent SciPy releases removed; import numpy.unique
    directly (identical behavior).
    """
    from numpy import unique
    coords = genomeArray['readAlignmentCoord']
    uniqueCoords = unique(coords)
    return uniqueCoords
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateValidCoordsList(genomeArray):
    """Filter genomeArray down to valid alignments.

    A record is valid when its alignment quality is > 1, exactly one
    alignment was found, the himar transposon sequence was recognized, the
    index is positive, and no strange SAM flags were seen.

    Bug fixes relative to the original:
      * the output dtype listed 4 field names but 5 formats, which numpy
        rejects with a ValueError — the stray fifth format is removed;
      * the sizing pass omitted the strangeFlagsSum test that the copy pass
        applied, which could leave zero-filled trailing rows — both passes
        now use the same predicate.

    Returns a structured array with fields readID, readAlignmentCoord,
    alignmentQuality and index.
    """
    import numpy
    from numpy import int32

    def _recordIsValid(record):
        # Single validity predicate shared by the sizing and copy passes.
        return record['alignmentQuality'] > 1 and record['alignmentFound'] == 1 \
            and record['multipleAlignmentsFound'] == 0 and record['himarRecognized'] == 1 \
            and record['index'] > 0 and record['strangeFlagsSum'] == 0

    # First pass: count valid records and find the longest read ID so the
    # output's string field can hold every ID (plus 2 bytes of headroom).
    validCoords = 0
    maxReadIDLength = 15
    for record in genomeArray:
        if _recordIsValid(record):
            validCoords += 1
            readIDLength = len(record['readID'])
            if readIDLength > maxReadIDLength:
                maxReadIDLength = readIDLength

    # 'S' is the canonical bytes-string dtype code ('a' is a deprecated alias).
    readIDFieldCode = 'S' + str(maxReadIDLength + 2)
    validGenomeArray = numpy.zeros(validCoords, \
        dtype={'names': ['readID', 'readAlignmentCoord', 'alignmentQuality', 'index'], \
        'formats': [readIDFieldCode, int32, int32, int32]})

    # Second pass: copy the valid records into the new array.
    j = 0
    for record in genomeArray:
        if _recordIsValid(record):
            validGenomeArray[j]['readID'] = record['readID']
            validGenomeArray[j]['readAlignmentCoord'] = record['readAlignmentCoord']
            validGenomeArray[j]['alignmentQuality'] = record['alignmentQuality']
            validGenomeArray[j]['index'] = record['index']
            j += 1

    return validGenomeArray
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GeneratePoolNameToPoolCodeLookupTable(barcodeFile):
    """Parse a barcode CSV file into a barcode-number -> pool-name dict.

    Each non-comment line is 'poolName,forwardSeq,revCompl,barcodeNumber'.
    Lines starting with '#' are comments; blank or short lines are skipped
    (the original crashed with an IndexError on a blank line).

    Args:
        barcodeFile: path to the barcode CSV file.

    Returns:
        dict mapping str(barcodeNumber) -> poolName.
    """
    indexLookupTable = {}
    # `with` guarantees the handle is closed even if parsing raises.
    with open(barcodeFile, 'r') as barcodeFileHandle:
        for line in barcodeFileHandle:
            strippedLine = line.strip()
            if strippedLine == '' or strippedLine.startswith('#'):
                continue
            lineData = strippedLine.split(',')
            if len(lineData) < 4:
                # Malformed line: not enough fields to extract a barcode.
                continue
            poolName = lineData[0]
            barcodeNumber = lineData[3]
            indexLookupTable[str(barcodeNumber)] = poolName
    return indexLookupTable
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def BuildInitialPoolPresenceTable(genomeArray, outputLog, barcodeFile, poolPresenceTableFileName):
    """Build, write and return the initial pool presence table.

    Filters genomeArray down to valid alignments, collects the unique
    alignment coordinates, builds the pool presence table from the barcode
    lookup, and writes it to poolPresenceTableFileName. Progress messages go
    to both stdout and outputLog.
    """
    import numpy

    def _logAndPrint(message):
        # Progress messages are mirrored to the log file and the console.
        UpdateLogFileData(outputLog, message)
        print(message)

    _logAndPrint(gSeparatorString + 'Building Initial Pool Presence Table\n')

    # Compile the valid reads, sorted by alignment coordinate.
    _logAndPrint("Making Valid Genome Array\n")
    validGenomeArray = numpy.sort(GenerateValidCoordsList(genomeArray), \
        order='readAlignmentCoord')

    # Generate the unique coordinates list.
    _logAndPrint("Generating Unique Coordinates List\n")
    uniqueCoords = GenerateUniqueCoordsList(validGenomeArray)

    # Make the first round of the pool presence table.
    indexLookupTable = GeneratePoolNameToPoolCodeLookupTable(barcodeFile)
    dtypeArray = GenerateDTypeArrayForPoolPresenceDict(indexLookupTable)
    poolColumns = ['readAlignmentCoord'] + \
        [indexLookupTable[poolKey] for poolKey in sorted(indexLookupTable.keys())]

    print("Generating Pool Presence Table")
    poolPresenceTable = GeneratePoolPresenceTable(uniqueCoords, validGenomeArray, \
        indexLookupTable, dtypeArray)

    WritePoolPresenceTable3(poolPresenceTableFileName, poolPresenceTable, poolColumns)

    return poolPresenceTable
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
# Step 7: Functions for analyzing the pool presence table
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
def CalculateLibraryAddressLocatabilityForPoolPresenceTableLine(poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, threshold, \
maxEntriesInSingleAddressPoolAxis, maxTotalCoords):
    """Decide how locatable the genomic coordinate on this line is.

    Returns [possibleAddressesAndScores, locatability, maxSinglePoolCoordNumber]
    where locatability is 'unlocatable', 'guessable', 'ambiguous' or
    'unambiguous'. possibleAddressesAndScores is only computed when all four
    address pool axes have entries; otherwise it is None.
    """
    [addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control] = \
        CalculatePoolCoordsForLine(\
        poolPresenceTableLine, rowPools, colPools, prPools, pcPools, controlPools, \
        threshold=threshold)

    [nRowCoords, nColCoords, nPRCoords, nPCCoords] = \
        CalculateNumberOfEntriesInPoolAxes(addresses_r, addresses_c, \
        addresses_pr, addresses_pc)
    nTotalCoords = nRowCoords + nColCoords + nPRCoords + nPCCoords

    [nAddressPoolAxesWithEntries, nControlPoolsWithEntries] = \
        CalculateNumberOfPoolAxesThatHaveEntries(addresses_r, addresses_c, addresses_pr, \
        addresses_pc, addresses_control)

    nAddressPoolAxesWithMoreThanOneEntry = \
        CalculateNumberOfPoolAxesThatHaveMoreThanOneEntry(addresses_r, addresses_c, \
        addresses_pr, addresses_pc, addresses_control)

    maxSinglePoolCoordNumber = \
        CalculateMaxNumberOfEntriesInSinglePoolAxis(addresses_r, addresses_c, \
        addresses_pr, addresses_pc)

    # Decide on the locatability of the genomic coordinate.
    if nAddressPoolAxesWithEntries < 3:
        # Fewer than 3 populated axes: not even enough to guess an address.
        locatability = 'unlocatable'
        possibleAddressesAndScores = None
    elif maxSinglePoolCoordNumber > maxEntriesInSingleAddressPoolAxis or \
    nTotalCoords > maxTotalCoords:
        # Too many coordinates overall or in one axis: treat as noise.
        locatability = 'unlocatable'
        possibleAddressesAndScores = None
    elif nAddressPoolAxesWithEntries == 3:
        if nAddressPoolAxesWithMoreThanOneEntry > 1:
            locatability = 'unlocatable'
            possibleAddressesAndScores = None
        else:
            # One axis missing, at most one multi-entry axis: recoverable by
            # re-running at a lower threshold (see AssignGuessableAddresses).
            locatability = 'guessable'
            possibleAddressesAndScores = None
    elif nAddressPoolAxesWithEntries == 4:
        possibleAddressesAndScores = \
            CalculateLibraryAddressesForPoolPresenceTableLine2(poolPresenceTableLine, \
            rowPools, colPools, prPools, pcPools, fitDict, \
            areaDict, threshold)
        # Bug fix: a pdb.set_trace() debug trap fired here whenever the
        # address list came back empty, freezing non-interactive runs. An
        # empty list now simply falls through to the classification below.
        if nAddressPoolAxesWithMoreThanOneEntry > 1:
            locatability = 'ambiguous'
        else:
            locatability = 'unambiguous'
    else:
        locatability = 'unlocatable'
        possibleAddressesAndScores = None

    return [possibleAddressesAndScores, locatability, maxSinglePoolCoordNumber]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def PopulateSudokuGrid3(sudokuGridLookupDict, poolPresenceTable, rowPools, colPools, \
prPools, pcPools, controlPools, fitDict, areaDict, readCountThreshold, scoreThreshold, \
maxEntriesInSingleAddressPoolAxis, maxTotalCoords):
    """Assign every pool presence table line to wells in the sudoku grid.

    Each line is classified by locatability and dispatched to the matching
    assignment routine; 'unlocatable' lines are skipped.
    """
    for poolPresenceTableLine in poolPresenceTable:
        coord = poolPresenceTableLine['readAlignmentCoord']

        [possibleAddressesAndScores, locatability, maxSinglePoolCoordNumber] = \
            CalculateLibraryAddressLocatabilityForPoolPresenceTableLine(poolPresenceTableLine, \
            rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, \
            readCountThreshold, maxEntriesInSingleAddressPoolAxis, maxTotalCoords)

        if locatability == 'unambiguous':
            AssignUnambiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, \
                coord, locatability)
        elif locatability == 'ambiguous':
            AssignAmbiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, \
                coord, locatability, maxSinglePoolCoordNumber)
        elif locatability == 'guessable':
            AssignGuessableAddresses(sudokuGridLookupDict, coord, poolPresenceTableLine, \
                rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, \
                readCountThreshold, maxEntriesInSingleAddressPoolAxis, maxTotalCoords)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AddAddressToSudokuGrid(coord, locatability, possibleAddressesAndScoresEntry, \
sudokuGridLookupDict):
    """Append a SudokuGenomicCoord for this address entry to its well.

    possibleAddressesAndScoresEntry[0] is an address string 'row_col_pr_pc';
    entries [2] and [3] are the locatability score and the read count.
    """
    addressCoords = possibleAddressesAndScoresEntry[0].split('_')
    row, col = addressCoords[0], addressCoords[1]
    pr, pc = addressCoords[2], addressCoords[3]

    sudokuCoord = SudokuGenomicCoord(coord, locatability, \
        possibleAddressesAndScoresEntry[2], possibleAddressesAndScoresEntry[3])

    targetWell = sudokuGridLookupDict[pr][pc].wellGrid[row][col]
    targetWell.readAlignmentCoords.append(sudokuCoord)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignUnambiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
locatability):
    """Enter every computed address for this coordinate into the sudoku grid."""
    for addressAndScore in possibleAddressesAndScores:
        AddAddressToSudokuGrid(coord, locatability, addressAndScore, sudokuGridLookupDict)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignAmbiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
locatability, maxEntriesInSingleAddressPoolAxis):
    """Enter the top-scoring candidate addresses for an ambiguous coordinate.

    Candidates are sorted by score (entry index 2, descending) and at most
    max(1, maxEntriesInSingleAddressPoolAxis) of them are added — the
    original loop always added at least one entry even when the cap was 0.

    Bug fix: the original caught IndexError only to call pdb.set_trace(),
    which froze non-interactive runs when the candidate list was empty; an
    empty list is now simply a no-op.
    """
    import operator

    sortedPossibleAddressesAndScores = sorted(possibleAddressesAndScores, \
        key=operator.itemgetter(2), reverse=True)

    entriesToAdd = min(len(sortedPossibleAddressesAndScores), \
        max(1, maxEntriesInSingleAddressPoolAxis))

    for addressAndScore in sortedPossibleAddressesAndScores[:entriesToAdd]:
        AddAddressToSudokuGrid(coord, locatability, addressAndScore, sudokuGridLookupDict)
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignGuessableAddresses(sudokuGridLookupDict, coord, poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, readCountThreshold, \
maxSinglePoolCoordNumber, maxTotalCoords):
    """Try to rescue a 'guessable' line by raising the read threshold from 1
    toward readCountThreshold until the line classifies as unambiguous.

    If an unambiguous classification is reached at any threshold, its
    addresses are entered into the sudoku grid as 'unambiguous'; otherwise
    nothing is added.
    """
    temporaryThreshold = 1
    validAddressesFound = False
    while validAddressesFound == False and temporaryThreshold <= readCountThreshold:
        # NOTE(review): maxSinglePoolCoordNumber is both an argument to this
        # call and rebound from its return value, so every iteration after
        # the first passes the previous call's result instead of the caller's
        # original argument. This looks unintentional — confirm before
        # relying on it.
        [possibleAddressesAndScores, locatability, maxSinglePoolCoordNumber] = \
        CalculateLibraryAddressLocatabilityForPoolPresenceTableLine(poolPresenceTableLine, \
        rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, temporaryThreshold, \
        maxSinglePoolCoordNumber, maxTotalCoords)

        if locatability == 'unambiguous':
            validAddressesFound = True
        else:
            temporaryThreshold += 1

    if validAddressesFound == True:
        AssignUnambiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
        'unambiguous')
    return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SolvePoolPresenceTable(sudokuGridLookupDict, poolPresenceTable, \
rowPools, colPools, prPools, pcPools, controlPools, logReadNumberRatioHistogramFitDict, \
logReadNumberRatioHistogramIntegralDict, \
readCountThreshold, voigtScoreThreshold, maxSinglePoolCoordNumber, maxTotalCoords, \
maxGapForCoordGrouping):
    """Populate the sudoku grid from the pool presence table, group nearby
    read alignment coordinates, and report occupancy taxonomies before and
    after grouping.

    Returns the post-grouping occupancy taxonomy dict.
    """
    PopulateSudokuGrid3(sudokuGridLookupDict, poolPresenceTable, rowPools, colPools, \
        prPools, pcPools, controlPools, logReadNumberRatioHistogramFitDict, \
        logReadNumberRatioHistogramIntegralDict, readCountThreshold, voigtScoreThreshold, \
        maxSinglePoolCoordNumber, maxTotalCoords)

    taxonomyBeforeGrouping = CalculateSudokuGridOccupancyTaxonomy(sudokuGridLookupDict, \
        rowPools, colPools, prPools, pcPools)

    GroupReadAlignmentCoordsInSudokuGrid(sudokuGridLookupDict, prPools, pcPools, rowPools, \
        colPools, maxGap=maxGapForCoordGrouping)

    taxonomyAfterGrouping = CalculateSudokuGridOccupancyTaxonomy(sudokuGridLookupDict, \
        rowPools, colPools, prPools, pcPools)

    print('Sudoku Taxonomy Pre-Grouping')
    PrintSudokuGridOccupancyTaxonomy(taxonomyBeforeGrouping)
    print('Sudoku Taxonomy Post-Grouping')
    PrintSudokuGridOccupancyTaxonomy(taxonomyAfterGrouping)

    return taxonomyAfterGrouping
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GroupSudokuGenomicCoords(coordArray, maxGap=1):
    """Merge read alignment coordinates that lie within maxGap of each other.

    coordArray must already be sorted by coordinate. Runs of nearby
    coordinates are collapsed into a single SudokuGenomicCoord positioned at
    the read-count-weighted median of the run, carrying the run's total read
    count; singleton runs keep their original object.
    """
    import pdb
    import numpy
    import operator

    # First pass: split the sorted coordinates into runs, where consecutive
    # members are within maxGap of each other.
    i = 0
    expect = None
    run = []
    result = [run]
    currentLineMatchesPrevious = True
    while i < len(coordArray):
        if currentLineMatchesPrevious:
            run.append(coordArray[i])
        else:
            run = [coordArray[i]]
            result.append(run)
        currentLineMatchesPrevious = False
        if i < len(coordArray) - 1:
            currentCoord = coordArray[i].coord
            nextCoord = coordArray[i+1].coord
            expect = currentCoord + maxGap
            if nextCoord <= expect:
                currentLineMatchesPrevious = True
        i += 1

    # Second pass: collapse each multi-member run into one representative.
    groupedCoords = []
    i = 0
    while i < len(result):
        if len(result[i]) == 1:
            # Singleton run: keep the original object unchanged.
            groupedCoords.append(result[i][0])
        elif len(result[i]) > 1:
            coords = sorted(result[i], key=operator.attrgetter('readCount'), reverse=True)
            j = 0
            # NOTE: despite the name, readCountList holds each member's
            # coordinate repeated readCount times, so numpy.median below
            # yields a read-count-weighted median position.
            readCountList = []
            while j < len(coords):
                k = 0
                while k < coords[j].readCount:
                    readCountList.append(coords[j].coord)
                    k += 1
                j += 1
            representativeCoord = numpy.median(readCountList)

            # Total reads and the strongest locatability across the run.
            j = 0
            totalReadCount = 0
            locatabilityArray = []
            while j < len(coords):
                totalReadCount += coords[j].readCount
                locatabilityArray.append(coords[j].locatability)
                j +=1
            if 'unambiguous' in locatabilityArray:
                locatability = 'unambiguous'
            elif 'ambiguous' in locatabilityArray:
                locatability = 'ambiguous'
            elif 'guessable' in locatabilityArray:
                locatability = 'guessable'
            else:
                locatability = 'merged'
            # Individual scores are not combined; merged entries are marked.
            locatabilityScore = 'merged'

            groupedCoords.append(SudokuGenomicCoord(representativeCoord, locatability, \
                locatabilityScore, totalReadCount))
        i += 1

    return groupedCoords
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GroupReadAlignmentCoordsInSudokuGrid(sudokuGridLookupDict, prPools, pcPools, rowPools, \
colPools, maxGap=4):
    """Group nearby read alignment coordinates in every well of the grid.

    For each plate (prPool, pcPool) and each well (rowPool, colPool), the
    well's coordinates are sorted and merged via GroupSudokuGenomicCoords
    with the given maxGap. Missing plates are reported and skipped.
    """
    import operator

    for prPool in prPools:
        for pcPool in pcPools:
            try:
                sudokuPlate = sudokuGridLookupDict[prPool][pcPool]
            except (KeyError, IndexError):
                # Bug fix: the original handler referenced the undefined
                # names rowPool/colPool (a NameError at runtime) and caught
                # only IndexError, although dict lookups raise KeyError.
                print('No plate at: ' + prPool + '_' + pcPool)
                continue
            if sudokuPlate is None:
                continue
            for colPool in colPools:
                for rowPool in rowPools:
                    sudokuWell = sudokuPlate.wellGrid[rowPool][colPool]
                    sortedCoords = sorted(sudokuWell.readAlignmentCoords, \
                        key=operator.attrgetter('coord'))
                    sudokuWell.readAlignmentCoords = \
                        GroupSudokuGenomicCoords(sortedCoords, maxGap=maxGap)
# ------------------------------------------------------------------------------------------------ #
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import os
def join_images(images):
    """Sum a list of equally-sized grayscale images pixel-wise.

    Args:
        images: non-empty sequence of 2-D uint8 arrays of identical shape.

    Returns:
        A (rows, cols, 1) uint8 array. Addition wraps modulo 256 (uint8
        overflow), exactly matching the original per-pixel accumulation.
    """
    rows = len(images[0])
    cols = len(images[0][0])
    final_image = np.zeros((rows, cols, 1), np.uint8)
    print(len(images))
    # Vectorized accumulation replaces the original triple Python loop
    # (O(len(images) * rows * cols) interpreted iterations).
    for img in images:
        final_image[:, :, 0] += np.asarray(img, dtype=np.uint8)
    return final_image
def main():
    """Reconstruct images in each subdirectory of ./Test by progressively
    summing the '_.bmp' share images and writing rec0.bmp..rec7.bmp.
    """
    # os.path.join keeps the script portable instead of hard-coding the
    # Windows "\\" separator everywhere.
    path = os.path.join(os.getcwd(), "Test")
    dirs = [f for f in os.listdir(path) if not f.endswith('bmpfinal.bmp')]
    print(dirs)
    for d in dirs:
        dir_path = os.path.join(path, d)
        files = [f for f in os.listdir(dir_path) if f.endswith('_.bmp')]
        images = []
        for f in files:
            file_path = os.path.join(dir_path, f)
            print(file_path)
            images.append(cv2.imread(file_path, cv2.IMREAD_GRAYSCALE))
        # Reconstruct with progressively fewer leading shares (i..7).
        for i in range(0, 8):
            decoded_image = join_images(images[i:8])
            cv2.imwrite(os.path.join(dir_path, "rec{}.bmp".format(i)), decoded_image)
# Guard the entry point so importing this module does not trigger the run.
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
_mod_txt = """
NEURON {
POINT_PROCESS ExpSynMorphforge
RANGE tau, e, i
NONSPECIFIC_CURRENT i
RANGE peak_conductance
}
UNITS {
(nA) = (nanoamp)
(mV) = (millivolt)
(uS) = (microsiemens)
}
PARAMETER {
tau = 0.1 (ms) <1e-9,1e9>
e = 0 (mV)
peak_conductance = -100000 ()
}
ASSIGNED {
v (mV)
i (nA)
}
STATE {
g (uS)
}
INITIAL {
g=0
}
BREAKPOINT {
SOLVE state METHOD cnexp
i = g*(v - e)
}
DERIVATIVE state {
g' = -g/tau
}
UNITSOFF
NET_RECEIVE(weight (uS)) {
weight = 1.0
g = g + weight * peak_conductance
}
UNITSON
"""
def getExpSynModfile():
    """Return the NMODL source text for the ExpSynMorphforge point process."""
    return _mod_txt
|
nilq/baby-python
|
python
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Remote Task test setup
"""
__docformat__ = "reStructuredText"
from Testing import ZopeTestCase
from zope.testing.doctest import INTERPRET_FOOTNOTES
from zope.testing.loggingsupport import InstalledHandler
import doctest
import random
import unittest
import logging
from five.taskqueue import service
ZopeTestCase.installProduct('Five')
def _configure_conflict_error_log_level():
    """Set Zope's conflict-error log level to INFO so conflict errors show
    up in the handlers installed by setUp."""
    import App.config
    configuration = App.config.getConfiguration()
    configuration.conflict_error_log_level = logging.INFO
    App.config.setConfiguration(configuration)
def setUp(test):
    """Doctest setUp: wire the Zope app root, log handlers, fast task
    processing and a deterministic random seed into the test globals."""
    test.globs['root'] = ZopeTestCase.base.app()

    # As tasks run in different threads, we cannot rely on printed results;
    # correctness is proven by inspecting captured log records instead.
    test.globs['log_info'] = InstalledHandler('z3c.taskqueue')

    # The ZPublisher conflict logger lets the doctests prove that no
    # conflict happened.
    test.globs['conflict_logger'] = InstalledHandler('ZPublisher.Conflict')

    # Make sure the ZPublisher conflict error log level is set up.
    _configure_conflict_error_log_level()

    # Speed the processor loop up for tests, remembering the original
    # arguments so tearDown can restore them.
    test.origArgs = service.TaskService.processorArguments
    service.TaskService.processorArguments = {'waitTime': 0.0}

    # Make tests predictable.
    random.seed(27)
def tearDown(test):
    """Doctest tearDown: re-randomize the RNG and restore the TaskService
    processor arguments saved by setUp."""
    random.seed()
    service.TaskService.processorArguments = test.origArgs
class TestIdGenerator(unittest.TestCase):
    """Tests for TaskService._generateId under a fixed random seed.

    Uses assertEqual throughout: the deprecated assertEquals alias was
    removed in Python 3.12.
    """

    def setUp(self):
        # Seed the RNG so generated ids are deterministic.
        random.seed(27)
        self.service = service.TaskService()

    def tearDown(self):
        # Re-randomize so later tests don't inherit the fixed seed.
        random.seed()

    def test_sequence(self):
        # Ids increment sequentially from the seeded starting value.
        # (Renamed from `id`, which shadowed the builtin.)
        expected = 1392637175
        self.assertEqual(expected, self.service._generateId())
        self.assertEqual(expected + 1, self.service._generateId())
        self.assertEqual(expected + 2, self.service._generateId())
        self.assertEqual(expected + 3, self.service._generateId())

    def test_in_use_randomises(self):
        # When the next id is already taken, the generator re-randomizes.
        expected = 1392637175
        self.assertEqual(expected, self.service._generateId())
        self.service.jobs[expected + 1] = object()
        expected = 1506179619
        self.assertEqual(expected, self.service._generateId())
        self.assertEqual(expected + 1, self.service._generateId())
        self.service.jobs[expected + 1] = object()
        self.assertEqual(expected + 2, self.service._generateId())
def test_suite():
    """Assemble the id-generator unit tests and the processor doctest suite."""
    doctest_flags = (doctest.NORMALIZE_WHITESPACE
                     | doctest.ELLIPSIS
                     | INTERPRET_FOOTNOTES)
    processor_suite = ZopeTestCase.ZopeDocFileSuite(
        'processor.txt',
        package='five.taskqueue.tests',
        setUp=setUp,
        tearDown=tearDown,
        optionflags=doctest_flags)
    return unittest.TestSuite((unittest.makeSuite(TestIdGenerator), processor_suite))
|
nilq/baby-python
|
python
|
# Copyright 2018 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import requests
import time
import unittest
from bs4 import BeautifulSoup
from object_database.service_manager.ServiceManager import ServiceManager
from object_database.web.ActiveWebService import (
active_webservice_schema,
ActiveWebService,
User
)
from object_database import core_schema, connect, service_schema
from object_database.util import configureLogging, genToken
from object_database.test_util import autoconfigure_and_start_service_manager, currentMemUsageMb
ownDir = os.path.dirname(os.path.abspath(__file__))
ownName = os.path.basename(os.path.abspath(__file__))
DATABASE_SERVER_PORT=8023
WEB_SERVER_PORT=8025
class ActiveWebServiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cleanupFn = lambda error=None: None
cls.base_url = "http://localhost:{port}".format(port=WEB_SERVER_PORT)
configureLogging("aws_test")
cls._logger = logging.getLogger(__name__)
    def configurableSetUp(self, auth_type="LDAP",
            auth_hostname=None, authorized_groups=(),
            ldap_base_dn=None, ldap_ntlm_domain=None,
            company_name=None):
        """Start a service manager plus an ActiveWebService configured with
        the given auth settings, and connect self.database to it.

        On any failure the partially-started services are cleaned up before
        the exception is re-raised.
        """
        self.token = genToken()
        # Propagate this test runner's effective log level to the services.
        log_level = self._logger.getEffectiveLevel()
        loglevel_name = logging.getLevelName(log_level)
        self.server, self.cleanupFn = autoconfigure_and_start_service_manager(
            port=DATABASE_SERVER_PORT,
            auth_token=self.token,
            loglevel_name=loglevel_name)
        try:
            self.database = connect("localhost", DATABASE_SERVER_PORT, self.token, retry=True)
            self.database.subscribeToSchema(core_schema, service_schema, active_webservice_schema)

            with self.database.transaction():
                service = ServiceManager.createOrUpdateService(ActiveWebService, "ActiveWebService", target_count=0)

            # Build the optional command-line arguments from the keyword
            # parameters that were actually supplied.
            optional_args = []
            if len(authorized_groups) > 0:
                optional_args.extend(['--authorized-groups', *authorized_groups])
            if auth_hostname:
                optional_args.extend(['--auth-hostname', auth_hostname])
            if ldap_base_dn:
                optional_args.extend(['--ldap-base-dn', ldap_base_dn])
            if ldap_ntlm_domain:
                optional_args.extend(['--ldap-ntlm-domain', ldap_ntlm_domain])
            if company_name:
                optional_args.extend(['--company-name', company_name])

            ActiveWebService.configureFromCommandline(
                self.database,
                service,
                [
                    '--port', str(WEB_SERVER_PORT),
                    '--host', 'localhost',
                    '--log-level', loglevel_name,
                    '--auth', auth_type
                ] + optional_args
            )

            with self.database.transaction():
                ServiceManager.startService("ActiveWebService", 1)

            self.waitUntilUp()
        except Exception:
            # Ensure services do not leak if setup failed part-way through.
            self.cleanupFn(error=True)
            raise
def waitUntilUp(self, timeout = 2.0):
t0 = time.time()
while time.time() - t0 < timeout:
try:
res = requests.get(self.base_url + "/login")
return
except Exception:
time.sleep(.5)
raise Exception("Webservice never came up.")
    def tearDown(self):
        # Tear down whatever configurableSetUp started; a no-op when setup
        # never ran (setUpClass installs a no-op default).
        self.cleanupFn()
def login(self, client, username='anonymous', password='bogus'):
# Because of CSRF security we need to do the following to authenticate:
# - Load the login page
# - Extract the csrf token (using BeautifulSoup)
# - Issue a POST request to the login endpoint that includes the CSRF token
login_url = self.base_url + "/login"
res = client.get(login_url)
self.assertFalse(res.history)
self.assertEqual(res.status_code, 200)
soup = BeautifulSoup(res.text, 'html.parser')
csrf_token = soup.find('input', dict(name='csrf_token'))['value']
res = client.post(login_url, data=dict(username=username, password=password, csrf_token=csrf_token))
self.assertTrue(res.history)
self.assertEqual(res.status_code, 200)
self.assertTrue('login' not in res.url)
def test_web_service_no_auth(self):
self.configurableSetUp(auth_type="NONE")
url = self.base_url + "/content/object_database.css"
client = requests.Session()
res = client.get(url)
self.assertTrue(res.history) # first time around we WILL get redirects
self.assertEqual(res.status_code, 200)
res = client.get(url)
self.assertFalse(res.history) # second time around we will NOT get redirects
self.assertEqual(res.status_code, 200)
self.assertEqual(res.url, url)
def test_web_service_login_and_access(self):
self.configurableSetUp(auth_type="PERMISSIVE")
url = self.base_url + "/content/object_database.css"
client = requests.Session()
username = 'anonymous'
# 1. Cannot access without login
res = requests.get(url)
self.assertTrue(res.history)
self.assertEqual(len(res.history), 1)
self.assertEqual(res.status_code, 200)
self.assertNotEqual(res.url, url)
self.assertTrue('login' in res.url)
# 2. login successfully
self.login(client, username)
# 3. now we can access our target page
res = client.get(url)
self.assertFalse(res.history)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.url, url)
# 4. test that we get auto-logged-out by modifying the user in object DB
with self.database.transaction():
user = User.lookupAny(username=username)
if user:
user.logout()
res = client.get(url)
self.assertTrue(res.history)
self.assertEqual(res.status_code, 200)
self.assertTrue('login' in res.url)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
read ntuple produced by Truth_JETMET...
make some validation plots
"""
import ROOT
ROOT.gROOT.SetBatch()
from optparse import OptionParser
def make_plots(file_name, post_fix):
    """Read the 'physics' tree from file_name and save validation plots.

    One PDF per histogram is written, named '<post_fix>_<hist name>.pdf'.
    """
    import AtlasStyle
    root_file = ROOT.TFile.Open(file_name)
    tree = root_file.Get("physics")
    # (name, title, nbins, xlow, xhigh, branch) for every validation plot.
    hist_specs = [
        ("h_m4l", "m4l;m_{4l} [GeV];Events/2 GeV", 100, 100, 600, "m4l"),
        ("h_mZ1", "mZ1;m_{Z1} [GeV];Events/1 GeV", 70, 30, 110, "mZ1"),
        ("h_mZ2", "mZ2;m_{Z2} [GeV];Events/2 GeV", 60, 0, 120, "mZ2"),
        ("h_Z1_lepplus_pt", "Z1_lepplus_pt;l^{+} of Z1 p_{T} [GeV];Events/4 GeV", 35, 0, 140, "Z1_lepplus_pt"),
        ("h_Z2_lepplus_pt", "Z2_lepplus_pt;l^{+} of Z2 p_{T} [GeV];Events/4 GeV", 35, 0, 140, "Z2_lepplus_pt"),
        ("h_Z1_lepminus_pt", "Z1_lepminus_pt;l^{-} of Z1 p_{T} [GeV];Events/ 4 GeV", 35, 0, 140, "Z1_lepminus_pt"),
        ("h_Z2_lepminus_pt", "Z2_lepminus_pt;l^{-} of Z2 p_{T} [GeV];Events/ 4 GeV", 35, 0, 140, "Z2_lepminus_pt"),
    ]
    hists = []
    for name, title, nbins, xlow, xhigh, branch in hist_specs:
        hist = ROOT.TH1F(name, title, nbins, xlow, xhigh)
        # Fill via TTree::Draw redirection; the /1E3 scales the branch values
        # by 1/1000 (presumably MeV -> GeV).
        tree.Draw("{0}/1E3>>{1}".format(branch, name), "")
        hists.append(hist)
    canvas = ROOT.TCanvas("canvas", "canvas", 600, 600)
    for hist in hists:
        hist.Draw()
        canvas.SaveAs(post_fix + "_" + hist.GetName() + ".pdf")
if __name__ == "__main__":
    usage = "%prog file_name out_tag"
    parser = OptionParser(usage=usage, description="read truth file, plot basic variables")
    (options, args) = parser.parse_args()
    if len(args) < 2:
        # BUG FIX: this was `print parser.print_help()`, which printed the
        # help text and then also printed the call's None return value.
        parser.print_help()
        exit(1)
    file_ = args[0]
    out_ = args[1]
    make_plots(file_, out_)
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os

# Seniority-vs-salary sample data set.
cd = os.path.join('LinearRegression', 'Kidem_ve_Maas_VeriSeti.csv')
dataset = pd.read_csv(cd)
print(dataset.describe())

# Features: every column except the last; target: the second column (salary).
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values

# BUG FIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1/3, random_state=0)

## Train the model
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train, y_train)
y_pred = regressor.predict(x_test)

# Visualize: raw training data only.
plt.scatter(x_train, y_train, color='red')
plt.title('Kıdeme göre maaş tahmini regresyon modeli')
plt.xlabel('Kıdem')
plt.ylabel('Maaş')
plt.show()

# Training data plus the model's predictions drawn as points.
plt.scatter(x_train, y_train, color='red')
modelin_tahmin_ettigi_y = regressor.predict(x_train)
plt.scatter(x_train, modelin_tahmin_ettigi_y, color='blue')
plt.title('Kıdeme göre maaş tahmini regresyon modeli')
plt.xlabel('Kıdem')
plt.ylabel('Maaş')
plt.show()

# Training data plus the fitted regression line.
plt.scatter(x_train, y_train, color='red')
modelin_tahmin_ettigi_y = regressor.predict(x_train)
plt.plot(x_train, modelin_tahmin_ettigi_y, color='blue')
plt.title('Kıdeme göre maaş tahmini regresyon modeli')
plt.xlabel('Kıdem')
plt.ylabel('Maaş')
plt.show()
|
nilq/baby-python
|
python
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
from django.contrib import admin
from xgds_map_server import models
class KmlMapAdmin(admin.ModelAdmin):
    """Admin list view for KmlMap: everything except uuid is editable inline."""
    list_display = ('uuid',
                    'name',
                    'parent',
                    'openable',
                    'visible',
                    'kmlFile',
                    'description')
    # All displayed columns except the uuid are editable from the list page.
    list_editable = list_display[1:]
    ordering = ('parent', 'name')
    search_fields = ('name',
                     'description',
                     'kmlFile')
class MapLayerAdmin(admin.ModelAdmin):
    """Admin list view for MapLayer: everything except uuid is editable inline."""
    list_display = ('uuid',
                    'name',
                    'parent',
                    'visible',
                    'description')
    # All displayed columns except the uuid are editable from the list page.
    list_editable = list_display[1:]
    ordering = ('parent', 'name')
    search_fields = ('name',
                     'description')
class MapGroupAdmin(admin.ModelAdmin):
    """Admin list view for MapGroup: everything except uuid is editable inline."""
    list_display = ('uuid',
                    'name',
                    'parent',
                    'description')
    # All displayed columns except the uuid are editable from the list page.
    list_editable = list_display[1:]
    ordering = ('parent', 'name')
    search_fields = ('name',
                     'description')
# NOTE(review): this is an exact duplicate of the MapLayerAdmin defined
# earlier in this module; the redefinition silently shadows the first one.
# It is harmless (identical) but is a candidate for removal.
class MapLayerAdmin(admin.ModelAdmin):
    """Admin list view for MapLayer (duplicate definition; see note above)."""
    list_display = ('uuid',
                    'name',
                    'parent',
                    'visible',
                    'description')
    list_editable = list_display[1:]
    ordering = ('parent', 'name')
    search_fields = ('name',
                     'description')
# Register map models with their dedicated admin classes.
admin.site.register(models.KmlMap, KmlMapAdmin)
admin.site.register(models.MapGroup, MapGroupAdmin)
admin.site.register(models.MapLayer, MapLayerAdmin)
#TODO make admin classes for other map layer stuff below
admin.site.register(models.MapTile)
# NOTE(review): the tile/overlay models below reuse MapLayerAdmin, whose
# list_display references MapLayer fields ('parent', 'visible', ...) —
# verify those fields exist on these models.
admin.site.register(models.WMSTile, MapLayerAdmin)
admin.site.register(models.WMTSTile, MapLayerAdmin)
admin.site.register(models.GroundOverlayTime, MapLayerAdmin)
admin.site.register(models.GeoJSON)
admin.site.register(models.Place)
admin.site.register(models.Geotiff)
|
nilq/baby-python
|
python
|
__author__ = 'Jeremy'
|
nilq/baby-python
|
python
|
#!/bin/false python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import shutil
from BaseRunner import BaseRunner
class Verilator(BaseRunner):
    """BaseRunner driver for Verilator (lint, preprocessing or simulation)."""
    def __init__(self):
        super().__init__("verilator", "verilator")
        self.url = "https://verilator.org"
    def prepare_run_cb(self, tmp_dir, params):
        """Write a wrapper shell script into tmp_dir and assemble self.cmd.

        `params` supplies at least 'mode' ('simulation', 'preprocessing' or
        anything else for lint-only), 'top_module', 'incdirs', 'defines' and
        'files'; 'runner_verilator_flags' is optional extra flags.
        """
        mode = params['mode']
        conf = os.environ['CONF_DIR']
        scr = os.path.join(tmp_dir, 'scr.sh')
        # vmain.cpp is the C++ harness compiled against the verilated model.
        shutil.copy(os.path.join(conf, 'runners', 'vmain.cpp'), tmp_dir)
        build_dir = 'vbuild'
        build_exe = 'vmain'
        # The script runs verilator with all remaining argv ($@); in
        # simulation mode it then builds and runs the generated model.
        with open(scr, 'w') as f:
            f.write('set -x\n')
            f.write('{0} $@ || exit $?\n'.format(self.executable))
            if mode == 'simulation':
                f.write('make -C {} -f Vtop.mk\n'.format(build_dir))
                f.write('./vbuild/{}'.format(build_exe))
        # verilator executable is a script but it doesn't
        # have shell shebang on the first line
        self.cmd = ['sh', 'scr.sh']
        if mode == 'simulation':
            self.cmd += ['--cc']
        elif mode == 'preprocessing':
            self.cmd += ['-E']
        else:
            self.cmd += ['--lint-only']
        self.cmd += ['-Wno-fatal', '-Wno-UNOPTFLAT', '-Wno-BLKANDNBLK']
        # Flags for compliance testing:
        self.cmd += ['-Wpedantic', '-Wno-context']
        if params['top_module'] != '':
            # Single element containing a space is fine here: the script's
            # unquoted $@ lets sh word-split it back into two arguments.
            self.cmd.append('--top-module ' + params['top_module'])
        if mode == 'preprocessing':
            self.cmd += ['-P', '-E']
        for incdir in params['incdirs']:
            self.cmd.append('-I' + incdir)
        if mode == 'simulation':
            self.cmd += [
                '--Mdir', build_dir, '--prefix', 'Vtop', '--exe', '-o',
                build_exe
            ]
            self.cmd.append('vmain.cpp')
        if 'runner_verilator_flags' in params:
            self.cmd += [params['runner_verilator_flags']]
        for define in params['defines']:
            self.cmd.append('-D' + define)
        self.cmd += params['files']
|
nilq/baby-python
|
python
|
class Solution:
def intToRoman(self, num):
def get_representation(num):
symbol_table={
1:"I",
5:"V",
10:"X",
50:"L",
100:"C",
500:"D",
1000 :"M",
}
if num < 4:
return symbol_table[1]*num
if 4 <= num <6 :
return symbol_table[1]+symbol_table[5] if num == 4 else symbol_table[5]
if 5< num < 9 :
return symbol_table[5]+symbol_table[1]
pass
|
nilq/baby-python
|
python
|
#py_gui.py
"gui basics"
from tkinter import *
class Application(Frame):
    """Top-level application frame; no widgets are defined yet."""
    pass
# Build the root window, attach the application frame, and enter the Tk event loop.
root = Tk()
app = Application(master=root)
app.mainloop()
|
nilq/baby-python
|
python
|
'''
There are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
**Example 1**
`Input: numCourses = 2, prerequisites = [[1,0]]`
`Output: true`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0. So it is possible.
**Example 2**
`Input: numCourses = 2, prerequisites = [[1,0],[0,1]]`
`Output: false`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0, and to take course 0 you should
also have finished course 1. So it is impossible.
**Note**
You may assume that there are no duplicate edges in the input prerequisites.
'''
from collections import defaultdict
class Solution(object):
    """Course-schedule feasibility (is the prerequisite graph acyclic?).

    Works by repeatedly eliminating 'sink' courses (courses no other course
    depends on) and removing the edges that point at them; if any edge
    survives, the graph contains a cycle.
    """
    def __init__(self):
        # Courses queued for elimination (their dependent list is empty).
        self.eligibleCourses = []
        # Courses already eliminated from the graph.
        self.visited = []
    def seedEligibleCourses(self, g):
        """Queue every not-yet-visited course whose dependent list is empty."""
        for index, node in g.items():
            if len(node) == 0 and index not in self.visited:
                self.eligibleCourses.append(index)
    def dfs(self, node, g):
        """Eliminate `node`: mark it visited and drop every edge pointing at it."""
        if node in self.visited:
            return
        self.visited.append(node)
        for _, n in g.items():
            if node in n:
                n.remove(node)
        # Queue any dependents freed up by this removal.
        for successor in g[node]:
            if successor not in self.visited:
                self.eligibleCourses.append(successor)
        # NOTE: the original ended with a no-op `self.dfs(node, g)` recursion
        # (node is already visited, so it returned immediately); removed.
    def canFinish(self, numCourses, prerequisites):
        """Return True iff all `numCourses` courses can be completed.

        `prerequisites` is a list of [course, prerequisite] pairs.
        """
        if not prerequisites:
            return True
        # BUG FIX: reset per-call state so the same Solution instance gives
        # correct answers when canFinish is called more than once.
        self.eligibleCourses = []
        self.visited = []
        graph = defaultdict(list)
        for relation in prerequisites:
            currentCourse, prerequisite = relation[0], relation[1]
            graph[prerequisite].append(currentCourse)  # post order!!
            if currentCourse not in graph:
                graph[currentCourse] = []
        self.seedEligibleCourses(graph)
        while self.eligibleCourses:
            current = self.eligibleCourses.pop(0)
            self.dfs(current, graph)
            self.seedEligibleCourses(graph)
        # Any surviving edge means a cycle, i.e. the schedule is infeasible.
        for _, n in graph.items():
            if len(n) > 0:
                return False
        return True
|
nilq/baby-python
|
python
|
"""This acts as a kind of middleware - which has now been whittled down to only providing
logging information"""
import logging
import platform
from .constants import NameSpace
from .config import CLIInputs, ParsedArgs
from .utils import read_sdk_version
logger = logging.getLogger("validate-cli-args")
class ValidateCliArgs:
    """Called via ArgParser.validate_cli_args.

    Acts as a thin logging middleware: each handler checks that the parsed
    namespace matches, then emits debug/info messages for the flags that
    were actually supplied.
    """
    def __init__(self, cli_inputs: CLIInputs):
        self.cli_inputs = cli_inputs

    def _log_string_flags(self, parsed_args: ParsedArgs, flag_names) -> None:
        """Debug-log every named string flag that is non-empty."""
        for flag_name in flag_names:
            flag_value = getattr(parsed_args, flag_name)
            if flag_value != "":
                logger.debug(f"{flag_name} flag={flag_value}")

    # ----- MAIN ARGUMENT HANDLERS ----- #
    def handle_top_level_args(self, parsed_args: ParsedArgs) -> None:
        if self.cli_inputs.namespace != NameSpace.TOP_LEVEL:
            return
        if parsed_args.version:
            logger.info("ElectrumSV Software Development Kit")
            logger.info(f"Python version {platform.python_version()}-{platform.architecture()[0]}")
            logger.info(f"SDK version {read_sdk_version()}")

    def handle_install_args(self, parsed_args: ParsedArgs) -> None:
        if self.cli_inputs.namespace != NameSpace.INSTALL:
            return
        # logging
        self._log_string_flags(parsed_args, ("id", "repo", "branch"))

    def handle_start_args(self, parsed_args: ParsedArgs) -> None:
        if self.cli_inputs.namespace != NameSpace.START:
            return
        # logging
        if parsed_args.new:
            logger.debug("new flag=set")
        if parsed_args.gui:
            logger.debug("gui flag=set")
        self._log_string_flags(parsed_args, ("id", "repo", "branch"))

    def handle_stop_args(self, parsed_args: ParsedArgs) -> None:
        """takes no arguments"""
        if self.cli_inputs.namespace != NameSpace.STOP:
            return
        # logging
        self._log_string_flags(parsed_args, ("id",))

    def handle_reset_args(self, parsed_args: ParsedArgs) -> None:
        """takes no arguments"""
        if self.cli_inputs.namespace != NameSpace.RESET:
            return
        # logging
        self._log_string_flags(parsed_args, ("id", "repo"))

    def handle_status_args(self, _parsed_args: ParsedArgs) -> None:
        return

    def handle_config_args(self, parsed_args: ParsedArgs) -> None:
        return
|
nilq/baby-python
|
python
|
# unet.py
#
from __future__ import division
import torch.nn as nn
import torch.nn.functional as F
import torch
from numpy.linalg import svd
from numpy.random import normal
from math import sqrt
class UNet(nn.Module):
    """Small two-level U-Net-style encoder/decoder.

    Two maxpool downsampling stages and two bilinear upsampling stages with
    skip connections implemented as channel concatenation; the output has
    `colordim` channels and is squashed through softsign to (-1, 1).
    """
    def __init__(self, colordim = 1):
        super(UNet, self).__init__()
        # Encoder level 1: colordim -> 64 channels.
        self.conv1_1 = nn.Conv2d(colordim, 64, 3, padding = 1)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding = 1)
        self.bn1_1 = nn.BatchNorm2d(64)
        self.bn1_2 = nn.BatchNorm2d(64)
        # Encoder level 2: 64 -> 128 channels (after first maxpool).
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding = 1)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding = 1)
        self.bn2_1 = nn.BatchNorm2d(128)
        self.bn2_2 = nn.BatchNorm2d(128)
        # Bottleneck: 128 -> 256 channels (after second maxpool),
        # then a 1x1 conv back down to 128 for upsampling.
        self.conv4_1 = nn.Conv2d(128, 256, 3, padding = 1)
        self.conv4_2 = nn.Conv2d(256, 256, 3, padding = 1)
        self.upconv4 = nn.Conv2d(256, 128, 1)
        self.bn4 = nn.BatchNorm2d(128)
        self.bn4_1 = nn.BatchNorm2d(256)
        self.bn4_2 = nn.BatchNorm2d(256)
        self.bn4_out = nn.BatchNorm2d(256)
        # Decoder level 1: 256 (concat of skip + upsampled) -> 128 -> 64.
        self.conv7_1 = nn.Conv2d(256, 128, 3, padding = 1)
        self.conv7_2 = nn.Conv2d(128, 128, 3, padding = 1)
        self.upconv7 = nn.Conv2d(128, 64, 1)
        self.bn7 = nn.BatchNorm2d(64)
        self.bn7_1 = nn.BatchNorm2d(128)
        self.bn7_2 = nn.BatchNorm2d(128)
        self.bn7_out = nn.BatchNorm2d(128)
        # Decoder level 2: 128 (concat) -> 64, then 1x1 conv to colordim.
        self.conv9_1 = nn.Conv2d(128, 64, 3, padding = 1)
        self.conv9_2 = nn.Conv2d(64, 64, 3, padding = 1)
        self.bn9_1 = nn.BatchNorm2d(64)
        self.bn9_2 = nn.BatchNorm2d(64)
        self.conv9_3 = nn.Conv2d(64, colordim, 1)
        self.bn9_3 = nn.BatchNorm2d(colordim)
        self.bn9 = nn.BatchNorm2d(colordim)
        self.maxpool = nn.MaxPool2d(2, stride = 2, return_indices = False, ceil_mode = False)
        self.upsample = nn.UpsamplingBilinear2d(scale_factor = 2)
        self._initialize_weights()
    def forward(self, x1):
        """Run the encoder/decoder; x1 is (N, colordim, H, W) with H, W
        divisible by 4 (two 2x poolings must invert cleanly)."""
        # Encoder.
        x1 = F.relu(self.bn1_2(self.conv1_2(F.relu(self.bn1_1(self.conv1_1(x1))))))
        x2 = F.relu(self.bn2_2(self.conv2_2(F.relu(self.bn2_1(self.conv2_1(self.maxpool(x1)))))))
        # Bottleneck.
        xup = F.relu(self.bn4_2(self.conv4_2(F.relu(self.bn4_1(self.conv4_1(self.maxpool(x2)))))))
        # Decoder with skip connections (channel concat of x2, then x1).
        xup = self.bn4(self.upconv4(self.upsample(xup)))
        xup = self.bn4_out(torch.cat((x2, xup), 1))
        xup = F.relu(self.bn7_2(self.conv7_2(F.relu(self.bn7_1(self.conv7_1(xup))))))
        xup = self.bn7(self.upconv7(self.upsample(xup)))
        xup = self.bn7_out(torch.cat((x1, xup), 1))
        xup = F.relu(self.bn9_3(self.conv9_3(F.relu(self.bn9_2(self.conv9_2(F.relu(self.bn9_1(self.conv9_1(xup)))))))))
        return F.softsign(self.bn9(xup))
    def _initialize_weights(self):
        """He-style init for convs (normal, std sqrt(2/n)); BN to identity."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
|
nilq/baby-python
|
python
|
# First, we import a tool to allow text to pop up on a plot when the cursor
# hovers over it. Also, we import a data structure used to store arguments
# of what to plot in Bokeh. Finally, we will use numpy for this section as well!
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, output_file, show
import numpy as np
# Let's plot a simple 5x5 grid of squares, alternating in color as red and blue.
plot_values = [1,2,3,4,5]
plot_colors = ["red", "blue"]
# How do we tell Bokeh to plot each point in a grid? Let's use a function that
# finds each combination of values from 1-5.
from itertools import product
grid = list(product(plot_values, plot_values))
print(grid)
# The first value is the x coordinate, and the second value is the y coordinate.
# Let's store these in separate lists.
xs, ys = zip(*grid)
print(xs)
print(ys)
# Now we will make a list of colors, alternating between red and blue.
colors = [plot_colors[i%2] for i in range(len(grid))]
print(colors)
# Finally, let's determine the strength of transparency (alpha) for each point,
# where 0 is completely transparent.
alphas = np.linspace(0, 1, len(grid))
# Bokeh likes each of these to be stored in a special dataframe, called
# ColumnDataSource. Let's store our coordinates, colors, and alpha values.
source = ColumnDataSource(
    data={
        "x": xs,
        "y": ys,
        "colors": colors,
        "alphas": alphas,
    }
)
# We are ready to make our interactive Bokeh plot!
output_file("Basic_Example.html", title="Basic Example")
# NOTE(review): the 'resize' tool was removed in newer Bokeh releases; this
# script targets an older Bokeh version — confirm before upgrading.
fig = figure(tools="resize, hover, save")
fig.rect("x", "y", 0.9, 0.9, source=source, color="colors",alpha="alphas")
# Configure the hover tool to show each square's grid coordinates.
hover = fig.select(dict(type=HoverTool))
hover.tooltips = {
    "Value": "@x, @y",
}
show(fig)
|
nilq/baby-python
|
python
|
from typing import Literal, Optional, Union
class Trust_Region_Options:
    """Configuration options for a trust-region optimizer."""
    border_abstol: float = 1e-10  # presumably: absolute tolerance near variable bounds — confirm
    tol_step: float = 1.0e-10  # presumably: stop when the step norm drops below this — confirm
    tol_grad: float = 1.0e-6  # presumably: stop when the gradient norm drops below this — confirm
    abstol_fval: Optional[float] = None  # optional absolute objective-value tolerance (None = disabled)
    max_stall_iter: Optional[int] = None  # optional stall-iteration limit (None = disabled)
    init_delta: float = 1.0  # initial trust-region radius
    max_iter: int  # hard iteration limit; required, set by __init__
    check_rel: float = 1.0e-2  # relative tolerance for the gradient check
    check_abs: Optional[float] = None  # absolute tolerance for the gradient check (None = disabled)
    # None (default) means always run the gradient check; 0 means check only
    # once before optimization starts; -1 disables the check entirely.
    check_iter: Optional[int] = None
    shaking: Union[Literal["x.shape[0]"], int] = "x.shape[0]"  # "x.shape[0]" means use the problem dimension
    display: bool = True  # print progress output
    def __init__(self, *, max_iter: int) -> None:
        """Create an options object; only the iteration limit is mandatory."""
        self.max_iter = max_iter
|
nilq/baby-python
|
python
|
import sqlite3
import os
import MLBProjections.MLBProjections.DB.MLB as MLB
import MLBProjections.MLBProjections.Environ as ENV
from pprint import pprint
################################################################################
################################################################################
pitchContactCmd = """
SELECT pitches.game_id,
pitcher_id,
batter_id,
pitch_type_id,
pitch_result_id,
ab_type_id,
pitch_num,
box,
turn,
sequence,
pitch_velocity,
balls,
strikes,
outs,
(CASE WHEN pitcher.throws = batter.bats THEN 1 ELSE 0 END) AS side,
(CASE WHEN base_runners.first_base != -10 THEN 1 ELSE 0 END) AS first_base,
(CASE WHEN base_runners.second_base != -10 THEN 1 ELSE 0 END) AS second_base,
(CASE WHEN base_runners.third_base != -10 THEN 1 ELSE 0 END) AS third_base,
hit_style,
hit_hardness,
hit_angle,
hit_distance
FROM pitches
LEFT JOIN ab_results
ON pitches.pitch_id = ab_results.pitch_id
INNER JOIN pro_players AS pitcher
ON pitches.pitcher_id = pitcher.player_id
INNER JOIN pro_players AS batter
ON pitches.batter_id = batter.player_id
INNER JOIN pitch_locations
ON pitches.pitch_location_id = pitch_locations.pitch_location_id
INNER JOIN pitch_counts
ON pitches.pitch_count_id = pitch_counts.pitch_count_id
INNER JOIN base_runners
ON pitches.base_runners_id = base_runners.base_runners_id
WHERE {0[playerType]}_id = ?
"""
playerNameCmd = "SELECT first_name, last_name FROM pro_players WHERE player_id = ?"
leagueCmd = "SELECT league FROM pro_teams WHERE team_id = ?"
lineupCmd = """
SELECT lineups.player_id, first_name, last_name, batt_order, lineups.pos
FROM lineups
INNER JOIN pro_players
ON lineups.player_id = pro_players.player_id
INNER JOIN (SELECT team_id, MAX(game_id) AS game_id
FROM lineups
WHERE team_id = ?) AS max_id
ON lineups.game_id = max_id.game_id AND lineups.team_id = max_id.team_id
WHERE sub_order = 1 AND lineups.pos != 'P'
ORDER BY batt_order
"""
similarPitcherCmd = """
SELECT pp.player_id,
team_id
FROM pro_players AS pp
INNER JOIN (SELECT pos, throws FROM pro_players WHERE player_id =?) AS a
ON pp.pos = a.pos AND pp.throws = a.throws
INNER JOIN (SELECT pitcher_id, COUNT(pitcher_id) AS pitch_count FROM pitches GROUP BY pitcher_id) AS b
ON pp.player_id = b.pitcher_id
INNER JOIN (SELECT MAX(game_id), player_id, team_id FROM lineups GROUP BY player_id) AS c
ON pp.player_id = c.player_id
WHERE pitch_count >= 100
ORDER BY pp.player_id DESC
"""
similarBatterCmd = """
SELECT pp.player_id,
team_id
FROM pro_players AS pp
INNER JOIN (SELECT pos, bats FROM pro_players WHERE player_id =?) AS a
ON pp.pos = a.pos AND pp.bats = a.bats
INNER JOIN (SELECT batter_id, COUNT(batter_id) AS pitch_count FROM pitches GROUP BY batter_id) AS b
ON pp.player_id = b.batter_id
INNER JOIN (SELECT MAX(game_id), player_id, team_id FROM lineups GROUP BY player_id) AS c
ON pp.player_id = c.player_id
WHERE pitch_count >= 100
ORDER BY pp.player_id DESC
"""
################################################################################
################################################################################
class DatabaseManager:
    """Mediates between the master MLB database and per-game clone databases.

    Clones the subset of league data needed for one matchup (teams, lineups,
    bullpens, historical pitch/contact rows) into a standalone game database.
    Players with fewer than 100 recorded pitches borrow data from a similar
    player (same position and handedness).
    """
    def __init__(self, db):
        # Master database wrapper; held open for the manager's lifetime.
        self.mlbDB = db
        self.mlbDB.openDB()
        # Per-game clone databases keyed by game id.
        self.gameDBs = {}
    def __del__(self):
        # BUG FIX: this previously called self.db.closeDB(), but the attribute
        # is named self.mlbDB — self.db never existed, so finalization always
        # raised AttributeError instead of closing the master database.
        self.mlbDB.closeDB()
    def findPlayer(self, playerId):
        """Return the (player_id,) row if the player exists, else None."""
        return self.mlbDB.fetchOne("SELECT player_id FROM pro_players WHERE player_id = ?", (playerId,))
    def addPlayerToDB(self, info):
        """Insert a new player into the master database and commit."""
        print("new Player")
        self.mlbDB.insert(MLB.proPlayersTable, info=info)
        self.mlbDB.commit()
    def getLeague(self, teamId):
        """Return the league string for a team."""
        return self.mlbDB.fetchOne(leagueCmd, (teamId,))[0]
    def update(self):
        self.mlbDB.update()
    def gameDBExists(self, index):
        """True if a clone database for this game id is already cached."""
        return index in self.gameDBs
    def getRecentLineup(self, teamId):
        """Return the team's most recent starting lineup rows."""
        return self.mlbDB.fetchAll(lineupCmd, (teamId,))
    def cloneDB(self, matchup):
        """Create (if needed), cache and return the clone DB for a matchup."""
        gameDB = MLB.MLBGame(matchup.getGameId())
        # Only populate the clone if its file doesn't exist yet.
        if not os.path.exists(ENV.getPath("game", fileName=matchup.getGameId())):
            gameDB.openDB()
            info = matchup.getInfo()
            self.setMetaData(gameDB, info)
            self.setTeams(gameDB, info)
            self.setBullpens(gameDB, info)
            self.setLineups(gameDB, info)
            self.setContacts(gameDB, info)
            self.setGames(gameDB)
            gameDB.commit()
            gameDB.closeDB()
        self.gameDBs[matchup.getGameId()] = gameDB
        return gameDB
    def setGames(self, gameDB):
        """Copy the master `games` row for every game referenced by the clone."""
        for gameId in gameDB.fetchAll("SELECT DISTINCT game_id FROM pitch_contacts"):
            gameDB.insert(MLB.gamesTable, values=self.mlbDB.fetchOne("SELECT * FROM games WHERE game_id = ?",(gameId[0],)))
    def setMetaData(self, gameDB, info):
        """Write the clone's meta row (game/teams/stadium) and the stadium row."""
        gameId = info["gameId"]
        homeId = info["teams"]["home"]["info"]["team_id"]
        awayId = info["teams"]["away"]["info"]["team_id"]
        stadiumId = info["teams"]["home"]["info"]["stadium_id"]
        gameDB.insert(MLB.metaTable, values=(gameId, homeId, awayId, stadiumId))
        stadiumInfo = self.mlbDB.fetchOne("SELECT * FROM stadiums WHERE stadium_id = ?",(stadiumId,))
        gameDB.insert(MLB.stadiumsTable, values=stadiumInfo)
    def setContacts(self, gameDB, info):
        """Copy each team's historical contact-quality rows into the clone."""
        homeId = info["teams"]["home"]["info"]["team_id"]
        awayId = info["teams"]["away"]["info"]["team_id"]
        for teamId in (homeId, awayId):
            for data in self.mlbDB.fetchAll("SELECT hit_style, hit_hardness, hit_angle, hit_distance, ab_type_id FROM pitches INNER JOIN ab_results ON pitches.pitch_id = ab_results.pitch_id INNER JOIN games ON pitches.game_id = games.game_id WHERE (home_id = ? OR away_id = ?) AND hit_style != -1", (teamId, teamId)):
                cabId = gameDB.nextKey(MLB.contactAtBatsTable)
                try:
                    gameDB.insert(MLB.contactAtBatsTable, values=[cabId,teamId]+list(data))
                except sqlite3.IntegrityError:
                    # Row already present; duplicates are expected and ignored.
                    pass
    def setBullpens(self, gameDB, info):
        """Register each team's starter and relief pitchers in the clone."""
        for key in ("home", "away"):
            team = info["teams"][key]
            teamId = team["teamId"]
            starterId = team["starter"]["playerId"]
            self.newPitcher(teamId, starterId, gameDB, True)
            for pitcher in team["roster"]["pitchers"]:
                self.newPitcher(teamId, pitcher["playerId"], gameDB)
    def setLineups(self, gameDB, info):
        """Register batters (plus the starter when the league bats pitchers)
        and write the starting lineup rows."""
        for key in ("home", "away"):
            team = info["teams"][key]
            teamId = team["teamId"]
            for batterId in [batter["playerId"] for batter in team["roster"]["batters"]]:
                self.newBatter(teamId, batterId, gameDB)
            if info["league"] == "NL":
                self.newBatter(teamId, team["starter"]["playerId"], gameDB)
            for batter in team["lineup"]:
                lId = gameDB.nextKey(MLB.lineupsTable)
                gameDB.insert(MLB.lineupsTable, values=(lId, info["gameId"], teamId, batter[0], batter[3], 1, batter[-1]))
    def setTeams(self, gameDB, info):
        """Copy both teams' master rows into the clone."""
        homeId = info["teams"]["home"]["teamId"]
        awayId = info["teams"]["away"]["teamId"]
        for teamId in (homeId, awayId):
            teamInfo = self.mlbDB.fetchOne("SELECT * FROM pro_teams WHERE team_id = ?", (teamId,))
            gameDB.insert(MLB.proTeamsTable, values=teamInfo)
    def addPlayer(self, gameDB, playerId):
        """Copy one player's master row into the clone."""
        playerInfo = self.mlbDB.curs.execute("SELECT * FROM pro_players WHERE player_id = ?",(playerId,)).fetchone()
        pprint(playerInfo)
        gameDB.insert(MLB.proPlayersTable, values=playerInfo)
    def newBatter(self, teamId, batterId, gameDB):
        """Add a batter and his pitch-contact history to the clone.

        Batters with fewer than 100 recorded pitches borrow a similar
        batter's rows, re-tagged with this batter's id.
        """
        if not gameDB.curs.execute("SELECT player_id FROM pro_players WHERE player_id = ?",(batterId,)).fetchone():
            self.addPlayer(gameDB, batterId)
            pitchCount = self.mlbDB.fetchOne("SELECT COUNT(batter_id) FROM pitches INNER JOIN pro_players ON pitches.batter_id = pro_players.player_id WHERE batter_id = ?", (batterId,))[0]
            checkId = batterId
            checkTeamId = teamId
            if pitchCount < 100:
                # Too little history: borrow rows from a comparable batter.
                checkId, checkTeamId = self.mlbDB.fetchOne(similarBatterCmd, (batterId,))
            pitchContacts = self.mlbDB.fetchAll(pitchContactCmd.format({"playerType":"batter"}), (checkId,))
            for contact in pitchContacts:
                pitchContactId = gameDB.nextKey(MLB.pitchContactsTable)
                try:
                    # Re-tag the (possibly borrowed) row with this batter's id.
                    gameDB.insert(MLB.pitchContactsTable, values=[pitchContactId, *contact[:2], batterId, *contact[3:]])
                except sqlite3.IntegrityError:
                    pass
    def newPitcher(self, teamId, pitcherId, gameDB, starter=0):
        """Add a pitcher, his bullpen row, pitch history and replacement data.

        Pitchers with fewer than 100 recorded pitches borrow a similar
        pitcher's rows, re-tagged with this pitcher's id.
        """
        if not gameDB.curs.execute("SELECT player_id FROM pro_players WHERE player_id = ?",(pitcherId,)).fetchone():
            self.addPlayer(gameDB, pitcherId)
            bpId = gameDB.nextKey(MLB.bullpensTable)
            gameDB.insert(MLB.bullpensTable, values=(bpId, teamId, pitcherId, starter))
            pitchCount = self.mlbDB.fetchOne("SELECT COUNT(pitcher_id) FROM pitches INNER JOIN pro_players ON pitches.pitcher_id = pro_players.player_id WHERE pitcher_id = ?", (pitcherId,))[0]
            checkId = pitcherId
            checkTeamId = teamId
            try:
                if pitchCount < 100:
                    # Too little history: borrow rows from a comparable pitcher.
                    checkId, checkTeamId = self.mlbDB.fetchOne(similarPitcherCmd, (pitcherId,))
            except TypeError:
                # No similar pitcher found (fetchOne returned None); keep own id.
                pass
            pitches = self.mlbDB.fetchAll(pitchContactCmd.format({"playerType":"pitcher"}), (checkId,))
            for contact in pitches:
                pitchContactId = gameDB.nextKey(MLB.pitchContactsTable)
                try:
                    # Re-tag the (possibly borrowed) row with this pitcher's id.
                    gameDB.insert(MLB.pitchContactsTable, values=[pitchContactId, contact[0], pitcherId, *contact[2:]])
                except sqlite3.IntegrityError:
                    pass
            for replace in self.mlbDB.fetchAll("SELECT * FROM pitcher_replace WHERE (remove_id = ? OR replace_id = ?)",(checkId, checkId)):
                try:
                    gameDB.insert(MLB.pitchReplaceTable, values=replace)
                except sqlite3.IntegrityError:
                    pass
|
nilq/baby-python
|
python
|
#
# Functional Python: The Lambda Lambada (Recursion)
# Python Techdegree
#
# Created by Dulio Denis on 3/22/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
# Recursion Challenge
# ------------------------------------------------
# Challenge Task 1 of 1
# Finish the prereqs function so that it recursively
# finds all of the prerequisite course titles in courses
# (like "Object-Oriented Python" is a prerequisite for
# "Django Basics").
# You should add() the title of the prerequisite to the pres
# set and then call prereqs again with the child courses.
# In the end, return the prereqs set.
# Nested course records: each has a count of direct prereqs, a title, and a
# 'prereqs' list of records with the same shape.
courses = {'count': 2,
           'title': 'Django Basics',
           'prereqs': [{'count': 3,
                     'title': 'Object-Oriented Python',
                     'prereqs': [{'count': 1,
                                  'title': 'Python Collections',
                                  'prereqs': [{'count':0,
                                               'title': 'Python Basics',
                                               'prereqs': []}]},
                                 {'count': 0,
                                  'title': 'Python Basics',
                                  'prereqs': []},
                                 {'count': 0,
                                  'title': 'Setting Up a Local Python Environment',
                                  'prereqs': []}]},
                     {'count': 0,
                      'title': 'Flask Basics',
                      'prereqs': []}]}
def prereqs(data, pres=None):
    """Collect the titles of every prerequisite reachable from `data`.

    `data` is a course record with a 'prereqs' list of records of the same
    shape; the course's own title is NOT included. An existing set may be
    passed via `pres` to accumulate into.
    """
    pres = pres or set()
    # Iterative depth traversal: visit every prereq record at any depth.
    pending = list(data['prereqs'])
    while pending:
        course = pending.pop()
        pres.add(course['title'])
        pending.extend(course['prereqs'])
    return pres
# Print the full set of prerequisite titles for 'Django Basics'.
print(prereqs(courses))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import random
import requests
import subprocess
import argparse
import datetime
import time
import sys
"""Based off https://github.com/fogleman/primitive/blob/master/bot/main.py
"""
# Read the Flickr API key (first line of ~/.flickr_api_key) once at import time.
with open(os.path.expanduser('~/.flickr_api_key'), 'r') as key_file:
    FLICKR_API_KEY = key_file.readline().rstrip()
class AttrDict(dict):
    """A dict whose entries are also readable/writable as attributes."""
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Point the attribute dict at the mapping itself, so d.key is d['key'].
        self.__dict__ = self
class Config(AttrDict):
    """Parameters for one primitive run (mode m, shape count n, alpha, resize)."""
    def randomize(self):
        """Pick a random mode and shape count; fixed alpha/resize defaults."""
        self.m = random.choice([1, 5, 7])
        self.n = random.randint(15, 50) * 10
        self.rep = 0
        self.a = 128
        self.r = 256
    def parse(self, text):
        """Best-effort parse of free text: a mode name sets m, an int token sets n."""
        # NOTE(review): MODE_NAMES is not defined anywhere in this file chunk;
        # presumably a module-level list of primitive mode names — confirm.
        text = (text or '').lower()
        tokens = text.split()
        for i, name in enumerate(MODE_NAMES):
            if name in text:
                self.m = i
        for token in tokens:
            try:
                self.n = int(token)
            except Exception:
                # Non-numeric token: deliberately ignored (best-effort parse).
                pass
    def validate(self):
        """Clamp the mode into [0, 8]; mode 6 needs a much larger shape count."""
        self.m = clamp(self.m, 0, 8)
        if self.m == 6:
            self.n = random.randint(1400, 2000)
    @property
    def description(self):
        """Human-readable summary, e.g. '400 <mode name>'."""
        total = self.n + self.n * self.rep
        return '%d %s' % (total, MODE_NAMES[self.m])
def clamp(x, lo, hi):
    """Constrain x to [lo, hi], applying the lower bound first."""
    return min(max(x, lo), hi)
def random_date(max_days_ago=1000):
    """Return a 'YYYY-MM-DD' date between 1 and max_days_ago days in the past."""
    offset = datetime.timedelta(days=random.randint(1, max_days_ago))
    return (datetime.date.today() - offset).strftime('%Y-%m-%d')
def interesting(date=None):
    """Fetch Flickr's interestingness photo list, optionally for a given
    'YYYY-MM-DD' date; returns the list of photo dicts from the JSON reply."""
    url = 'https://api.flickr.com/services/rest/'
    params = dict(
        api_key=FLICKR_API_KEY,
        format='json',
        nojsoncallback=1,
        method='flickr.interestingness.getList',
    )
    if date:
        params['date'] = date
    r = requests.get(url, params=params)
    return r.json()['photos']['photo']
def get_aspect_ratio(p):
    """Return width/height of photo `p`, taken from its Thumbnail size entry."""
    url = 'https://api.flickr.com/services/rest/'
    params = dict(
        api_key=FLICKR_API_KEY,
        format='json',
        nojsoncallback=1,
        method='flickr.photos.getSizes',
        photo_id=p['id']
    )
    r = requests.get(url, params=params)
    sizes = r.json()['sizes']['size']
    # NOTE: indexing the filter() result works because this file is Python 2
    # (filter returns a list); Python 3 would need list(filter(...)).
    thumbnail = filter(lambda x: x['label']=='Thumbnail', sizes)
    return float(thumbnail[0]['width']) / float(thumbnail[0]['height'])
def photo_url(p, size=None):
    """Build the static image URL for photo dict `p`, optionally with a size
    suffix. See: https://www.flickr.com/services/api/misc.urls.html"""
    base = 'https://farm%s.staticflickr.com/%s/%s_%s' % (
        p['farm'], p['server'], p['id'], p['secret'])
    if size:
        return '%s_%s.jpg' % (base, size)
    return base + '.jpg'
def download_photo(url, path):
    """Download `url` and write the raw bytes to `path`."""
    r = requests.get(url)
    with open(path, 'wb') as fp:
        fp.write(r.content)
def primitive(primitive_path, **kwargs):
    """Run the 'primitive' CLI, mapping each kwarg k=v to the flag '-k v'.

    None-valued kwargs are skipped. NOTE(review): the command runs through
    the shell (shell=True) with unescaped values, so callers must pass
    trusted, pre-quoted arguments (create_wallpaper quotes the -o path
    itself) -- confirm no untrusted input ever reaches this function.
    """
    flags = []
    for key, value in kwargs.items():
        if value is not None:
            flags.extend(['-%s' % key, str(value)])
    cmd = '{0} {1}'.format(primitive_path, ' '.join(flags))
    subprocess.call(cmd, shell=True)
def create_wallpaper(args):
    """Fetch a random 'interesting' Flickr photo and render a primitive-art
    wallpaper from it under args.output (Python 2 print syntax throughout)."""
    download_path = None
    try:
        print 'Finding interesting photo...'
        # random_date() varies the interestingness list between runs
        photos = interesting(date=random_date())
        photo = random.choice(photos)
        aspect_ratio = get_aspect_ratio(photo)
        print 'Downloading photo...'
        # 'z' size suffix -- a mid-resolution variant of the photo
        url = photo_url(photo, 'z')
        download_path = os.path.join('/tmp', photo['id'] + '.png')
        download_photo(url, download_path)
        output_path = os.path.expanduser(args.output)
        # sort results into landscape/ vs portrait/ subfolders by aspect ratio
        output_path = os.path.join(output_path, 'landscape' if aspect_ratio > 1 else 'portrait')
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        config = Config()
        config.randomize()
        config.validate()
        print 'Generating wallpaper with parameters {0}'.format(config)
        # Config keys (m, n, rep, a, r) become primitive CLI flags via **config;
        # the -o path is quoted here because primitive() uses shell=True.
        primitive(args.primitive_path,
                  i=download_path,
                  s=args.size,
                  o='\'{0}\''.format(os.path.join(output_path, photo['id'] + '.png')),
                  **config)
        print 'Done!'
    except Exception as e:
        # best-effort: report the failure and fall through to cleanup
        print e
    finally:
        # always remove the temporary download, even on failure
        if download_path is not None and os.path.exists(download_path):
            os.remove(download_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', help="path to output directory", required=True)
    parser.add_argument('-s', '--size', type=int, help="width of output image", required=True)
    parser.add_argument('--primitive_path', help="path to primitive executable", default='/usr/local/bin/primitive')
    parser.add_argument('-n', '--num', type=int, help="number of wallpapers to generate", default=1)
    args = parser.parse_args()
    # check network status
    # Probe connectivity by calling the Flickr API directly, retrying up to
    # max_retries times with a 5s pause between attempts.
    # NOTE(review): the bare 'except' below also swallows KeyboardInterrupt.
    max_retries = 10
    attempt = 0
    response = None
    while attempt < max_retries:
        attempt += 1
        try:
            print 'Checking network...'
            response = interesting()
            break
        except:
            print 'No network, retrying...'
            time.sleep(5)
    if response is None:
        print 'No network connection'
        sys.exit(1)
    # Python 2 xrange: generate args.num wallpapers sequentially
    for n in xrange(args.num):
        create_wallpaper(args)
|
nilq/baby-python
|
python
|
import multiprocessing as mp
import time
def foo_pool(taskQ, x):
    """Worker task: echo x, enqueue it on taskQ, and return its square."""
    print(x)
    taskQ.put(x)
    squared = x * x
    return squared
# Shared accumulator for pool results. apply_async callbacks run in the
# main process, so only the main process ever mutates this list.
result_list = []


def log_result(result):
    """Pool callback: collect one worker result into result_list."""
    result_list.extend((result,))
def apply_async_with_callback():
    """Fan foo_pool(taskQ, i) out over a process pool and print the
    collected return values.

    Bug fix: a raw multiprocessing.Queue cannot be passed as a task argument
    to Pool.apply_async -- pickling it raises RuntimeError, apply_async
    swallows the error (no error_callback), and every task silently failed,
    leaving result_list empty. A Manager-backed queue is a picklable proxy
    and works. The queue is also left unbounded: the original bound of 4
    would deadlock workers 5..9 in put() since nothing consumes the queue
    before pool.join().
    """
    pool = mp.Pool()
    manager = mp.Manager()
    taskQ = manager.Queue()
    for i in range(10):
        pool.apply_async(foo_pool, args=(taskQ, i), callback=log_result)
    pool.close()
    pool.join()
    print(result_list)
if __name__ == '__main__':
    # Guard is required with the 'spawn' start method (Windows/macOS):
    # worker processes re-import this module, and the guard prevents them
    # from re-running the demo recursively.
    apply_async_with_callback()
|
nilq/baby-python
|
python
|
# Exercise (translated): write a Python script that reads a person's name and
# shows a welcome message based on the typed value.
# NOTE(review): the implementation below only prints a fixed greeting and
# never reads any input -- it does not satisfy the stated exercise.
msg = 'Olá Mundo!'
print(msg)
|
nilq/baby-python
|
python
|
import ldap
import json
import socket
from urllib.parse import urlparse
def create_from_env():
    """Build an LdapAuth instance from the LDAP_* environment variables.

    Missing variables yield None attributes; assert_configs() will catch
    an incomplete configuration later.
    """
    import os
    env = os.environ
    auth = LdapAuth(env.get('LDAP_ADDRESS'))
    auth.base_dn = env.get('LDAP_BASE_DN')
    auth.bind_dn = env.get('LDAP_BIND_DN')
    auth.bind_pass = env.get('LDAP_BIND_PASS')
    return auth
class LdapAuthException(Exception):
    """Raised for user-lookup and credential failures in LdapAuth."""
class LdapAuth(object):
    """LDAP credential checker: binds with a service account, looks up the
    user's DN, then re-binds as that user to verify the password."""

    def __init__(self, address=None):
        self.address = address          # LDAP URL, e.g. 'ldap://host:389'
        self.base_dn = None             # search base for user lookups
        self.bind_dn = None             # service-account DN
        self.bind_pass = None           # service-account password
        self.search_template = 'uid=%(username)s'  # %-formatted with {'username': ...}
        self.ldap_timeout = 2           # seconds for LDAP operations
        self.conn_timeout = 2           # seconds for the raw TCP probe

    def assert_configs(self):
        """Print the (password-masked) configuration and assert it is complete."""
        print(json.dumps({
            'address': self.address,
            'base_dn': self.base_dn,
            'bind_dn': self.bind_dn,
            'bind_pass': '***' if self.bind_pass else None,
            'search_template': self.search_template,
        }))
        assert self.address is not None
        assert self.base_dn is not None
        assert self.bind_dn is not None
        assert self.bind_pass is not None
        assert self.search_template is not None

    def check_credentials(self, username, password):
        """Verify username/password without raising.

        Returns (message, authorized): ("OK: <user>", True) on success,
        or (error description, False) on any failure.
        """
        try:
            self.whoami(username, password)
            return ("OK: "+username, True)
        except ldap.LDAPError as e:
            return (e.__class__.__name__, False)
        except Exception as e:
            return (str(e), False)

    def check_connection(self):
        """Probe a raw TCP connection to the configured host/port.

        Returns True when the TCP handshake succeeds; never raises.
        """
        address = urlparse(self.address)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self.conn_timeout)
        try:
            s.connect((address.hostname, int(address.port)))
            s.shutdown(2)
            return True
        except (OSError, TypeError, ValueError):
            # OSError: refused/unreachable/timeout; TypeError/ValueError:
            # missing or malformed port in the URL. The original used a bare
            # 'except:' which also swallowed KeyboardInterrupt/SystemExit.
            return False
        finally:
            # Bug fix: the original never closed the socket, leaking the
            # file descriptor on every probe.
            s.close()

    def check_binding(self):
        """Return True when the service-account bind succeeds."""
        # initialize (skip TLS certificate verification, as before)
        ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        l = ldap.initialize(self.address)
        l.timeout = self.ldap_timeout
        try:
            l.simple_bind_s(self.bind_dn, self.bind_pass)
            whoami = l.whoami_s()
        except ldap.LDAPError:
            # was a bare 'except:'; narrowed so non-LDAP failures propagate
            whoami = None
        finally:
            l.unbind_s()
        return whoami is not None and len(whoami) > 0

    def whoami(self, username, password):
        """Authenticate username/password and return the LDAP whoami string.

        Raises LdapAuthException with a specific message for unknown or
        ambiguous users, or a generic one when the password bind fails.
        """
        # initialize (skip TLS certificate verification, as before)
        ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        l = ldap.initialize(self.address)
        l.timeout = self.ldap_timeout
        try:
            l.simple_bind_s(self.bind_dn, self.bind_pass)
            # search user
            search_filter = self.search_template % {'username': username}
            users = l.search_s(self.base_dn, ldap.SCOPE_SUBTREE, search_filter)
            if len(users) == 0:
                msg = "User with username '%s' not found on %s" % (username, self.base_dn)
                raise LdapAuthException(msg)
            if len(users) > 1:
                raise LdapAuthException("Multiple users found")
            # try to verify user password
            user_dn, _ = users[0]
            l.simple_bind_s(user_dn, password)
            whoami = l.whoami_s()
        except ldap.LDAPError:
            # Bug fix: the original bare 'except:' also caught the
            # LdapAuthException raised just above, replacing its specific
            # message with the generic 'Invalid username/password' below.
            whoami = None
        finally:
            l.unbind_s()
        if whoami is None or len(whoami) == 0:
            raise LdapAuthException("Invalid username/password")
        return whoami
|
nilq/baby-python
|
python
|
# %% coding=utf-8
import pandas as pd
from atm import ATM
from sklearn.model_selection import train_test_split
# Load the facial-landmark feature table (pairwise point distances plus a
# 'label' score column, per the select_cols list below).
# NOTE(review): hard-coded absolute path -- assumes /data/face is mounted.
beauty_data = pd.read_csv('/data/face/df_input.csv')
select_cols = ['Image', 'label', '0_10_x', '0_10_y', '0_11_x', '0_11_y', '0_12_x', '0_12_y', '0_13_x', '0_13_y',
'0_14_x', '0_14_y', '0_15_x', '0_15_y', '0_16_x', '0_16_y', '0_17_x', '0_17_y', '0_18_x', '0_18_y',
'0_19_x', '0_19_y', '0_1_x', '0_1_y', '0_20_x', '0_20_y', '0_21_x', '0_21_y', '0_22_x', '0_22_y',
'0_23_x', '0_23_y', '0_24_x', '0_24_y', '0_25_x', '0_25_y', '0_26_x', '0_26_y', '0_27_x', '0_27_y',
'0_28_x', '0_28_y', '0_29_x', '0_29_y', '0_2_x', '0_2_y', '0_30_x', '0_30_y', '0_31_x', '0_31_y',
'0_32_x', '0_32_y', '0_33_x', '0_33_y', '0_34_x', '0_34_y', '0_35_x', '0_35_y', '0_36_x', '0_36_y',
'0_37_x', '0_37_y', '0_38_x', '0_38_y', '0_39_x', '0_39_y', '0_3_x', '0_3_y', '0_40_x', '0_40_y',
'0_41_x', '0_41_y', '0_42_x', '0_42_y', '0_43_x', '0_43_y', '0_44_x', '0_44_y', '0_45_x', '0_45_y',
'0_46_x', '0_46_y', '0_47_x', '0_47_y', '0_48_x', '0_48_y', '0_49_x', '0_49_y', '0_4_x', '0_4_y',
'0_50_x', '0_50_y', '0_51_x', '0_51_y', '0_52_x', '0_52_y', '0_53_x', '0_53_y', '0_54_x', '0_54_y',
'0_55_x', '0_55_y', '0_56_x', '0_56_y', '0_57_x', '0_57_y', '0_58_x', '0_58_y', '0_59_x', '0_59_y',
'0_5_x', '0_5_y', '0_60_x', '0_60_y', '0_61_x', '0_61_y', '0_62_x', '0_62_y', '0_63_x', '0_63_y',
'0_64_x', '0_64_y', '0_65_x', '0_65_y', '0_66_x', '0_66_y', '0_67_x', '0_67_y', '0_6_x', '0_6_y',
'0_7_x', '0_7_y', '0_8_x', '0_8_y', '0_9_x', '0_9_y', '10_11_x', '10_11_y', '10_12_x', '10_12_y',
'10_13_x', '10_13_y', '10_14_x', '10_14_y', '10_15_x', '10_15_y', '10_16_x', '10_16_y', '10_17_x',
'10_17_y', '10_18_x', '10_18_y', '10_19_x', '10_19_y', '10_20_x', '10_20_y', '10_21_x', '10_21_y',
'10_22_x', '10_22_y', '10_23_x', '10_23_y', '10_24_x', '10_24_y', '10_25_x', '10_25_y', '10_26_x',
'10_26_y', '10_27_x', '10_27_y', '10_28_x', '10_28_y', '10_29_x', '10_29_y', '10_30_x', '10_30_y',
'10_31_x', '10_31_y', '10_32_x', '10_32_y', '10_33_x', '10_33_y', '10_34_x', '10_34_y', '10_35_x',
'10_35_y', '10_36_x', '10_36_y', '10_37_x', '10_37_y', '10_38_x', '10_38_y', '10_39_x', '10_39_y',
'10_40_x', '10_40_y', '10_41_x', '10_41_y', '10_42_x', '10_42_y', '10_43_x', '10_43_y', '10_44_x',
'10_44_y', '10_45_x', '10_45_y', '10_46_x', '10_46_y', '10_47_x', '10_47_y', '10_48_x', '10_48_y',
'10_49_x', '10_49_y', '10_50_x', '10_50_y', '10_51_x', '10_51_y', '10_52_x', '10_52_y', '10_53_x',
'10_53_y', '10_54_x', '10_54_y', '10_55_x', '10_55_y', '10_56_x', '10_56_y', '10_57_x', '10_57_y',
'10_58_x', '10_58_y', '10_59_x', '10_59_y', '10_60_x', '10_60_y', '10_61_x', '10_61_y', '10_62_x',
'10_62_y', '10_63_x', '10_63_y', '10_64_x', '10_64_y', '10_65_x', '10_65_y', '10_66_x', '10_66_y',
'10_67_x', '10_67_y', '11_12_x', '11_12_y', '11_13_x', '11_13_y', '11_14_x', '11_14_y', '11_15_x',
'11_15_y', '11_16_x', '11_16_y', '11_17_x', '11_17_y', '11_18_x', '11_18_y', '11_19_x', '11_19_y',
'11_20_x', '11_20_y', '11_21_x', '11_21_y', '11_22_x', '11_22_y', '11_23_x', '11_23_y', '11_24_x',
'11_24_y', '11_25_x', '11_25_y', '11_26_x', '11_26_y', '11_27_x', '11_27_y', '11_28_x', '11_28_y',
'11_29_x', '11_29_y', '11_30_x', '11_30_y', '11_31_x', '11_31_y', '11_32_x', '11_32_y', '11_33_x',
'11_33_y', '11_34_x', '11_34_y', '11_35_x', '11_35_y', '11_36_x', '11_36_y', '11_37_x', '11_37_y',
'11_38_x', '11_38_y', '11_39_x', '11_39_y', '11_40_x', '11_40_y', '11_41_x', '11_41_y', '11_42_x',
'11_42_y', '11_43_x', '11_43_y', '11_44_x', '11_44_y', '11_45_x', '11_45_y', '11_46_x', '11_46_y',
'11_47_x', '11_47_y', '11_48_x', '11_48_y', '11_49_x', '11_49_y', '11_50_x', '11_50_y', '11_51_x',
'11_51_y', '11_52_x', '11_52_y', '11_53_x', '11_53_y', '11_54_x', '11_54_y', '11_55_x', '11_55_y',
'11_56_x', '11_56_y', '11_57_x', '11_57_y', '11_58_x', '11_58_y', '11_59_x', '11_59_y', '11_60_x',
'11_60_y', '11_61_x', '11_61_y', '11_62_x', '11_62_y', '11_63_x', '11_63_y', '11_64_x', '11_64_y',
'11_65_x', '11_65_y', '11_66_x', '11_66_y', '11_67_x', '11_67_y', '12_13_x', '12_13_y', '12_14_x',
'12_14_y', '12_15_x', '12_15_y', '12_16_x', '12_16_y', '12_17_x', '12_17_y', '12_18_x', '12_18_y',
'12_19_x', '12_19_y', '12_20_x', '12_20_y', '12_21_x', '12_21_y', '12_22_x', '12_22_y', '12_23_x',
'12_23_y', '12_24_x', '12_24_y', '12_25_x', '12_25_y', '12_26_x', '12_26_y', '12_27_x', '12_27_y',
'12_28_x', '12_28_y', '12_29_x', '12_29_y', '12_30_x', '12_30_y', '12_31_x', '12_31_y', '12_32_x',
'12_32_y', '12_33_x', '12_33_y', '12_34_x', '12_34_y', '12_35_x', '12_35_y', '12_36_x', '12_36_y',
'12_37_x', '12_37_y', '12_38_x', '12_38_y', '12_39_x', '12_39_y', '12_40_x', '12_40_y', '12_41_x',
'12_41_y', '12_42_x', '12_42_y', '12_43_x', '12_43_y', '12_44_x', '12_44_y', '12_45_x', '12_45_y',
'12_46_x', '12_46_y', '12_47_x', '12_47_y', '12_48_x', '12_48_y', '12_49_x', '12_49_y', '12_50_x',
'12_50_y', '12_51_x', '12_51_y', '12_52_x', '12_52_y', '12_53_x', '12_53_y', '12_54_x', '12_54_y',
'12_55_x', '12_55_y', '12_56_x', '12_56_y', '12_57_x', '12_57_y', '12_58_x', '12_58_y', '12_59_x',
'12_59_y', '12_60_x', '12_60_y', '12_61_x', '12_61_y', '12_62_x', '12_62_y', '12_63_x', '12_63_y',
'12_64_x', '12_64_y', '12_65_x', '12_65_y', '12_66_x', '12_66_y', '12_67_x', '12_67_y', '13_14_x',
'13_14_y', '13_15_x', '13_15_y', '13_16_x', '13_16_y', '13_17_x', '13_17_y', '13_18_x', '13_18_y',
'13_19_x', '13_19_y', '13_20_x', '13_20_y', '13_21_x', '13_21_y', '13_22_x', '13_22_y', '13_23_x',
'13_23_y', '13_24_x', '13_24_y', '13_25_x', '13_25_y', '13_26_x', '13_26_y', '13_27_x', '13_27_y',
'13_28_x', '13_28_y', '13_29_x', '13_29_y', '13_30_x', '13_30_y', '13_31_x', '13_31_y', '13_32_x',
'13_32_y', '13_33_x', '13_33_y', '13_34_x', '13_34_y', '13_35_x', '13_35_y', '13_36_x', '13_36_y',
'13_37_x', '13_37_y', '13_38_x', '13_38_y', '13_39_x', '13_39_y', '13_40_x', '13_40_y', '13_41_x',
'13_41_y', '13_42_x', '13_42_y', '13_43_x', '13_43_y', '13_44_x', '13_44_y', '13_45_x', '13_45_y',
'13_46_x', '13_46_y', '13_47_x', '13_47_y', '13_48_x', '13_48_y', '13_49_x', '13_49_y', '13_50_x',
'13_50_y', '13_51_x', '13_51_y', '13_52_x', '13_52_y', '13_53_x', '13_53_y', '13_54_x', '13_54_y',
'13_55_x', '13_55_y', '13_56_x', '13_56_y', '13_57_x', '13_57_y', '13_58_x', '13_58_y', '13_59_x',
'13_59_y', '13_60_x', '13_60_y', '13_61_x', '13_61_y', '13_62_x', '13_62_y', '13_63_x', '13_63_y',
'13_64_x', '13_64_y', '13_65_x', '13_65_y', '13_66_x', '13_66_y', '13_67_x', '13_67_y', '14_15_x',
'14_15_y', '14_16_x', '14_16_y', '14_17_x', '14_17_y', '14_18_x', '14_18_y', '14_19_x', '14_19_y',
'14_20_x', '14_20_y', '14_21_x', '14_21_y', '14_22_x', '14_22_y', '14_23_x', '14_23_y', '14_24_x',
'14_24_y', '14_25_x', '14_25_y', '14_26_x', '14_26_y', '14_27_x', '14_27_y', '14_28_x', '14_28_y',
'14_29_x', '14_29_y', '14_30_x', '14_30_y', '14_31_x', '14_31_y', '14_32_x', '14_32_y', '14_33_x',
'14_33_y', '14_34_x', '14_34_y', '14_35_x', '14_35_y', '14_36_x', '14_36_y', '14_37_x', '14_37_y',
'14_38_x', '14_38_y', '14_39_x', '14_39_y', '14_40_x', '14_40_y', '14_41_x', '14_41_y', '14_42_x',
'14_42_y', '14_43_x', '14_43_y', '14_44_x', '14_44_y', '14_45_x', '14_45_y', '14_46_x', '14_46_y',
'14_47_x', '14_47_y', '14_48_x', '14_48_y', '14_49_x', '14_49_y', '14_50_x', '14_50_y', '14_51_x',
'14_51_y', '14_52_x', '14_52_y', '14_53_x', '14_53_y', '14_54_x', '14_54_y', '14_55_x', '14_55_y',
'14_56_x', '14_56_y', '14_57_x', '14_57_y', '14_58_x', '14_58_y', '14_59_x', '14_59_y', '14_60_x',
'14_60_y', '14_61_x', '14_61_y', '14_62_x', '14_62_y', '14_63_x', '14_63_y', '14_64_x', '14_64_y',
'14_65_x', '14_65_y', '14_66_x', '14_66_y', '14_67_x', '14_67_y', '15_16_x', '15_16_y', '15_17_x',
'15_17_y', '15_18_x', '15_18_y', '15_19_x', '15_19_y', '15_20_x', '15_20_y', '15_21_x', '15_21_y',
'15_22_x', '15_22_y', '15_23_x', '15_23_y', '15_24_x', '15_24_y', '15_25_x', '15_25_y', '15_26_x',
'15_26_y', '15_27_x', '15_27_y', '15_28_x', '15_28_y', '15_29_x', '15_29_y', '15_30_x', '15_30_y',
'15_31_x', '15_31_y', '15_32_x', '15_32_y', '15_33_x', '15_33_y', '15_34_x', '15_34_y', '15_35_x',
'15_35_y', '15_36_x', '15_36_y', '15_37_x', '15_37_y', '15_38_x', '15_38_y', '15_39_x', '15_39_y',
'15_40_x', '15_40_y', '15_41_x', '15_41_y', '15_42_x', '15_42_y', '15_43_x', '15_43_y', '15_44_x',
'15_44_y', '15_45_x', '15_45_y', '15_46_x', '15_46_y', '15_47_x', '15_47_y', '15_48_x', '15_48_y',
'15_49_x', '15_49_y', '15_50_x', '15_50_y', '15_51_x', '15_51_y', '15_52_x', '15_52_y', '15_53_x',
'15_53_y', '15_54_x', '15_54_y', '15_55_x', '15_55_y', '15_56_x', '15_56_y', '15_57_x', '15_57_y',
'15_58_x', '15_58_y', '15_59_x', '15_59_y', '15_60_x', '15_60_y', '15_61_x', '15_61_y', '15_62_x',
'15_62_y', '15_63_x', '15_63_y', '15_64_x', '15_64_y', '15_65_x', '15_65_y', '15_66_x', '15_66_y',
'15_67_x', '15_67_y', '16_17_x', '16_17_y', '16_18_x', '16_18_y', '16_19_x', '16_19_y', '16_20_x',
'16_20_y', '16_21_x', '16_21_y', '16_22_x', '16_22_y', '16_23_x', '16_23_y', '16_24_x', '16_24_y',
'16_25_x', '16_25_y', '16_26_x', '16_26_y', '16_27_x', '16_27_y', '16_28_x', '16_28_y', '16_29_x',
'16_29_y', '16_30_x', '16_30_y', '16_31_x', '16_31_y', '16_32_x', '16_32_y', '16_33_x', '16_33_y',
'16_34_x', '16_34_y', '16_35_x', '16_35_y', '16_36_x', '16_36_y', '16_37_x', '16_37_y', '16_38_x',
'16_38_y', '16_39_x', '16_39_y', '16_40_x', '16_40_y', '16_41_x', '16_41_y', '16_42_x', '16_42_y',
'16_43_x', '16_43_y', '16_44_x', '16_44_y', '16_45_x', '16_45_y', '16_46_x', '16_46_y', '16_47_x',
'16_47_y', '16_48_x', '16_48_y', '16_49_x', '16_49_y', '16_50_x', '16_50_y', '16_51_x', '16_51_y',
'16_52_x', '16_52_y', '16_53_x', '16_53_y', '16_54_x', '16_54_y', '16_55_x', '16_55_y', '16_56_x',
'16_56_y', '16_57_x', '16_57_y', '16_58_x', '16_58_y', '16_59_x', '16_59_y', '16_60_x', '16_60_y',
'16_61_x', '16_61_y', '16_62_x', '16_62_y', '16_63_x', '16_63_y', '16_64_x', '16_64_y', '16_65_x',
'16_65_y', '16_66_x', '16_66_y', '16_67_x', '16_67_y', '17_18_x', '17_18_y', '17_19_x', '17_19_y',
'17_20_x', '17_20_y', '17_21_x', '17_21_y', '17_22_x', '17_22_y', '17_23_x', '17_23_y', '17_24_x',
'17_24_y', '17_25_x', '17_25_y', '17_26_x', '17_26_y', '17_27_x', '17_27_y', '17_28_x', '17_28_y',
'17_29_x', '17_29_y', '17_30_x', '17_30_y', '17_31_x', '17_31_y', '17_32_x', '17_32_y', '17_33_x',
'17_33_y', '17_34_x', '17_34_y', '17_35_x', '17_35_y', '17_36_x', '17_36_y', '17_37_x', '17_37_y',
'17_38_x', '17_38_y', '17_39_x', '17_39_y', '17_40_x', '17_40_y', '17_41_x', '17_41_y', '17_42_x',
'17_42_y', '17_43_x', '17_43_y', '17_44_x', '17_44_y', '17_45_x', '17_45_y', '17_46_x', '17_46_y',
'17_47_x', '17_47_y', '17_48_x', '17_48_y', '17_49_x', '17_49_y', '17_50_x', '17_50_y', '17_51_x',
'17_51_y', '17_52_x', '17_52_y', '17_53_x', '17_53_y', '17_54_x', '17_54_y', '17_55_x', '17_55_y',
'17_56_x', '17_56_y', '17_57_x', '17_57_y', '17_58_x', '17_58_y', '17_59_x', '17_59_y', '17_60_x',
'17_60_y', '17_61_x', '17_61_y', '17_62_x', '17_62_y', '17_63_x', '17_63_y', '17_64_x', '17_64_y',
'17_65_x', '17_65_y', '17_66_x', '17_66_y', '17_67_x', '17_67_y', '18_19_x', '18_19_y', '18_20_x',
'18_20_y', '18_21_x', '18_21_y', '18_22_x', '18_22_y', '18_23_x', '18_23_y', '18_24_x', '18_24_y',
'18_25_x', '18_25_y', '18_26_x', '18_26_y', '18_27_x', '18_27_y', '18_28_x', '18_28_y', '18_29_x',
'18_29_y', '18_30_x', '18_30_y', '18_31_x', '18_31_y', '18_32_x', '18_32_y', '18_33_x', '18_33_y',
'18_34_x', '18_34_y', '18_35_x', '18_35_y', '18_36_x', '18_36_y', '18_37_x', '18_37_y', '18_38_x',
'18_38_y', '18_39_x', '18_39_y', '18_40_x', '18_40_y', '18_41_x', '18_41_y', '18_42_x', '18_42_y',
'18_43_x', '18_43_y', '18_44_x', '18_44_y', '18_45_x', '18_45_y', '18_46_x', '18_46_y', '18_47_x',
'18_47_y', '18_48_x', '18_48_y', '18_49_x', '18_49_y', '18_50_x', '18_50_y', '18_51_x', '18_51_y',
'18_52_x', '18_52_y', '18_53_x', '18_53_y', '18_54_x', '18_54_y', '18_55_x', '18_55_y', '18_56_x',
'18_56_y', '18_57_x', '18_57_y', '18_58_x', '18_58_y', '18_59_x', '18_59_y', '18_60_x', '18_60_y',
'18_61_x', '18_61_y', '18_62_x', '18_62_y', '18_63_x', '18_63_y', '18_64_x', '18_64_y', '18_65_x',
'18_65_y', '18_66_x', '18_66_y', '18_67_x', '18_67_y', '19_20_x', '19_20_y', '19_21_x', '19_21_y',
'19_22_x', '19_22_y', '19_23_x', '19_23_y', '19_24_x', '19_24_y', '19_25_x', '19_25_y', '19_26_x',
'19_26_y', '19_27_x', '19_27_y', '19_28_x', '19_28_y', '19_29_x', '19_29_y', '19_30_x', '19_30_y',
'19_31_x', '19_31_y', '19_32_x', '19_32_y', '19_33_x', '19_33_y', '19_34_x', '19_34_y', '19_35_x',
'19_35_y', '19_36_x', '19_36_y', '19_37_x', '19_37_y', '19_38_x', '19_38_y', '19_39_x', '19_39_y',
'19_40_x', '19_40_y', '19_41_x', '19_41_y', '19_42_x', '19_42_y', '19_43_x', '19_43_y', '19_44_x',
'19_44_y', '19_45_x', '19_45_y', '19_46_x', '19_46_y', '19_47_x', '19_47_y', '19_48_x', '19_48_y',
'19_49_x', '19_49_y', '19_50_x', '19_50_y', '19_51_x', '19_51_y', '19_52_x', '19_52_y', '19_53_x',
'19_53_y', '19_54_x', '19_54_y', '19_55_x', '19_55_y', '19_56_x', '19_56_y', '19_57_x', '19_57_y',
'19_58_x', '19_58_y', '19_59_x', '19_59_y', '19_60_x', '19_60_y', '19_61_x', '19_61_y', '19_62_x',
'19_62_y', '19_63_x', '19_63_y', '19_64_x', '19_64_y', '19_65_x', '19_65_y', '19_66_x', '19_66_y',
'19_67_x', '19_67_y', '1_10_x', '1_10_y', '1_11_x', '1_11_y', '1_12_x', '1_12_y', '1_13_x', '1_13_y',
'1_14_x', '1_14_y', '1_15_x', '1_15_y', '1_16_x', '1_16_y', '1_17_x', '1_17_y', '1_18_x', '1_18_y',
'1_19_x', '1_19_y', '1_20_x', '1_20_y', '1_21_x', '1_21_y', '1_22_x', '1_22_y', '1_23_x', '1_23_y',
'1_24_x', '1_24_y', '1_25_x', '1_25_y', '1_26_x', '1_26_y', '1_27_x', '1_27_y', '1_28_x', '1_28_y',
'1_29_x', '1_29_y', '1_2_x', '1_2_y', '1_30_x', '1_30_y', '1_31_x', '1_31_y', '1_32_x', '1_32_y',
'1_33_x', '1_33_y', '1_34_x', '1_34_y', '1_35_x', '1_35_y', '1_36_x', '1_36_y', '1_37_x', '1_37_y',
'1_38_x', '1_38_y', '1_39_x', '1_39_y', '1_3_x', '1_3_y', '1_40_x', '1_40_y', '1_41_x', '1_41_y',
'1_42_x', '1_42_y', '1_43_x', '1_43_y', '1_44_x', '1_44_y', '1_45_x', '1_45_y', '1_46_x', '1_46_y',
'1_47_x', '1_47_y', '1_48_x', '1_48_y', '1_49_x', '1_49_y', '1_4_x', '1_4_y', '1_50_x', '1_50_y',
'1_51_x', '1_51_y', '1_52_x', '1_52_y', '1_53_x', '1_53_y', '1_54_x', '1_54_y', '1_55_x', '1_55_y',
'1_56_x', '1_56_y', '1_57_x', '1_57_y', '1_58_x', '1_58_y', '1_59_x', '1_59_y', '1_5_x', '1_5_y',
'1_60_x', '1_60_y', '1_61_x', '1_61_y', '1_62_x', '1_62_y', '1_63_x', '1_63_y', '1_64_x', '1_64_y',
'1_65_x', '1_65_y', '1_66_x', '1_66_y', '1_67_x', '1_67_y', '1_6_x', '1_6_y', '1_7_x', '1_7_y', '1_8_x',
'1_8_y', '1_9_x', '1_9_y', '20_21_x', '20_21_y', '20_22_x', '20_22_y', '20_23_x', '20_23_y', '20_24_x',
'20_24_y', '20_25_x', '20_25_y', '20_26_x', '20_26_y', '20_27_x', '20_27_y', '20_28_x', '20_28_y',
'20_29_x', '20_29_y', '20_30_x', '20_30_y', '20_31_x', '20_31_y', '20_32_x', '20_32_y', '20_33_x',
'20_33_y', '20_34_x', '20_34_y', '20_35_x', '20_35_y', '20_36_x', '20_36_y', '20_37_x', '20_37_y',
'20_38_x', '20_38_y', '20_39_x', '20_39_y', '20_40_x', '20_40_y', '20_41_x', '20_41_y', '20_42_x',
'20_42_y', '20_43_x', '20_43_y', '20_44_x', '20_44_y', '20_45_x', '20_45_y', '20_46_x', '20_46_y',
'20_47_x', '20_47_y', '20_48_x', '20_48_y', '20_49_x', '20_49_y', '20_50_x', '20_50_y', '20_51_x',
'20_51_y', '20_52_x', '20_52_y', '20_53_x', '20_53_y', '20_54_x', '20_54_y', '20_55_x', '20_55_y',
'20_56_x', '20_56_y', '20_57_x', '20_57_y', '20_58_x', '20_58_y', '20_59_x', '20_59_y', '20_60_x',
'20_60_y', '20_61_x', '20_61_y', '20_62_x', '20_62_y', '20_63_x', '20_63_y', '20_64_x', '20_64_y',
'20_65_x', '20_65_y', '20_66_x', '20_66_y', '20_67_x', '20_67_y', '21_22_x', '21_22_y', '21_23_x',
'21_23_y', '21_24_x', '21_24_y', '21_25_x', '21_25_y', '21_26_x', '21_26_y', '21_27_x', '21_27_y',
'21_28_x', '21_28_y', '21_29_x', '21_29_y', '21_30_x', '21_30_y', '21_31_x', '21_31_y', '21_32_x',
'21_32_y', '21_33_x', '21_33_y', '21_34_x', '21_34_y', '21_35_x', '21_35_y', '21_36_x', '21_36_y',
'21_37_x', '21_37_y', '21_38_x', '21_38_y', '21_39_x', '21_39_y', '21_40_x', '21_40_y', '21_41_x',
'21_41_y', '21_42_x', '21_42_y', '21_43_x', '21_43_y', '21_44_x', '21_44_y', '21_45_x', '21_45_y',
'21_46_x', '21_46_y', '21_47_x', '21_47_y', '21_48_x', '21_48_y', '21_49_x', '21_49_y', '21_50_x',
'21_50_y', '21_51_x', '21_51_y', '21_52_x', '21_52_y', '21_53_x', '21_53_y', '21_54_x', '21_54_y',
'21_55_x', '21_55_y', '21_56_x', '21_56_y', '21_57_x', '21_57_y', '21_58_x', '21_58_y', '21_59_x',
'21_59_y', '21_60_x', '21_60_y', '21_61_x', '21_61_y', '21_62_x', '21_62_y', '21_63_x', '21_63_y',
'21_64_x', '21_64_y', '21_65_x', '21_65_y', '21_66_x', '21_66_y', '21_67_x', '21_67_y', '22_23_x',
'22_23_y', '22_24_x', '22_24_y', '22_25_x', '22_25_y', '22_26_x', '22_26_y', '22_27_x', '22_27_y',
'22_28_x', '22_28_y', '22_29_x', '22_29_y', '22_30_x', '22_30_y', '22_31_x', '22_31_y', '22_32_x',
'22_32_y', '22_33_x', '22_33_y', '22_34_x', '22_34_y', '22_35_x', '22_35_y', '22_36_x', '22_36_y',
'22_37_x', '22_37_y', '22_38_x', '22_38_y', '22_39_x', '22_39_y', '22_40_x', '22_40_y', '22_41_x',
'22_41_y', '22_42_x', '22_42_y', '22_43_x', '22_43_y', '22_44_x', '22_44_y', '22_45_x', '22_45_y',
'22_46_x', '22_46_y', '22_47_x', '22_47_y', '22_48_x', '22_48_y', '22_49_x', '22_49_y', '22_50_x',
'22_50_y', '22_51_x', '22_51_y', '22_52_x', '22_52_y', '22_53_x', '22_53_y', '22_54_x', '22_54_y',
'22_55_x', '22_55_y', '22_56_x', '22_56_y', '22_57_x', '22_57_y', '22_58_x', '22_58_y', '22_59_x',
'22_59_y', '22_60_x', '22_60_y', '22_61_x', '22_61_y', '22_62_x', '22_62_y', '22_63_x', '22_63_y',
'22_64_x', '22_64_y', '22_65_x', '22_65_y', '22_66_x', '22_66_y', '22_67_x', '22_67_y', '23_24_x',
'23_24_y', '23_25_x', '23_25_y', '23_26_x', '23_26_y', '23_27_x', '23_27_y', '23_28_x', '23_28_y',
'23_29_x', '23_29_y', '23_30_x', '23_30_y', '23_31_x', '23_31_y', '23_32_x', '23_32_y', '23_33_x',
'23_33_y', '23_34_x', '23_34_y', '23_35_x', '23_35_y', '23_36_x', '23_36_y', '23_37_x', '23_37_y',
'23_38_x', '23_38_y', '23_39_x', '23_39_y', '23_40_x', '23_40_y', '23_41_x', '23_41_y', '23_42_x',
'23_42_y', '23_43_x', '23_43_y', '23_44_x', '23_44_y', '23_45_x', '23_45_y', '23_46_x', '23_46_y',
'23_47_x', '23_47_y', '23_48_x', '23_48_y', '23_49_x', '23_49_y', '23_50_x', '23_50_y', '23_51_x',
'23_51_y', '23_52_x', '23_52_y', '23_53_x', '23_53_y', '23_54_x', '23_54_y', '23_55_x', '23_55_y',
'23_56_x', '23_56_y', '23_57_x', '23_57_y', '23_58_x', '23_58_y', '23_59_x', '23_59_y', '23_60_x',
'23_60_y', '23_61_x', '23_61_y', '23_62_x', '23_62_y', '23_63_x', '23_63_y', '23_64_x', '23_64_y',
'23_65_x', '23_65_y', '23_66_x', '23_66_y', '23_67_x', '23_67_y', '24_25_x', '24_25_y', '24_26_x',
'24_26_y', '24_27_x', '24_27_y', '24_28_x', '24_28_y', '24_29_x', '24_29_y', '24_30_x', '24_30_y',
'24_31_x', '24_31_y', '24_32_x', '24_32_y', '24_33_x', '24_33_y', '24_34_x', '24_34_y', '24_35_x',
'24_35_y', '24_36_x', '24_36_y', '24_37_x', '24_37_y', '24_38_x', '24_38_y', '24_39_x', '24_39_y',
'24_40_x', '24_40_y', '24_41_x', '24_41_y', '24_42_x', '24_42_y', '24_43_x', '24_43_y', '24_44_x',
'24_44_y', '24_45_x', '24_45_y', '24_46_x', '24_46_y', '24_47_x', '24_47_y', '24_48_x', '24_48_y',
'24_49_x', '24_49_y', '24_50_x', '24_50_y', '24_51_x', '24_51_y', '24_52_x', '24_52_y', '24_53_x',
'24_53_y', '24_54_x', '24_54_y', '24_55_x', '24_55_y', '24_56_x', '24_56_y', '24_57_x', '24_57_y',
'24_58_x', '24_58_y', '24_59_x', '24_59_y', '24_60_x', '24_60_y', '24_61_x', '24_61_y', '24_62_x',
'24_62_y', '24_63_x', '24_63_y', '24_64_x', '24_64_y', '24_65_x', '24_65_y', '24_66_x', '24_66_y',
'24_67_x', '24_67_y', '25_26_x', '25_26_y', '25_27_x', '25_27_y', '25_28_x', '25_28_y', '25_29_x',
'25_29_y', '25_30_x', '25_30_y', '25_31_x', '25_31_y', '25_32_x', '25_32_y', '25_33_x', '25_33_y',
'25_34_x', '25_34_y', '25_35_x', '25_35_y', '25_36_x', '25_36_y', '25_37_x', '25_37_y', '25_38_x',
'25_38_y', '25_39_x', '25_39_y', '25_40_x', '25_40_y', '25_41_x', '25_41_y', '25_42_x', '25_42_y',
'25_43_x', '25_43_y', '25_44_x', '25_44_y', '25_45_x', '25_45_y', '25_46_x', '25_46_y', '25_47_x',
'25_47_y', '25_48_x', '25_48_y', '25_49_x', '25_49_y', '25_50_x', '25_50_y', '25_51_x', '25_51_y',
'25_52_x', '25_52_y', '25_53_x', '25_53_y', '25_54_x', '25_54_y', '25_55_x', '25_55_y', '25_56_x',
'25_56_y', '25_57_x', '25_57_y', '25_58_x', '25_58_y', '25_59_x', '25_59_y', '25_60_x', '25_60_y',
'25_61_x', '25_61_y', '25_62_x', '25_62_y', '25_63_x', '25_63_y', '25_64_x', '25_64_y', '25_65_x',
'25_65_y', '25_66_x', '25_66_y', '25_67_x', '25_67_y', '26_27_x', '26_27_y', '26_28_x', '26_28_y',
'26_29_x', '26_29_y', '26_30_x', '26_30_y', '26_31_x', '26_31_y', '26_32_x', '26_32_y', '26_33_x',
'26_33_y', '26_34_x', '26_34_y', '26_35_x', '26_35_y', '26_36_x', '26_36_y', '26_37_x', '26_37_y',
'26_38_x', '26_38_y', '26_39_x', '26_39_y', '26_40_x', '26_40_y', '26_41_x', '26_41_y', '26_42_x',
'26_42_y', '26_43_x', '26_43_y', '26_44_x', '26_44_y', '26_45_x', '26_45_y', '26_46_x', '26_46_y',
'26_47_x', '26_47_y', '26_48_x', '26_48_y', '26_49_x', '26_49_y', '26_50_x', '26_50_y', '26_51_x',
'26_51_y', '26_52_x', '26_52_y', '26_53_x', '26_53_y', '26_54_x', '26_54_y', '26_55_x', '26_55_y',
'26_56_x', '26_56_y', '26_57_x', '26_57_y', '26_58_x', '26_58_y', '26_59_x', '26_59_y', '26_60_x',
'26_60_y', '26_61_x', '26_61_y', '26_62_x', '26_62_y', '26_63_x', '26_63_y', '26_64_x', '26_64_y',
'26_65_x', '26_65_y', '26_66_x', '26_66_y', '26_67_x', '26_67_y', '27_28_x', '27_28_y', '27_29_x',
'27_29_y', '27_30_x', '27_30_y', '27_31_x', '27_31_y', '27_32_x', '27_32_y', '27_33_x', '27_33_y',
'27_34_x', '27_34_y', '27_35_x', '27_35_y', '27_36_x', '27_36_y', '27_37_x', '27_37_y', '27_38_x',
'27_38_y', '27_39_x', '27_39_y', '27_40_x', '27_40_y', '27_41_x', '27_41_y', '27_42_x', '27_42_y',
'27_43_x', '27_43_y', '27_44_x', '27_44_y', '27_45_x', '27_45_y', '27_46_x', '27_46_y', '27_47_x',
'27_47_y', '27_48_x', '27_48_y', '27_49_x', '27_49_y', '27_50_x', '27_50_y', '27_51_x', '27_51_y',
'27_52_x', '27_52_y', '27_53_x', '27_53_y', '27_54_x', '27_54_y', '27_55_x', '27_55_y', '27_56_x',
'27_56_y', '27_57_x', '27_57_y', '27_58_x', '27_58_y', '27_59_x', '27_59_y', '27_60_x', '27_60_y',
'27_61_x', '27_61_y', '27_62_x', '27_62_y', '27_63_x', '27_63_y', '27_64_x', '27_64_y', '27_65_x',
'27_65_y', '27_66_x', '27_66_y', '27_67_x', '27_67_y', '28_29_x', '28_29_y', '28_30_x', '28_30_y',
'28_31_x', '28_31_y', '28_32_x', '28_32_y', '28_33_x', '28_33_y', '28_34_x', '28_34_y', '28_35_x',
'28_35_y', '28_36_x', '28_36_y', '28_37_x', '28_37_y', '28_38_x', '28_38_y', '28_39_x', '28_39_y',
'28_40_x', '28_40_y', '28_41_x', '28_41_y', '28_42_x', '28_42_y', '28_43_x', '28_43_y', '28_44_x',
'28_44_y', '28_45_x', '28_45_y', '28_46_x', '28_46_y', '28_47_x', '28_47_y', '28_48_x', '28_48_y',
'28_49_x', '28_49_y', '28_50_x', '28_50_y', '28_51_x', '28_51_y', '28_52_x', '28_52_y', '28_53_x',
'28_53_y', '28_54_x', '28_54_y', '28_55_x', '28_55_y', '28_56_x', '28_56_y', '28_57_x', '28_57_y',
'28_58_x', '28_58_y', '28_59_x', '28_59_y', '28_60_x', '28_60_y', '28_61_x', '28_61_y', '28_62_x',
'28_62_y', '28_63_x', '28_63_y', '28_64_x', '28_64_y', '28_65_x', '28_65_y', '28_66_x', '28_66_y',
'28_67_x', '28_67_y', '29_30_x', '29_30_y', '29_31_x', '29_31_y', '29_32_x', '29_32_y', '29_33_x',
'29_33_y', '29_34_x', '29_34_y', '29_35_x', '29_35_y', '29_36_x', '29_36_y', '29_37_x', '29_37_y',
'29_38_x', '29_38_y', '29_39_x', '29_39_y', '29_40_x', '29_40_y', '29_41_x', '29_41_y', '29_42_x',
'29_42_y', '29_43_x', '29_43_y', '29_44_x', '29_44_y', '29_45_x', '29_45_y', '29_46_x', '29_46_y',
'29_47_x', '29_47_y', '29_48_x', '29_48_y', '29_49_x', '29_49_y', '29_50_x', '29_50_y', '29_51_x',
'29_51_y', '29_52_x', '29_52_y', '29_53_x', '29_53_y', '29_54_x', '29_54_y', '29_55_x', '29_55_y',
'29_56_x', '29_56_y', '29_57_x', '29_57_y', '29_58_x', '29_58_y', '29_59_x', '29_59_y', '29_60_x',
'29_60_y', '29_61_x', '29_61_y', '29_62_x', '29_62_y', '29_63_x', '29_63_y', '29_64_x', '29_64_y',
'29_65_x', '29_65_y', '29_66_x', '29_66_y', '29_67_x', '29_67_y', '2_10_x', '2_10_y', '2_11_x', '2_11_y',
'2_12_x', '2_12_y', '2_13_x', '2_13_y', '2_14_x', '2_14_y', '2_15_x', '2_15_y', '2_16_x', '2_16_y',
'2_17_x', '2_17_y', '2_18_x', '2_18_y', '2_19_x', '2_19_y', '2_20_x', '2_20_y', '2_21_x', '2_21_y',
'2_22_x', '2_22_y', '2_23_x', '2_23_y', '2_24_x', '2_24_y', '2_25_x', '2_25_y', '2_26_x', '2_26_y',
'2_27_x', '2_27_y', '2_28_x', '2_28_y', '2_29_x', '2_29_y', '2_30_x', '2_30_y', '2_31_x', '2_31_y',
'2_32_x', '2_32_y', '2_33_x', '2_33_y', '2_34_x', '2_34_y', '2_35_x', '2_35_y', '2_36_x', '2_36_y',
'2_37_x', '2_37_y', '2_38_x', '2_38_y', '2_39_x', '2_39_y', '2_3_x', '2_3_y', '2_40_x', '2_40_y',
'2_41_x', '2_41_y', '2_42_x', '2_42_y', '2_43_x', '2_43_y', '2_44_x', '2_44_y', '2_45_x', '2_45_y',
'2_46_x', '2_46_y', '2_47_x', '2_47_y', '2_48_x', '2_48_y', '2_49_x', '2_49_y', '2_4_x', '2_4_y',
'2_50_x', '2_50_y', '2_51_x', '2_51_y', '2_52_x', '2_52_y', '2_53_x', '2_53_y', '2_54_x', '2_54_y',
'2_55_x', '2_55_y', '2_56_x', '2_56_y', '2_57_x', '2_57_y', '2_58_x', '2_58_y', '2_59_x', '2_59_y',
'2_5_x', '2_5_y', '2_60_x', '2_60_y', '2_61_x', '2_61_y', '2_62_x', '2_62_y', '2_63_x', '2_63_y',
'2_64_x', '2_64_y', '2_65_x', '2_65_y', '2_66_x', '2_66_y', '2_67_x', '2_67_y', '2_6_x', '2_6_y',
'2_7_x', '2_7_y', '2_8_x', '2_8_y', '2_9_x', '2_9_y', '30_31_x', '30_31_y', '30_32_x', '30_32_y',
'30_33_x', '30_33_y', '30_34_x', '30_34_y', '30_35_x', '30_35_y', '30_36_x', '30_36_y', '30_37_x',
'30_37_y', '30_38_x', '30_38_y', '30_39_x', '30_39_y', '30_40_x', '30_40_y', '30_41_x', '30_41_y',
'30_42_x', '30_42_y', '30_43_x', '30_43_y', '30_44_x', '30_44_y', '30_45_x', '30_45_y', '30_46_x',
'30_46_y', '30_47_x', '30_47_y', '30_48_x', '30_48_y', '30_49_x', '30_49_y', '30_50_x', '30_50_y',
'30_51_x', '30_51_y', '30_52_x', '30_52_y', '30_53_x', '30_53_y', '30_54_x', '30_54_y', '30_55_x',
'30_55_y', '30_56_x', '30_56_y', '30_57_x', '30_57_y', '30_58_x', '30_58_y', '30_59_x', '30_59_y',
'30_60_x', '30_60_y', '30_61_x', '30_61_y', '30_62_x', '30_62_y', '30_63_x', '30_63_y', '30_64_x',
'30_64_y', '30_65_x', '30_65_y', '30_66_x', '30_66_y', '30_67_x', '30_67_y', '31_32_x', '31_32_y',
'31_33_x', '31_33_y', '31_34_x', '31_34_y', '31_35_x', '31_35_y', '31_36_x', '31_36_y', '31_37_x',
'31_37_y', '31_38_x', '31_38_y', '31_39_x', '31_39_y', '31_40_x', '31_40_y', '31_41_x', '31_41_y',
'31_42_x', '31_42_y', '31_43_x', '31_43_y', '31_44_x', '31_44_y', '31_45_x', '31_45_y', '31_46_x',
'31_46_y', '31_47_x', '31_47_y', '31_48_x', '31_48_y', '31_49_x', '31_49_y', '31_50_x', '31_50_y',
'31_51_x', '31_51_y', '31_52_x', '31_52_y', '31_53_x', '31_53_y', '31_54_x', '31_54_y', '31_55_x',
'31_55_y', '31_56_x', '31_56_y', '31_57_x', '31_57_y', '31_58_x', '31_58_y', '31_59_x', '31_59_y',
'31_60_x', '31_60_y', '31_61_x', '31_61_y', '31_62_x', '31_62_y', '31_63_x', '31_63_y', '31_64_x',
'31_64_y', '31_65_x', '31_65_y', '31_66_x', '31_66_y', '31_67_x', '31_67_y', '32_33_x', '32_33_y',
'32_34_x', '32_34_y', '32_35_x', '32_35_y', '32_36_x', '32_36_y', '32_37_x', '32_37_y', '32_38_x',
'32_38_y', '32_39_x', '32_39_y', '32_40_x', '32_40_y', '32_41_x', '32_41_y', '32_42_x', '32_42_y',
'32_43_x', '32_43_y', '32_44_x', '32_44_y', '32_45_x', '32_45_y', '32_46_x', '32_46_y', '32_47_x',
'32_47_y', '32_48_x', '32_48_y', '32_49_x', '32_49_y', '32_50_x', '32_50_y', '32_51_x', '32_51_y',
'32_52_x', '32_52_y', '32_53_x', '32_53_y', '32_54_x', '32_54_y', '32_55_x', '32_55_y', '32_56_x',
'32_56_y', '32_57_x', '32_57_y', '32_58_x', '32_58_y', '32_59_x', '32_59_y', '32_60_x', '32_60_y',
'32_61_x', '32_61_y', '32_62_x', '32_62_y', '32_63_x', '32_63_y', '32_64_x', '32_64_y', '32_65_x',
'32_65_y', '32_66_x', '32_66_y', '32_67_x', '32_67_y', '33_34_x', '33_34_y', '33_35_x', '33_35_y',
'33_36_x', '33_36_y', '33_37_x', '33_37_y', '33_38_x', '33_38_y', '33_39_x', '33_39_y', '33_40_x',
'33_40_y', '33_41_x', '33_41_y', '33_42_x', '33_42_y', '33_43_x', '33_43_y', '33_44_x', '33_44_y',
'33_45_x', '33_45_y', '33_46_x', '33_46_y', '33_47_x', '33_47_y', '33_48_x', '33_48_y', '33_49_x',
'33_49_y', '33_50_x', '33_50_y', '33_51_x', '33_51_y', '33_52_x', '33_52_y', '33_53_x', '33_53_y',
'33_54_x', '33_54_y', '33_55_x', '33_55_y', '33_56_x', '33_56_y', '33_57_x', '33_57_y', '33_58_x',
'33_58_y', '33_59_x', '33_59_y', '33_60_x', '33_60_y', '33_61_x', '33_61_y', '33_62_x', '33_62_y',
'33_63_x', '33_63_y', '33_64_x', '33_64_y', '33_65_x', '33_65_y', '33_66_x', '33_66_y', '33_67_x',
'33_67_y', '34_35_x', '34_35_y', '34_36_x', '34_36_y', '34_37_x', '34_37_y', '34_38_x', '34_38_y',
'34_39_x', '34_39_y', '34_40_x', '34_40_y', '34_41_x', '34_41_y', '34_42_x', '34_42_y', '34_43_x',
'34_43_y', '34_44_x', '34_44_y', '34_45_x', '34_45_y', '34_46_x', '34_46_y', '34_47_x', '34_47_y',
'34_48_x', '34_48_y', '34_49_x', '34_49_y', '34_50_x', '34_50_y', '34_51_x', '34_51_y', '34_52_x',
'34_52_y', '34_53_x', '34_53_y', '34_54_x', '34_54_y', '34_55_x', '34_55_y', '34_56_x', '34_56_y',
'34_57_x', '34_57_y', '34_58_x', '34_58_y', '34_59_x', '34_59_y', '34_60_x', '34_60_y', '34_61_x',
'34_61_y', '34_62_x', '34_62_y', '34_63_x', '34_63_y', '34_64_x', '34_64_y', '34_65_x', '34_65_y',
'34_66_x', '34_66_y', '34_67_x', '34_67_y', '35_36_x', '35_36_y', '35_37_x', '35_37_y', '35_38_x',
'35_38_y', '35_39_x', '35_39_y', '35_40_x', '35_40_y', '35_41_x', '35_41_y', '35_42_x', '35_42_y',
'35_43_x', '35_43_y', '35_44_x', '35_44_y', '35_45_x', '35_45_y', '35_46_x', '35_46_y', '35_47_x',
'35_47_y', '35_48_x', '35_48_y', '35_49_x', '35_49_y', '35_50_x', '35_50_y', '35_51_x', '35_51_y',
'35_52_x', '35_52_y', '35_53_x', '35_53_y', '35_54_x', '35_54_y', '35_55_x', '35_55_y', '35_56_x',
'35_56_y', '35_57_x', '35_57_y', '35_58_x', '35_58_y', '35_59_x', '35_59_y', '35_60_x', '35_60_y',
'35_61_x', '35_61_y', '35_62_x', '35_62_y', '35_63_x', '35_63_y', '35_64_x', '35_64_y', '35_65_x',
'35_65_y', '35_66_x', '35_66_y', '35_67_x', '35_67_y', '36_37_x', '36_37_y', '36_38_x', '36_38_y',
'36_39_x', '36_39_y', '36_40_x', '36_40_y', '36_41_x', '36_41_y', '36_42_x', '36_42_y', '36_43_x',
'36_43_y', '36_44_x', '36_44_y', '36_45_x', '36_45_y', '36_46_x', '36_46_y', '36_47_x', '36_47_y',
'36_48_x', '36_48_y', '36_49_x', '36_49_y', '36_50_x', '36_50_y', '36_51_x', '36_51_y', '36_52_x',
'36_52_y', '36_53_x', '36_53_y', '36_54_x', '36_54_y', '36_55_x', '36_55_y', '36_56_x', '36_56_y',
'36_57_x', '36_57_y', '36_58_x', '36_58_y', '36_59_x', '36_59_y', '36_60_x', '36_60_y', '36_61_x',
'36_61_y', '36_62_x', '36_62_y', '36_63_x', '36_63_y', '36_64_x', '36_64_y', '36_65_x', '36_65_y',
'36_66_x', '36_66_y', '36_67_x', '36_67_y', '37_38_x', '37_38_y', '37_39_x', '37_39_y', '37_40_x',
'37_40_y', '37_41_x', '37_41_y', '37_42_x', '37_42_y', '37_43_x', '37_43_y', '37_44_x', '37_44_y',
'37_45_x', '37_45_y', '37_46_x', '37_46_y', '37_47_x', '37_47_y', '37_48_x', '37_48_y', '37_49_x',
'37_49_y', '37_50_x', '37_50_y', '37_51_x', '37_51_y', '37_52_x', '37_52_y', '37_53_x', '37_53_y',
'37_54_x', '37_54_y', '37_55_x', '37_55_y', '37_56_x', '37_56_y', '37_57_x', '37_57_y', '37_58_x',
'37_58_y', '37_59_x', '37_59_y', '37_60_x', '37_60_y', '37_61_x', '37_61_y', '37_62_x', '37_62_y',
'37_63_x', '37_63_y', '37_64_x', '37_64_y', '37_65_x', '37_65_y', '37_66_x', '37_66_y', '37_67_x',
'37_67_y', '38_39_x', '38_39_y', '38_40_x', '38_40_y', '38_41_x', '38_41_y', '38_42_x', '38_42_y',
'38_43_x', '38_43_y', '38_44_x', '38_44_y', '38_45_x', '38_45_y', '38_46_x', '38_46_y', '38_47_x',
'38_47_y', '38_48_x', '38_48_y', '38_49_x', '38_49_y', '38_50_x', '38_50_y', '38_51_x', '38_51_y',
'38_52_x', '38_52_y', '38_53_x', '38_53_y', '38_54_x', '38_54_y', '38_55_x', '38_55_y', '38_56_x',
'38_56_y', '38_57_x', '38_57_y', '38_58_x', '38_58_y', '38_59_x', '38_59_y', '38_60_x', '38_60_y',
'38_61_x', '38_61_y', '38_62_x', '38_62_y', '38_63_x', '38_63_y', '38_64_x', '38_64_y', '38_65_x',
'38_65_y', '38_66_x', '38_66_y', '38_67_x', '38_67_y', '39_40_x', '39_40_y', '39_41_x', '39_41_y',
'39_42_x', '39_42_y', '39_43_x', '39_43_y', '39_44_x', '39_44_y', '39_45_x', '39_45_y', '39_46_x',
'39_46_y', '39_47_x', '39_47_y', '39_48_x', '39_48_y', '39_49_x', '39_49_y', '39_50_x', '39_50_y',
'39_51_x', '39_51_y', '39_52_x', '39_52_y', '39_53_x', '39_53_y', '39_54_x', '39_54_y', '39_55_x',
'39_55_y', '39_56_x', '39_56_y', '39_57_x', '39_57_y', '39_58_x', '39_58_y', '39_59_x', '39_59_y',
'39_60_x', '39_60_y', '39_61_x', '39_61_y', '39_62_x', '39_62_y', '39_63_x', '39_63_y', '39_64_x',
'39_64_y', '39_65_x', '39_65_y', '39_66_x', '39_66_y', '39_67_x', '39_67_y', '3_10_x', '3_10_y',
'3_11_x', '3_11_y', '3_12_x', '3_12_y', '3_13_x', '3_13_y', '3_14_x', '3_14_y', '3_15_x', '3_15_y',
'3_16_x', '3_16_y', '3_17_x', '3_17_y', '3_18_x', '3_18_y', '3_19_x', '3_19_y', '3_20_x', '3_20_y',
'3_21_x', '3_21_y', '3_22_x', '3_22_y', '3_23_x', '3_23_y', '3_24_x', '3_24_y', '3_25_x', '3_25_y',
'3_26_x', '3_26_y', '3_27_x', '3_27_y', '3_28_x', '3_28_y', '3_29_x', '3_29_y', '3_30_x', '3_30_y',
'3_31_x', '3_31_y', '3_32_x', '3_32_y', '3_33_x', '3_33_y', '3_34_x', '3_34_y', '3_35_x', '3_35_y',
'3_36_x', '3_36_y', '3_37_x', '3_37_y', '3_38_x', '3_38_y', '3_39_x', '3_39_y', '3_40_x', '3_40_y',
'3_41_x', '3_41_y', '3_42_x', '3_42_y', '3_43_x', '3_43_y', '3_44_x', '3_44_y', '3_45_x', '3_45_y',
'3_46_x', '3_46_y', '3_47_x', '3_47_y', '3_48_x', '3_48_y', '3_49_x', '3_49_y', '3_4_x', '3_4_y',
'3_50_x', '3_50_y', '3_51_x', '3_51_y', '3_52_x', '3_52_y', '3_53_x', '3_53_y', '3_54_x', '3_54_y',
'3_55_x', '3_55_y', '3_56_x', '3_56_y', '3_57_x', '3_57_y', '3_58_x', '3_58_y', '3_59_x', '3_59_y',
'3_5_x', '3_5_y', '3_60_x', '3_60_y', '3_61_x', '3_61_y', '3_62_x', '3_62_y', '3_63_x', '3_63_y',
'3_64_x', '3_64_y', '3_65_x', '3_65_y', '3_66_x', '3_66_y', '3_67_x', '3_67_y', '3_6_x', '3_6_y',
'3_7_x', '3_7_y', '3_8_x', '3_8_y', '3_9_x', '3_9_y', '40_41_x', '40_41_y', '40_42_x', '40_42_y',
'40_43_x', '40_43_y', '40_44_x', '40_44_y', '40_45_x', '40_45_y', '40_46_x', '40_46_y', '40_47_x',
'40_47_y', '40_48_x', '40_48_y', '40_49_x', '40_49_y', '40_50_x', '40_50_y', '40_51_x', '40_51_y',
'40_52_x', '40_52_y', '40_53_x', '40_53_y', '40_54_x', '40_54_y', '40_55_x', '40_55_y', '40_56_x',
'40_56_y', '40_57_x', '40_57_y', '40_58_x', '40_58_y', '40_59_x', '40_59_y', '40_60_x', '40_60_y',
'40_61_x', '40_61_y', '40_62_x', '40_62_y', '40_63_x', '40_63_y', '40_64_x', '40_64_y', '40_65_x',
'40_65_y', '40_66_x', '40_66_y', '40_67_x', '40_67_y', '41_42_x', '41_42_y', '41_43_x', '41_43_y',
'41_44_x', '41_44_y', '41_45_x', '41_45_y', '41_46_x', '41_46_y', '41_47_x', '41_47_y', '41_48_x',
'41_48_y', '41_49_x', '41_49_y', '41_50_x', '41_50_y', '41_51_x', '41_51_y', '41_52_x', '41_52_y',
'41_53_x', '41_53_y', '41_54_x', '41_54_y', '41_55_x', '41_55_y', '41_56_x', '41_56_y', '41_57_x',
'41_57_y', '41_58_x', '41_58_y', '41_59_x', '41_59_y', '41_60_x', '41_60_y', '41_61_x', '41_61_y',
'41_62_x', '41_62_y', '41_63_x', '41_63_y', '41_64_x', '41_64_y', '41_65_x', '41_65_y', '41_66_x',
'41_66_y', '41_67_x', '41_67_y', '42_43_x', '42_43_y', '42_44_x', '42_44_y', '42_45_x', '42_45_y',
'42_46_x', '42_46_y', '42_47_x', '42_47_y', '42_48_x', '42_48_y', '42_49_x', '42_49_y', '42_50_x',
'42_50_y', '42_51_x', '42_51_y', '42_52_x', '42_52_y', '42_53_x', '42_53_y', '42_54_x', '42_54_y',
'42_55_x', '42_55_y', '42_56_x', '42_56_y', '42_57_x', '42_57_y', '42_58_x', '42_58_y', '42_59_x',
'42_59_y', '42_60_x', '42_60_y', '42_61_x', '42_61_y', '42_62_x', '42_62_y', '42_63_x', '42_63_y',
'42_64_x', '42_64_y', '42_65_x', '42_65_y', '42_66_x', '42_66_y', '42_67_x', '42_67_y', '43_44_x',
'43_44_y', '43_45_x', '43_45_y', '43_46_x', '43_46_y', '43_47_x', '43_47_y', '43_48_x', '43_48_y',
'43_49_x', '43_49_y', '43_50_x', '43_50_y', '43_51_x', '43_51_y', '43_52_x', '43_52_y', '43_53_x',
'43_53_y', '43_54_x', '43_54_y', '43_55_x', '43_55_y', '43_56_x', '43_56_y', '43_57_x', '43_57_y',
'43_58_x', '43_58_y', '43_59_x', '43_59_y', '43_60_x', '43_60_y', '43_61_x', '43_61_y', '43_62_x',
'43_62_y', '43_63_x', '43_63_y', '43_64_x', '43_64_y', '43_65_x', '43_65_y', '43_66_x', '43_66_y',
'43_67_x', '43_67_y', '44_45_x', '44_45_y', '44_46_x', '44_46_y', '44_47_x', '44_47_y', '44_48_x',
'44_48_y', '44_49_x', '44_49_y', '44_50_x', '44_50_y', '44_51_x', '44_51_y', '44_52_x', '44_52_y',
'44_53_x', '44_53_y', '44_54_x', '44_54_y', '44_55_x', '44_55_y', '44_56_x', '44_56_y', '44_57_x',
'44_57_y', '44_58_x', '44_58_y', '44_59_x', '44_59_y', '44_60_x', '44_60_y', '44_61_x', '44_61_y',
'44_62_x', '44_62_y', '44_63_x', '44_63_y', '44_64_x', '44_64_y', '44_65_x', '44_65_y', '44_66_x',
'44_66_y', '44_67_x', '44_67_y', '45_46_x', '45_46_y', '45_47_x', '45_47_y', '45_48_x', '45_48_y',
'45_49_x', '45_49_y', '45_50_x', '45_50_y', '45_51_x', '45_51_y', '45_52_x', '45_52_y', '45_53_x',
'45_53_y', '45_54_x', '45_54_y', '45_55_x', '45_55_y', '45_56_x', '45_56_y', '45_57_x', '45_57_y',
'45_58_x', '45_58_y', '45_59_x', '45_59_y', '45_60_x', '45_60_y', '45_61_x', '45_61_y', '45_62_x',
'45_62_y', '45_63_x', '45_63_y', '45_64_x', '45_64_y', '45_65_x', '45_65_y', '45_66_x', '45_66_y',
'45_67_x', '45_67_y', '46_47_x', '46_47_y', '46_48_x', '46_48_y', '46_49_x', '46_49_y', '46_50_x',
'46_50_y', '46_51_x', '46_51_y', '46_52_x', '46_52_y', '46_53_x', '46_53_y', '46_54_x', '46_54_y',
'46_55_x', '46_55_y', '46_56_x', '46_56_y', '46_57_x', '46_57_y', '46_58_x', '46_58_y', '46_59_x',
'46_59_y', '46_60_x', '46_60_y', '46_61_x', '46_61_y', '46_62_x', '46_62_y', '46_63_x', '46_63_y',
'46_64_x', '46_64_y', '46_65_x', '46_65_y', '46_66_x', '46_66_y', '46_67_x', '46_67_y', '47_48_x',
'47_48_y', '47_49_x', '47_49_y', '47_50_x', '47_50_y', '47_51_x', '47_51_y', '47_52_x', '47_52_y',
'47_53_x', '47_53_y', '47_54_x', '47_54_y', '47_55_x', '47_55_y', '47_56_x', '47_56_y', '47_57_x',
'47_57_y', '47_58_x', '47_58_y', '47_59_x', '47_59_y', '47_60_x', '47_60_y', '47_61_x', '47_61_y',
'47_62_x', '47_62_y', '47_63_x', '47_63_y', '47_64_x', '47_64_y', '47_65_x', '47_65_y', '47_66_x',
'47_66_y', '47_67_x', '47_67_y', '48_49_x', '48_49_y', '48_50_x', '48_50_y', '48_51_x', '48_51_y',
'48_52_x', '48_52_y', '48_53_x', '48_53_y', '48_54_x', '48_54_y', '48_55_x', '48_55_y', '48_56_x',
'48_56_y', '48_57_x', '48_57_y', '48_58_x', '48_58_y', '48_59_x', '48_59_y', '48_60_x', '48_60_y',
'48_61_x', '48_61_y', '48_62_x', '48_62_y', '48_63_x', '48_63_y', '48_64_x', '48_64_y', '48_65_x',
'48_65_y', '48_66_x', '48_66_y', '48_67_x', '48_67_y', '49_50_x', '49_50_y', '49_51_x', '49_51_y',
'49_52_x', '49_52_y', '49_53_x', '49_53_y', '49_54_x', '49_54_y', '49_55_x', '49_55_y', '49_56_x',
'49_56_y', '49_57_x', '49_57_y', '49_58_x', '49_58_y', '49_59_x', '49_59_y', '49_60_x', '49_60_y',
'49_61_x', '49_61_y', '49_62_x', '49_62_y', '49_63_x', '49_63_y', '49_64_x', '49_64_y', '49_65_x',
'49_65_y', '49_66_x', '49_66_y', '49_67_x', '49_67_y', '4_10_x', '4_10_y', '4_11_x', '4_11_y', '4_12_x',
'4_12_y', '4_13_x', '4_13_y', '4_14_x', '4_14_y', '4_15_x', '4_15_y', '4_16_x', '4_16_y', '4_17_x',
'4_17_y', '4_18_x', '4_18_y', '4_19_x', '4_19_y', '4_20_x', '4_20_y', '4_21_x', '4_21_y', '4_22_x',
'4_22_y', '4_23_x', '4_23_y', '4_24_x', '4_24_y', '4_25_x', '4_25_y', '4_26_x', '4_26_y', '4_27_x',
'4_27_y', '4_28_x', '4_28_y', '4_29_x', '4_29_y', '4_30_x', '4_30_y', '4_31_x', '4_31_y', '4_32_x',
'4_32_y', '4_33_x', '4_33_y', '4_34_x', '4_34_y', '4_35_x', '4_35_y', '4_36_x', '4_36_y', '4_37_x',
'4_37_y', '4_38_x', '4_38_y', '4_39_x', '4_39_y', '4_40_x', '4_40_y', '4_41_x', '4_41_y', '4_42_x',
'4_42_y', '4_43_x', '4_43_y', '4_44_x', '4_44_y', '4_45_x', '4_45_y', '4_46_x', '4_46_y', '4_47_x',
'4_47_y', '4_48_x', '4_48_y', '4_49_x', '4_49_y', '4_50_x', '4_50_y', '4_51_x', '4_51_y', '4_52_x',
'4_52_y', '4_53_x', '4_53_y', '4_54_x', '4_54_y', '4_55_x', '4_55_y', '4_56_x', '4_56_y', '4_57_x',
'4_57_y', '4_58_x', '4_58_y', '4_59_x', '4_59_y', '4_5_x', '4_5_y', '4_60_x', '4_60_y', '4_61_x',
'4_61_y', '4_62_x', '4_62_y', '4_63_x', '4_63_y', '4_64_x', '4_64_y', '4_65_x', '4_65_y', '4_66_x',
'4_66_y', '4_67_x', '4_67_y', '4_6_x', '4_6_y', '4_7_x', '4_7_y', '4_8_x', '4_8_y', '4_9_x', '4_9_y',
'50_51_x', '50_51_y', '50_52_x', '50_52_y', '50_53_x', '50_53_y', '50_54_x', '50_54_y', '50_55_x',
'50_55_y', '50_56_x', '50_56_y', '50_57_x', '50_57_y', '50_58_x', '50_58_y', '50_59_x', '50_59_y',
'50_60_x', '50_60_y', '50_61_x', '50_61_y', '50_62_x', '50_62_y', '50_63_x', '50_63_y', '50_64_x',
'50_64_y', '50_65_x', '50_65_y', '50_66_x', '50_66_y', '50_67_x', '50_67_y', '51_52_x', '51_52_y',
'51_53_x', '51_53_y', '51_54_x', '51_54_y', '51_55_x', '51_55_y', '51_56_x', '51_56_y', '51_57_x',
'51_57_y', '51_58_x', '51_58_y', '51_59_x', '51_59_y', '51_60_x', '51_60_y', '51_61_x', '51_61_y',
'51_62_x', '51_62_y', '51_63_x', '51_63_y', '51_64_x', '51_64_y', '51_65_x', '51_65_y', '51_66_x',
'51_66_y', '51_67_x', '51_67_y', '52_53_x', '52_53_y', '52_54_x', '52_54_y', '52_55_x', '52_55_y',
'52_56_x', '52_56_y', '52_57_x', '52_57_y', '52_58_x', '52_58_y', '52_59_x', '52_59_y', '52_60_x',
'52_60_y', '52_61_x', '52_61_y', '52_62_x', '52_62_y', '52_63_x', '52_63_y', '52_64_x', '52_64_y',
'52_65_x', '52_65_y', '52_66_x', '52_66_y', '52_67_x', '52_67_y', '53_54_x', '53_54_y', '53_55_x',
'53_55_y', '53_56_x', '53_56_y', '53_57_x', '53_57_y', '53_58_x', '53_58_y', '53_59_x', '53_59_y',
'53_60_x', '53_60_y', '53_61_x', '53_61_y', '53_62_x', '53_62_y', '53_63_x', '53_63_y', '53_64_x',
'53_64_y', '53_65_x', '53_65_y', '53_66_x', '53_66_y', '53_67_x', '53_67_y', '54_55_x', '54_55_y',
'54_56_x', '54_56_y', '54_57_x', '54_57_y', '54_58_x', '54_58_y', '54_59_x', '54_59_y', '54_60_x',
'54_60_y', '54_61_x', '54_61_y', '54_62_x', '54_62_y', '54_63_x', '54_63_y', '54_64_x', '54_64_y',
'54_65_x', '54_65_y', '54_66_x', '54_66_y', '54_67_x', '54_67_y', '55_56_x', '55_56_y', '55_57_x',
'55_57_y', '55_58_x', '55_58_y', '55_59_x', '55_59_y', '55_60_x', '55_60_y', '55_61_x', '55_61_y',
'55_62_x', '55_62_y', '55_63_x', '55_63_y', '55_64_x', '55_64_y', '55_65_x', '55_65_y', '55_66_x',
'55_66_y', '55_67_x', '55_67_y', '56_57_x', '56_57_y', '56_58_x', '56_58_y', '56_59_x', '56_59_y',
'56_60_x', '56_60_y', '56_61_x', '56_61_y', '56_62_x', '56_62_y', '56_63_x', '56_63_y', '56_64_x',
'56_64_y', '56_65_x', '56_65_y', '56_66_x', '56_66_y', '56_67_x', '56_67_y', '57_58_x', '57_58_y',
'57_59_x', '57_59_y', '57_60_x', '57_60_y', '57_61_x', '57_61_y', '57_62_x', '57_62_y', '57_63_x',
'57_63_y', '57_64_x', '57_64_y', '57_65_x', '57_65_y', '57_66_x', '57_66_y', '57_67_x', '57_67_y',
'58_59_x', '58_59_y', '58_60_x', '58_60_y', '58_61_x', '58_61_y', '58_62_x', '58_62_y', '58_63_x',
'58_63_y', '58_64_x', '58_64_y', '58_65_x', '58_65_y', '58_66_x', '58_66_y', '58_67_x', '58_67_y',
'59_60_x', '59_60_y', '59_61_x', '59_61_y', '59_62_x', '59_62_y', '59_63_x', '59_63_y', '59_64_x',
'59_64_y', '59_65_x', '59_65_y', '59_66_x', '59_66_y', '59_67_x', '59_67_y', '5_10_x', '5_10_y',
'5_11_x', '5_11_y', '5_12_x', '5_12_y', '5_13_x', '5_13_y', '5_14_x', '5_14_y', '5_15_x', '5_15_y',
'5_16_x', '5_16_y', '5_17_x', '5_17_y', '5_18_x', '5_18_y', '5_19_x', '5_19_y', '5_20_x', '5_20_y',
'5_21_x', '5_21_y', '5_22_x', '5_22_y', '5_23_x', '5_23_y', '5_24_x', '5_24_y', '5_25_x', '5_25_y',
'5_26_x', '5_26_y', '5_27_x', '5_27_y', '5_28_x', '5_28_y', '5_29_x', '5_29_y', '5_30_x', '5_30_y',
'5_31_x', '5_31_y', '5_32_x', '5_32_y', '5_33_x', '5_33_y', '5_34_x', '5_34_y', '5_35_x', '5_35_y',
'5_36_x', '5_36_y', '5_37_x', '5_37_y', '5_38_x', '5_38_y', '5_39_x', '5_39_y', '5_40_x', '5_40_y',
'5_41_x', '5_41_y', '5_42_x', '5_42_y', '5_43_x', '5_43_y', '5_44_x', '5_44_y', '5_45_x', '5_45_y',
'5_46_x', '5_46_y', '5_47_x', '5_47_y', '5_48_x', '5_48_y', '5_49_x', '5_49_y', '5_50_x', '5_50_y',
'5_51_x', '5_51_y', '5_52_x', '5_52_y', '5_53_x', '5_53_y', '5_54_x', '5_54_y', '5_55_x', '5_55_y',
'5_56_x', '5_56_y', '5_57_x', '5_57_y', '5_58_x', '5_58_y', '5_59_x', '5_59_y', '5_60_x', '5_60_y',
'5_61_x', '5_61_y', '5_62_x', '5_62_y', '5_63_x', '5_63_y', '5_64_x', '5_64_y', '5_65_x', '5_65_y',
'5_66_x', '5_66_y', '5_67_x', '5_67_y', '5_6_x', '5_6_y', '5_7_x', '5_7_y', '5_8_x', '5_8_y', '5_9_x',
'5_9_y', '60_61_x', '60_61_y', '60_62_x', '60_62_y', '60_63_x', '60_63_y', '60_64_x', '60_64_y',
'60_65_x', '60_65_y', '60_66_x', '60_66_y', '60_67_x', '60_67_y', '61_62_x', '61_62_y', '61_63_x',
'61_63_y', '61_64_x', '61_64_y', '61_65_x', '61_65_y', '61_66_x', '61_66_y', '61_67_x', '61_67_y',
'62_63_x', '62_63_y', '62_64_x', '62_64_y', '62_65_x', '62_65_y', '62_66_x', '62_66_y', '62_67_x',
'62_67_y', '63_64_x', '63_64_y', '63_65_x', '63_65_y', '63_66_x', '63_66_y', '63_67_x', '63_67_y',
'64_65_x', '64_65_y', '64_66_x', '64_66_y', '64_67_x', '64_67_y', '65_66_x', '65_66_y', '65_67_x',
'65_67_y', '66_67_x', '66_67_y', '6_10_x', '6_10_y', '6_11_x', '6_11_y', '6_12_x', '6_12_y', '6_13_x',
'6_13_y', '6_14_x', '6_14_y', '6_15_x', '6_15_y', '6_16_x', '6_16_y', '6_17_x', '6_17_y', '6_18_x',
'6_18_y', '6_19_x', '6_19_y', '6_20_x', '6_20_y', '6_21_x', '6_21_y', '6_22_x', '6_22_y', '6_23_x',
'6_23_y', '6_24_x', '6_24_y', '6_25_x', '6_25_y', '6_26_x', '6_26_y', '6_27_x', '6_27_y', '6_28_x',
'6_28_y', '6_29_x', '6_29_y', '6_30_x', '6_30_y', '6_31_x', '6_31_y', '6_32_x', '6_32_y', '6_33_x',
'6_33_y', '6_34_x', '6_34_y', '6_35_x', '6_35_y', '6_36_x', '6_36_y', '6_37_x', '6_37_y', '6_38_x',
'6_38_y', '6_39_x', '6_39_y', '6_40_x', '6_40_y', '6_41_x', '6_41_y', '6_42_x', '6_42_y', '6_43_x',
'6_43_y', '6_44_x', '6_44_y', '6_45_x', '6_45_y', '6_46_x', '6_46_y', '6_47_x', '6_47_y', '6_48_x',
'6_48_y', '6_49_x', '6_49_y', '6_50_x', '6_50_y', '6_51_x', '6_51_y', '6_52_x', '6_52_y', '6_53_x',
'6_53_y', '6_54_x', '6_54_y', '6_55_x', '6_55_y', '6_56_x', '6_56_y', '6_57_x', '6_57_y', '6_58_x',
'6_58_y', '6_59_x', '6_59_y', '6_60_x', '6_60_y', '6_61_x', '6_61_y', '6_62_x', '6_62_y', '6_63_x',
'6_63_y', '6_64_x', '6_64_y', '6_65_x', '6_65_y', '6_66_x', '6_66_y', '6_67_x', '6_67_y', '6_7_x',
'6_7_y', '6_8_x', '6_8_y', '6_9_x', '6_9_y', '7_10_x', '7_10_y', '7_11_x', '7_11_y', '7_12_x', '7_12_y',
'7_13_x', '7_13_y', '7_14_x', '7_14_y', '7_15_x', '7_15_y', '7_16_x', '7_16_y', '7_17_x', '7_17_y',
'7_18_x', '7_18_y', '7_19_x', '7_19_y', '7_20_x', '7_20_y', '7_21_x', '7_21_y', '7_22_x', '7_22_y',
'7_23_x', '7_23_y', '7_24_x', '7_24_y', '7_25_x', '7_25_y', '7_26_x', '7_26_y', '7_27_x', '7_27_y',
'7_28_x', '7_28_y', '7_29_x', '7_29_y', '7_30_x', '7_30_y', '7_31_x', '7_31_y', '7_32_x', '7_32_y',
'7_33_x', '7_33_y', '7_34_x', '7_34_y', '7_35_x', '7_35_y', '7_36_x', '7_36_y', '7_37_x', '7_37_y',
'7_38_x', '7_38_y', '7_39_x', '7_39_y', '7_40_x', '7_40_y', '7_41_x', '7_41_y', '7_42_x', '7_42_y',
'7_43_x', '7_43_y', '7_44_x', '7_44_y', '7_45_x', '7_45_y', '7_46_x', '7_46_y', '7_47_x', '7_47_y',
'7_48_x', '7_48_y', '7_49_x', '7_49_y', '7_50_x', '7_50_y', '7_51_x', '7_51_y', '7_52_x', '7_52_y',
'7_53_x', '7_53_y', '7_54_x', '7_54_y', '7_55_x', '7_55_y', '7_56_x', '7_56_y', '7_57_x', '7_57_y',
'7_58_x', '7_58_y', '7_59_x', '7_59_y', '7_60_x', '7_60_y', '7_61_x', '7_61_y', '7_62_x', '7_62_y',
'7_63_x', '7_63_y', '7_64_x', '7_64_y', '7_65_x', '7_65_y', '7_66_x', '7_66_y', '7_67_x', '7_67_y',
'7_8_x', '7_8_y', '7_9_x', '7_9_y', '8_10_x', '8_10_y', '8_11_x', '8_11_y', '8_12_x', '8_12_y', '8_13_x',
'8_13_y', '8_14_x', '8_14_y', '8_15_x', '8_15_y', '8_16_x', '8_16_y', '8_17_x', '8_17_y', '8_18_x',
'8_18_y', '8_19_x', '8_19_y', '8_20_x', '8_20_y', '8_21_x', '8_21_y', '8_22_x', '8_22_y', '8_23_x',
'8_23_y', '8_24_x', '8_24_y', '8_25_x', '8_25_y', '8_26_x', '8_26_y', '8_27_x', '8_27_y', '8_28_x',
'8_28_y', '8_29_x', '8_29_y', '8_30_x', '8_30_y', '8_31_x', '8_31_y', '8_32_x', '8_32_y', '8_33_x',
'8_33_y', '8_34_x', '8_34_y', '8_35_x', '8_35_y', '8_36_x', '8_36_y', '8_37_x', '8_37_y', '8_38_x',
'8_38_y', '8_39_x', '8_39_y', '8_40_x', '8_40_y', '8_41_x', '8_41_y', '8_42_x', '8_42_y', '8_43_x',
'8_43_y', '8_44_x', '8_44_y', '8_45_x', '8_45_y', '8_46_x', '8_46_y', '8_47_x', '8_47_y', '8_48_x',
'8_48_y', '8_49_x', '8_49_y', '8_50_x', '8_50_y', '8_51_x', '8_51_y', '8_52_x', '8_52_y', '8_53_x',
'8_53_y', '8_54_x', '8_54_y', '8_55_x', '8_55_y', '8_56_x', '8_56_y', '8_57_x', '8_57_y', '8_58_x',
'8_58_y', '8_59_x', '8_59_y', '8_60_x', '8_60_y', '8_61_x', '8_61_y', '8_62_x', '8_62_y', '8_63_x',
'8_63_y', '8_64_x', '8_64_y', '8_65_x', '8_65_y', '8_66_x', '8_66_y', '8_67_x', '8_67_y', '8_9_x',
'8_9_y', '9_10_x', '9_10_y', '9_11_x', '9_11_y', '9_12_x', '9_12_y', '9_13_x', '9_13_y', '9_14_x',
'9_14_y', '9_15_x', '9_15_y', '9_16_x', '9_16_y', '9_17_x', '9_17_y', '9_18_x', '9_18_y', '9_19_x',
'9_19_y', '9_20_x', '9_20_y', '9_21_x', '9_21_y', '9_22_x', '9_22_y', '9_23_x', '9_23_y', '9_24_x',
'9_24_y', '9_25_x', '9_25_y', '9_26_x', '9_26_y', '9_27_x', '9_27_y', '9_28_x', '9_28_y', '9_29_x',
'9_29_y', '9_30_x', '9_30_y', '9_31_x', '9_31_y', '9_32_x', '9_32_y', '9_33_x', '9_33_y', '9_34_x',
'9_34_y', '9_35_x', '9_35_y', '9_36_x', '9_36_y', '9_37_x', '9_37_y', '9_38_x', '9_38_y', '9_39_x',
'9_39_y', '9_40_x', '9_40_y', '9_41_x', '9_41_y', '9_42_x', '9_42_y', '9_43_x', '9_43_y', '9_44_x',
'9_44_y', '9_45_x', '9_45_y', '9_46_x', '9_46_y', '9_47_x', '9_47_y', '9_48_x', '9_48_y', '9_49_x',
'9_49_y', '9_50_x', '9_50_y', '9_51_x', '9_51_y', '9_52_x', '9_52_y', '9_53_x', '9_53_y', '9_54_x',
'9_54_y', '9_55_x', '9_55_y', '9_56_x', '9_56_y', '9_57_x', '9_57_y', '9_58_x', '9_58_y', '9_59_x',
'9_59_y', '9_60_x', '9_60_y', '9_61_x', '9_61_y', '9_62_x', '9_62_y', '9_63_x', '9_63_y', '9_64_x',
'9_64_y', '9_65_x', '9_65_y', '9_66_x', '9_66_y', '9_67_x', '9_67_y', 'skin_0', 'skin_1', 'skin_10',
'skin_11', 'skin_12', 'skin_13', 'skin_14', 'skin_15', 'skin_16', 'skin_17', 'skin_18', 'skin_19',
'skin_2', 'skin_20', 'skin_21', 'skin_22', 'skin_23', 'skin_24', 'skin_25', 'skin_3', 'skin_4', 'skin_5',
'skin_6', 'skin_7', 'skin_8', 'skin_9']
# Keep only the curated feature columns (pairwise landmark distances and
# skin features) listed in select_cols, which is defined earlier in the file.
beauty_data = beauty_data[select_cols]
print(beauty_data.shape)
# Drop the raw image reference column; only numeric features feed the model.
beauty_data.drop(['Image'], axis=1, inplace=True)
# 75/25 train/test split: features are every column except 'label',
# the target is the 'label' column.
X_train, X_test, y_train, y_test = train_test_split(beauty_data.drop('label', axis=1), beauty_data['label'],
                                                    train_size=0.75, test_size=0.25)
# Earlier TPOT-based AutoML experiment, kept for reference but disabled.
# tpot = TPOTRegressor(scoring='r2', n_jobs=-1, early_stop=5, verbosity=2)
# tpot.fit(X_train, y_train)
# print(tpot.score(X_test, y_test))
# tpot.export('../model/tpot_beauty_pipeline.py')
# Run ATM (Auto Tune Models) instead.  NOTE(review): ATM appears to read its
# own configuration; the split above is not passed to it — confirm intended.
atm = ATM()
atm.run()
|
nilq/baby-python
|
python
|
from .descriptor import DescriptorType
from .object_type import ObjectDict
# Public API of this package: names re-exported for star-imports.
__all__ = ["ObjectDict", "DescriptorType"]
|
nilq/baby-python
|
python
|
from typing import List
class Solution:
    # LeetCode 150, Evaluate Reverse Polish Notation, Medium
    # (original comment said 141, but evalRPN is problem 150)
    def evalRPN(self, tokens: List[str]) -> int:
        """Evaluate an expression given in Reverse Polish Notation.

        Operands are pushed onto a stack; each operator pops its two
        operands, applies itself, and pushes the result.  A valid RPN
        expression leaves exactly one value on the stack — the answer.

        Note: division uses int(a / b), which truncates toward zero
        (e.g. 6 / -132 -> 0), unlike floor division //.
        """
        # Dispatch table replaces four duplicated pop/pop/push branches.
        ops = {
            "+": lambda a, b: a + b,
            "-": lambda a, b: a - b,
            "*": lambda a, b: a * b,
            "/": lambda a, b: int(a / b),  # truncate toward zero
        }
        stack = []
        for token in tokens:
            if token in ops:
                operand2 = stack.pop()  # right operand is on top
                operand1 = stack.pop()
                stack.append(ops[token](operand1, operand2))
            else:
                stack.append(int(token))
        return stack[0]
def main():
    """Quick manual check of Solution.evalRPN on the LeetCode sample input."""
    tokens = ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
    solver = Solution()
    print(solver.evalRPN(tokens))


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# we are here to get weighted-5-node-subgraph,
# given edge-count/bi-edge-count/strong-tie-count/weak-tie-count only to distinguish
# to leverage on weighte 4 node subgraphs(edge_count, biedge_count, strong_count, weak_count, subNo)
# next to share not edge
import re,sys,random, os
def subg(edges,s4,lf,sf,sfv,ror):
# subgraph counts, range from 1 to 13 for 3-node subgraph
#subgraph = {}
distinct = {}
ec = 0
#ec_ij = 0 # edge count within subgraph so as to cluster subgraph
bc = 0
#bc_ij = 0 # double directed edges count
sc = 0
wc = 0
unit = {} # key as subgraph, and value as features of subgraph
fcount = {} # vetex count in the front seat
#fcount_ij = {}
bcount = {} # vetex count in the back seat
#bcount_ij = {}
bi_boolean = False # for bi-edge check
pair = []
temp = []
third = 0
key = ''
#rpair = ''
seq = [] # for every subgraph to store all connections
#seq_ij = [] # for i,j related conenctions only
# sort to begin
edges.sort()
unit_seq = {}
unit_seq_sorted = {}
# motif detection take place in edges and e_copy
pair = []
key = ''
temp = []
nodes = []
features = []
new = ''
sub_can = []
for sub in s4: # strong edge count, weak edge count, and sub-no, giving #13 at most
#lf.write('')
lf.write('We are considering 4 node subgraph: <'+sub+'>\n')
for c in edges:
nodes = re.split(',',re.split(':',sub)[0])
features = re.split(',',re.split(':',sub)[1])
sub_can = re.split('\'',re.split(':',sub)[2][1:-1])
lf.write('\n')
lf.write('============= New Inner Loop ==============\n')
lf.write('Innner connection is <'+c+'>\n')
key = ''
temp = re.split(',',c)
if temp[0] in nodes and temp[1] in nodes or \
temp[0] not in nodes and temp[1] not in nodes:
continue
if temp[0] not in nodes:
new = temp[0]
nodes.append(new)
else:
new = temp[1]
nodes.append(new)
nodes.sort()
key = nodes[0]+','+nodes[1]+','+nodes[2]+','+nodes[3]+','+nodes[4]
if key in unit.keys():
continue
# to see new to other's connection
ec = int(features[0])
bc = int(features[1])
sc = int(features[2])
wc = int(features[3])
#fcount = {}
#bcount = {}
bi_boolean = False
seq = []
### Get edges from this 4-node-subgraph ###
for x in sub_can:
if len(x)<4:
continue
seq.append(x)
##### new subgraph found #####
lf.write('We found a new 5_node_subgraph: <'+key+'>\n')
lf.write('@@@@@seq for now we have:@@@@@\n')
for s in seq:
lf.write(s+'\n')
# we got a subgraph now, they are e and c
# next, get detail relationships across e and c
k = ''
for n in nodes:
if n!=new:
bi_boolean = False
k = n+','+new+',20'
if k in edges:
ec += 1
sc += 1
bi_boolean = True
seq.append(k)
#
# if p in fcount.keys():
# fcount[p] = fcount[p] + 1
# else:
# fcount[p] = 1
# if third in bcount.keys():
# bcount[third] = bcount[third] + 1
# else:
# bcount[third] = 1
k = n+','+new+',10'
if k in edges:
ec = ec + 1
wc = wc + 1
bi_boolean = True
seq.append(k)
k = new+','+n+',20'
if k in edges:
seq.append(k)
ec = ec + 1
sc = sc + 1
if bi_boolean == True:
bc = bc + 1
bi_boolean = False
k = new+','+n+',10'
if k in edges:
seq.append(k)
ec = ec + 1
sc = sc + 1
if bi_boolean == True:
bc = bc + 1
bi_boolean = False
#lf.write('@@@@@@@@@@Here comes sequences of connections\n')
#for s in seq:
# lf.write(s+'\n')
#lf.write('\n')
# check if ec overflow
if ec>20:
print 'ERROR: ec overflowed!!!'+str(ec)
lf.write('ERROR: ec overflowed!!!'+str(ec)+'\n')
# subgraph is about to complete
# value of unit:
unit[key]=str(ec)+','+str(bc)+','+str(sc)+','+str(wc)
unit_seq[key]=seq
# in order to only maitain single edge, we put it into (small edge no, larger edge no) form
temp = []
for s in seq:
ss = s[:-3] # get rid of ',20' or ',10'
first = re.split(',',ss)[0]
second = re.split(',',ss)[1]
new = second+','+first
if first>second and new not in temp:# in alphabet order, not int order
temp.append(new)
elif ss not in temp:
temp.append(ss)
unit_seq_sorted[key]=temp
temp = []
lf.write('We appending following binary direction-free edges to unit_seq_sorted as :\n')
lf.write(str(unit_seq_sorted[key])+'\n')
#double loop ended
#print unit
lf.write('\n')
lf.write('*********Here comes subgraphs************\n')
c = 0
for k,v in unit.iteritems():
lf.write(k+':'+v+'\n')
sfv.write(k+':'+v+':')
sfv.write(str(unit_seq[k])+'\n')
c = c+1
lf.write('\n' + str(c) + ' subgraph count has been written into file.\n')
# Here we are about to count the distinct number of every single subgraph
pp = []
ing_weighted = {} # put ec,bc,sc,wc,sub_no as distinct feature
ing = {} #only put sub_no as distinct feature
# compute weighted_subgraphs
# IT IS the true weighted_subgraphs
weighted_subgraph = {}
binary_subgraph = {}
distinct_weighted = {}
for k, v in unit.iteritems():
pp = re.split(',',k)
lf.write('We consider key: '+str(k)+'\n')
if v not in weighted_subgraph.keys():
weighted_subgraph[v] = 1
else:
weighted_subgraph[v] = weighted_subgraph[v] + 1
if v not in ing_weighted.keys():
distinct_weighted[v] = 1
ing_weighted[v]=[]
for edge in unit_seq[k]:
ing_weighted[v].append(edge)
else:
temp_temp = []
for edge in unit_seq[k]:
if edge not in ing_weighted[v]:
temp_temp.append(edge)
if len(temp_temp)==len(unit_seq[k]):
distinct_weighted[v] += 1
for edge in temp_temp:
ing_weighted[v].append(edge)
vv = int(re.split(',',v)[-1])
# vv is the exact binary 3 node subgraph no
if vv not in binary_subgraph.keys():
binary_subgraph[vv] = 1
else:
binary_subgraph[vv] = binary_subgraph[vv] + 1
if vv not in ing.keys():
distinct[vv] = 1
ing[vv]=[]
for edge in unit_seq_sorted[k]:
ing[vv].append(edge)
else:
temp_temp = []
for edge in unit_seq_sorted[k]:
if edge not in ing[vv]:
temp_temp.append(edge)
if len(temp_temp)==len(unit_seq_sorted[k]):
distinct[vv] += 1
for edge in unit_seq_sorted[k]:
ing[vv].append(edge)
lf.write('Newly appended a distinct subgraph: \n')
# to count the number of ditinct subgraphs
# it may not that precise cauz <sc,wc,13_sub> is short for weighted_3_subgraph
#lf.write('Here comes the distinct version of subgraphs.\n')
#for k,v in ing.iteritems():
# lf.write(str(k)+':'+str(len(v)/4)+'\n')
# distinct[k] = len(v)/4
#print subgraph
#sf.write('\n')
sf.write('Subgraph to be: \n')
if ror==0:# it is real network
# write subgraph.txt with distinct no in
for k,v in weighted_subgraph.iteritems():
vv = int(re.split(',',k)[-1])
#print vv
# weighted subgraph: weighted appearance, distinct count on binary subgraph (1~13): raw appearance, distinct count on weighted subgraph
sf.write(str(k)+':'+str(v)+','+str(distinct_weighted[k])+':'+str(binary_subgraph[vv])+','+str(distinct[vv])+'\n')
else:
for k,v in weighted_subgraph.iteritems():
sf.write(str(k)+':'+str(v)+'\n')
def main(argv):
    """Walk the directory given in argv[1], and for every '*_edges.txt' file
    build the weighted 5-node subgraph counts from the matching
    '*_weighted_4_node_subgraph_verbose.txt' file, writing simple/log/
    subgraph/verbose output files next to the inputs.
    """
    # to store edges from file
    edges = []
    subgraph_of_4_node = []
    subgraph_file_verbose =''
    pair = []
    rf = ''
    f = ''
    log_file = ''
    #log_file_random = ''
    subgraph_file = ''
    line = ''
    sub4 = ''
    for parents,dirnames,filenames in os.walk(argv[1]):
        for fn in filenames:
            if '_edges.txt' in fn:
                print '------Begin Processing '+fn+'--------'
                edges = []
                subgraph_of_4_node = []
                line = ''
                # fn[:fn.index('_')] is the dataset prefix shared by all files
                rf = open(argv[1]+'/'+fn,'r')
                sub4 = open(argv[1]+'/'+fn[:fn.index('_')]+'_weighted_4_node_subgraph_verbose.txt','r')
                f = open(argv[1]+'/'+fn[:fn.index('_')]+'_weighted_5_node_simple.txt','w+')
                log_file = open(argv[1]+'/'+fn[:fn.index('_')]+'_weighted_5_node_log.txt','w')
                #log_file_random = open(argv[1]+'/'+fn[:str(fn).index('_')]+'_weighted_4_node_random_log.txt','w')
                subgraph_file = open(argv[1]+'/'+fn[:str(fn).index('_')]+'_weighted_5_node_subgraph.txt','w+')
                subgraph_file_verbose = open(argv[1]+'/'+fn[:str(fn).index('_')]+'_weighted_5_node_subgraph_verbose.txt','w+')
                #get 3_node_subgraphs (actually the 4-node subgraph records, one per line)
                for line in sub4:
                    subgraph_of_4_node.append(line.strip())
                for line in rf:
                    # skip the CSV header row
                    if('Source' in line):
                        continue
                    # NOTE(review): assumes each row contains ',D...d,' text
                    # (e.g. a 'Directed' type column) that is cut out here,
                    # keeping source, target and weight — confirm input format.
                    line = line[:line.index(',D')]+line[line.index('d,')+1:] # weight is considered here
                    f.write(line)
                #into set: re-read the simplified file just written (w+ mode)
                f.seek(0)
                for line in f:
                    pair = re.split(',',line[:-1])
                    key = pair[0]+','+pair[1]+','+pair[2]
                    edges.append(key)
                edges.sort()
                # call to find subgraphs
                # for original network
                subg(edges,subgraph_of_4_node, log_file,subgraph_file,subgraph_file_verbose,0)
                # for randomized network
                #for m in range(M):
                #    randomize(edges,single,double,log_file_random,subgraph_file,len(edges))# motif detection
                print '======End Processing '+fn+'======'
if __name__ == '__main__':
    main(sys.argv)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Packaging script for the Eng Phys office-space committee tools.
# NOTE(review): distutils is deprecated (PEP 632); setuptools is the usual
# replacement, though the setup() call below would be unchanged.
from distutils.core import setup
# NOTE(review): 'parse-grad-students' contains a hyphen and therefore cannot
# be imported as a Python module name -- confirm the intended filename.
setup(name="Eng Phys Office Space Tools",
      description="A set of scripts to work with the Eng Phys office space committee",
      version="0.1dev",
      author="Tim van Boxtel",
      author_email="vanboxtj@mcmaster.ca",
      py_modules=['parse-grad-students'],
      classifiers=[
          "Programming Language :: Python",
          "Programming Language :: Python :: 3",
          "Development Status :: 4 - Beta",
          "Environment :: Other Environment",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT",
      ])
|
nilq/baby-python
|
python
|
import tweepy #https://github.com/tweepy/tweepy
import csv
import pandas as pd
# Used for progress bar
import time
import sys
#Twitter API credentials
# SECURITY NOTE(review): live-looking API credentials are hard-coded below.
# They should be revoked and loaded from environment variables or a config
# file instead of being committed to source control.
consumer_key = "NBNgPGCBeGv80PcsYU3QWU94d"
consumer_secret = "lvAaoSInlF9mPonoMMldFq5ZE96oAAl30TLh6ynVwK2tauvOQC"
access_key = "1311728151265832961-AaFHXfZtozEgfoVZoFnNqzqRZEWQAr"
access_secret = "Az8Ezlg77NtkZ8coTqXGsXW2wVh7Wbpm3JTsAkoPsrq7z"
OAUTH_KEYS = {'consumer_key':consumer_key, 'consumer_secret':consumer_secret,
    'access_token_key':access_key, 'access_token_secret':access_secret}
auth = tweepy.OAuthHandler(OAUTH_KEYS['consumer_key'], OAUTH_KEYS['consumer_secret'])
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# Lazily pull up to 4000 recent tweets matching the #Trump hashtag.
search = tweepy.Cursor(api.search, q='#Trump').items(4000)
# Create lists for each field desired from the tweets.
sn = []
text = []
timestamp =[]
for tweet in search:
    #print tweet.user.screen_name, tweet.created_at, tweet.text
    timestamp.append(tweet.created_at)
    sn.append(tweet.user.screen_name)
    text.append(tweet.text)
# Convert lists to dataframe
df = pd.DataFrame()
df['timestamp'] = timestamp
df['sn'] = sn
df['text'] = text
# Prepare for date filtering. Adding an EST time column since chat hosted by people in that time zone.
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['EST'] = df['timestamp'] - pd.Timedelta(hours=5) #Convert to EST
df['EST'] = pd.to_datetime(df['EST'])
# Subset for the dates required. Can select a specific date or time to examine.
# NOTE(review): duplicate import -- `time` is already imported above.
import time
df = df[(pd.to_datetime("2015-12-14 20:00:00", format='%Y-%m-%d %H:%M:%S') < df['EST']) & (df['EST'] < pd.to_datetime("2015-12-14 21:00:00", format='%Y-%m-%d %H:%M:%S'))]
# Write out Tweets in case they are needed later.
df.to_csv('edtechtweets.csv',index = False,encoding='utf-8')
# Create a list of the unique usernames in order to see which users we need to retrieve friends for.
allNames = list(df['sn'].unique())
# Initialize dataframe of users that will hold the edge relationships
dfUsers = pd.DataFrame()
dfUsers['userFromName'] =[]
dfUsers['userFromId'] =[]
dfUsers['userToId'] = []
count = 0
nameCount = len(allNames)
# The choice to retrieve friends (who the user is following) rather than followers is intentional.
# Either would work. However, many Twitter users follow fewer users than are following them, especially the most popular accounts.
# This reduces the number of very large calls to Twitter API, which seemed to cause problems.
for name in allNames:
    # Build list of friends
    currentFriends = []
    for page in tweepy.Cursor(api.friends_ids, screen_name=name).pages():
        currentFriends.extend(page)
    currentId = api.get_user(screen_name=name).id
    currentId = [currentId] * len(currentFriends)
    currentName = [name] * len(currentFriends)
    dfTemp = pd.DataFrame()
    dfTemp['userFromName'] = currentName
    dfTemp['userFromId'] = currentId
    dfTemp['userToId'] = currentFriends
    dfUsers = pd.concat([dfUsers,dfTemp])
    time.sleep(70) # avoids hitting Twitter rate limit
    # Progress bar to track approximate progress
    count +=1
    per = round(count*100.0/nameCount,1)
    sys.stdout.write("\rTwitter call %s%% complete." % per)
    sys.stdout.flush()
# Again, to limit the number of calls to Twitter API, just do lookups on followers that connect to those in our user group.
# We are not interested in "friends" that are not part of this community.
fromId = dfUsers['userFromId'].unique()
dfChat = dfUsers[dfUsers['userToId'].apply(lambda x: x in fromId)]
# No more Twitter API lookups are necessary. Create a lookup table that we will use to verify the userToName.
dfLookup = dfChat[['userFromName','userFromId']]
dfLookup = dfLookup.drop_duplicates()
dfLookup.columns = ['userToName','userToId']
dfCommunity = dfUsers.merge(dfLookup, on='userToId')
dfCommunity.to_csv('dfCommunity.csv',index = False,encoding='utf-8')
|
nilq/baby-python
|
python
|
#Pygments Tk Text from http://code.google.com/p/pygments-tk-text/
#Original Developer: Jonathon Eunice: jonathan.eunice@gmail.com
__author__ = 'Robert Cope'
__original__author__ = 'Jonathan Eunice'
|
nilq/baby-python
|
python
|
import random
def generatePassword(pwlength):
    """Build one random password per requested length in *pwlength*.

    Each password begins as random lowercase letters, then has a few
    characters swapped for digits (first half) and uppercase letters
    (second half) by the helper functions.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    results = []
    for length in pwlength:
        base = "".join(random.choice(letters) for _ in range(length))
        base = replaceWithNumber(base)
        base = replaceWithUppercaseLetter(base)
        results.append(base)
    return results
def replaceWithNumber(pword):
    """Overwrite one or two characters in the first half of *pword* with random digits."""
    swaps = random.randrange(1, 3)
    for _ in range(swaps):
        pos = random.randrange(len(pword) // 2)
        digit = str(random.randrange(10))
        pword = pword[:pos] + digit + pword[pos + 1:]
    return pword
def replaceWithUppercaseLetter(pword):
    """Uppercase one or two characters in the second half of *pword*."""
    swaps = random.randrange(1, 3)
    half = len(pword) // 2
    for _ in range(swaps):
        pos = random.randrange(half, len(pword))
        pword = pword[:pos] + pword[pos].upper() + pword[pos + 1:]
    return pword
def main():
    """Interactively ask how many passwords to make and their lengths, then print them."""
    count = int(input("How many passwords do you want to generate? "))
    print(f"Generating {count} passwords")
    lengths = []
    print("Minimum length of password should be 3")
    for idx in range(1, count + 1):
        requested = int(input(f"Enter the length of Password #{idx} "))
        # Silently enforce the minimum length of 3.
        lengths.append(max(requested, 3))
    generated = generatePassword(lengths)
    for idx, pw in enumerate(generated, start=1):
        print(f"Password #{idx} = {pw}")
# Script entry point.  NOTE(review): this also runs on import -- consider
# wrapping in an ``if __name__ == '__main__':`` guard.
main()
#This program is created by Harsh Sharma
|
nilq/baby-python
|
python
|
GAME_SIZE = 4
SCORE_TO_WIN1 = 512
SCORE_TO_WIN2 = 1024
SCORE_TO_WIN3 = 2048
SCORE_TO_WIN0 = 256
from game2048.game import Game
from game2048.agents import ExpectiMaxAgent


def _dump_step(board, direction, out):
    """Write the 16 board cells (row-major, one per line) then the chosen direction."""
    for row in range(4):
        for col in range(4):
            print(board[row, col], file=out)
    print(direction, file=out)


# Generate expert (ExpectiMax) trajectories and bucket each (board, direction)
# pair by the current max tile: <256 -> f1, <512 -> f2, otherwise -> f3.
# Fixes vs. previous version: the inner 4x4 loops no longer shadow the episode
# loop variable, the triplicated dump code is shared, and the output files are
# closed via `with` instead of leaking.
with open("dataset_256_3.txt", "w") as f1, \
        open("dataset_512_3.txt", "w") as f2, \
        open("dataset_1024_3.txt", "w") as f3:
    for episode in range(300):
        print("i = ", episode)
        game = Game(size=GAME_SIZE, score_to_win=SCORE_TO_WIN0)
        agent = ExpectiMaxAgent(game=game)
        while True:
            direction = agent.step()
            if game.end != 0:
                break
            peak = game.board.max()
            if peak < 256:
                _dump_step(game.board, direction, f1)
            elif peak < 512:
                _dump_step(game.board, direction, f2)
            else:
                _dump_step(game.board, direction, f3)
            game.move(direction)
|
nilq/baby-python
|
python
|
"""
####################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : embedding.py
# Abstract : torch.nn.Embedding function encapsulation.
# Current Version: 1.0.0
# Date : 2021-05-20
######################################################################################################
"""
from torch import nn
from mmcv.runner import load_checkpoint
from davarocr.davar_common.models.builder import EMBEDDING
from davarocr.davar_common.utils import get_root_logger
@EMBEDDING.register_module()
class Embedding(nn.Module):
    """Thin wrapper around ``nn.Embedding(vocab_size, embedding_dim)`` plus dropout."""

    def __init__(self,
                 vocab_size,
                 embedding_dim,
                 drop_out=0.):
        """
        Args:
            vocab_size (int): number of entries in the vocabulary.
            embedding_dim (int): dimensionality of each embedding vector.
            drop_out (float): dropout probability applied to the embeddings.
        """
        super().__init__()
        self.drop_out = drop_out
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.drop_out_layer = nn.Dropout(self.drop_out)

    def init_weights(self, pretrained=None):
        """Initialise weights, optionally loading them from a checkpoint.

        Args:
            pretrained (str, optional): checkpoint path; ``None`` keeps the
                default initialisation.
        """
        # Guard clauses: nothing to do for None, reject any non-string value.
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        logger = get_root_logger()
        logger.info("Embedding:")
        load_checkpoint(self, pretrained, strict=False, logger=logger)

    def forward(self, input_feature):
        """Embed *input_feature* and apply dropout.

        Args:
            input_feature (Tensor): index tensor in shape of [B x N x L].

        Returns:
            Tensor: embeddings in shape of [B x N x L x D], where D is the
                embedding_dim.
        """
        embedded = self.embedding(input_feature)
        return self.drop_out_layer(embedded)
|
nilq/baby-python
|
python
|
from tweepy import Stream
from stream_tweets import StockListener
import get_old_tweets
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def getTweets(stock_name):
    """Start a live tweet stream for *stock_name* and backfill historical tweets.

    Opens a tweepy stream filtered on the cashtag (``$<stock_name>``), then
    pulls past tweets into the database via ``get_old_tweets``.
    """
    twitter_stream = Stream(get_old_tweets.auth, StockListener(stock_name))
    twitter_stream.filter(track=[("$"+stock_name)])
    # BUG FIX: logging.info is not print -- the previous calls passed extra
    # positional args with no %s placeholders, so formatting failed and the
    # messages were never logged.  Use lazy %-style formatting instead.
    logging.info("Stream for %s is active...", stock_name)
    get_old_tweets.get_past_tweets(stock_name)
    logging.info("Past tweets of %s added to db...", stock_name)
|
nilq/baby-python
|
python
|
from painter.config import NAME, PATH, SHELVES
from templates import app
# Painter base
# Module-level singleton: the painter application assembled from its config.
# NOTE(review): app.App is defined in the templates package -- confirm the
# (name, path, shelves) argument order against its signature.
PAINTER = app.App(NAME, PATH, SHELVES)
|
nilq/baby-python
|
python
|
"""Various lower-level functions to support the computation of steady states"""
import warnings
import numpy as np
import scipy.optimize as opt
from numbers import Real
from functools import partial
from ...utilities import misc, solvers
def instantiate_steady_state_mutable_kwargs(dissolve, block_kwargs, solver_kwargs, constrained_kwargs):
    """Replace `None` placeholders with fresh mutable defaults for steady_state.

    Using `None` sentinels in the caller's signature avoids the shared
    mutable-default-argument pitfall; this helper materialises the real
    empty containers.
    """
    dissolve = [] if dissolve is None else dissolve
    block_kwargs = {} if block_kwargs is None else block_kwargs
    solver_kwargs = {} if solver_kwargs is None else solver_kwargs
    constrained_kwargs = {} if constrained_kwargs is None else constrained_kwargs
    return dissolve, block_kwargs, solver_kwargs, constrained_kwargs
def provide_solver_default(unknowns):
    """Choose a sensible default solver from the shape of `unknowns`.

    One unknown with a valid (lower, upper) bracket -> "brentq"; several
    unknowns with scalar initial values -> "broyden_custom"; anything else
    raises ValueError.
    """
    if not unknowns:
        raise ValueError("`unknowns` is empty! Please provide a dict of keys/values equal to the number of unknowns"
                         " that need to be solved for.")
    values = list(unknowns.values())
    if len(unknowns) == 1:
        bounds = values[0]
        # A single unknown needs a well-ordered (lower, upper) bracket for brentq.
        if isinstance(bounds, tuple) and bounds[0] <= bounds[1]:
            return "brentq"
        raise ValueError("Unable to find a compatible one-dimensional solver with provided `unknowns`.\n"
                         " Please provide valid lower/upper bounds, e.g. unknowns = {`a`: (0, 1)}")
    # Several unknowns need scalar initial values for the custom Broyden solver.
    if all(isinstance(v, Real) for v in values):
        return "broyden_custom"
    raise ValueError("Unable to find a compatible multi-dimensional solver with provided `unknowns`.\n"
                     " Please provide valid initial values, e.g. unknowns = {`a`: 1, `b`: 2}")
def run_consistency_check(cresid, ctol=1e-9, fragile=False):
    """Verify that the steady-state consistency residual is within tolerance.

    Args:
        cresid: maximum residual produced by evaluating the targets at the
            proposed unknowns.
        ctol: absolute tolerance on that residual.
        fragile: if True, a violation raises RuntimeError; otherwise it only
            emits a warning.
    """
    if cresid <= ctol:
        return
    # Single message shared by the hard-failure and warning paths (previously
    # duplicated verbatim in both branches).
    msg = (f"The target values evaluated for the proposed set of unknowns produce a "
           f"maximum residual value of {cresid}, which is greater than the ctol {ctol}.\n"
           f"  If used, check if HelperBlocks are indeed compatible with the DAG.\n"
           f"  If this is not an issue, adjust ctol accordingly.")
    if fragile:
        raise RuntimeError(msg)
    warnings.warn(msg)
# Allow targets to be specified in the following formats
# 1) target = {"asset_mkt": 0} or ["asset_mkt"] (the standard case, where the target = 0)
# 2) target = {"r": 0.01} (allowing for the target to be non-zero)
# 3) target = {"K": "A"} (allowing the target to be another variable in potential_args)
def compute_target_values(targets, potential_args):
    """
    For a given set of target specifications and potential arguments available, compute the targets.
    Called as the return value for the residual function when utilizing the numerical solver.

    Args:
        targets: dict mapping target names to a desired value -- 0, a number,
            or the name (str) of another variable in `potential_args` -- or a
            list of names, which implies a desired value of 0 for each.
        potential_args: dict of computed values the targets are evaluated against.

    Returns:
        float if there is a single target, otherwise an np.ndarray of residuals.
    """
    target_values = np.empty(len(targets))
    for i, t in enumerate(targets):
        # Lists imply target value 0; dicts carry an explicit desired value.
        v = targets[t] if isinstance(targets, dict) else 0
        if isinstance(v, str):  # idiom fix: was `type(v) == str`
            # Target is another variable: residual is the difference of the two.
            target_values[i] = potential_args[t] - potential_args[v]
        else:
            target_values[i] = potential_args[t] - v
    # Univariate solvers require a scalar return value (not a length-1 array).
    if len(targets) == 1:
        return target_values[0]
    return target_values
def compare_steady_states(ss_ref, ss_comp, tol=1e-8, name_map=None, internal=True, check_same_keys=True, verbose=False):
    """Check if two steady state dicts (can be flat dicts or SteadyStateDict objects) are the same up to a tolerance.

    Args:
        ss_ref, ss_comp: reference and comparison steady-state dicts.
        tol: absolute tolerance on each value's residual.
        name_map: optional mapping from keys in ss_ref to their names in ss_comp.
        internal: also compare the `.internal` sub-dicts when both objects have them.
        check_same_keys: flag mismatched key sets as a failure.
        verbose: print per-key residuals and key-set differences.

    Returns:
        bool: True when all shared values agree within `tol` (and, if requested,
        the key sets match).
    """
    if name_map is None:
        name_map = {}
    valid = True
    # Decide which (reference, comparison, label) triples to compare.
    if internal:
        if not hasattr(ss_ref, "internal") or not hasattr(ss_comp, "internal"):
            warnings.warn("The provided steady state dicts do not both have .internal attrs. Will only compare"
                          " top-level values")
            ds_to_check = [(ss_ref, ss_comp, "toplevel")]
        else:
            ds_to_check = [(ss_ref, ss_comp, "toplevel")] + [(ss_ref.internal[i], ss_comp.internal[i], i + "_internal") for i in ss_ref.internal]
    else:
        ds_to_check = [(ss_ref, ss_comp, "toplevel")]
    for d_ref, d_comp, level in ds_to_check:
        for key_ref in d_ref.keys():
            if key_ref in d_comp.keys():
                key_comp = key_ref
            elif key_ref in name_map:
                key_comp = name_map[key_ref]
            else:
                continue
            if np.isscalar(d_ref[key_ref]):
                resid = abs(d_ref[key_ref] - d_comp[key_comp])
            else:
                resid = np.linalg.norm(d_ref[key_ref].ravel() - d_comp[key_comp].ravel(), np.inf)
            if verbose:
                print(f"{key_ref} resid: {resid}")
            # BUG FIX: this check previously sat in the `else` branch of
            # `if verbose:`, so verbose comparisons always reported success.
            # It must run regardless of verbosity.
            if not np.all(np.isclose(resid, 0., atol=tol)):
                valid = False
        # Show the steady state values present in only one of d_ref or d_comp, i.e. if there are missing keys
        if check_same_keys:
            d_ref_incl_mapped = set(d_ref.keys()) - set(name_map.keys())
            d_comp_incl_mapped = set(d_comp.keys()) - set(name_map.values())
            diff_keys = d_ref_incl_mapped.symmetric_difference(d_comp_incl_mapped)
            if diff_keys:
                if verbose:
                    print(f"At level '{level}', the keys present only one of the two steady state dicts are {diff_keys}")
                valid = False
    return valid
def solve_for_unknowns(residual, unknowns, solver, solver_kwargs, residual_kwargs=None,
                       constrained_method="linear_continuation", constrained_kwargs=None,
                       tol=2e-12, verbose=False):
    """Given a residual function (constructed within steady_state) and a set of bounds or initial values for
    the set of unknowns, solve for the root.

    residual: `function`
        A function to be supplied to a numerical solver that takes unknown values as arguments
        and returns computed targets.
    unknowns: `dict`
        Refer to the `steady_state` function docstring for the "unknowns" variable
    targets: `dict`
        Refer to the `steady_state` function docstring for the "targets" variable
    tol: `float`
        The absolute convergence tolerance of the computed target to the desired target value in the numerical solver
    solver: `str`
        Refer to the `steady_state` function docstring for the "solver" variable
    solver_kwargs:
        Refer to the `steady_state` function docstring for the "solver_kwargs" variable

    return: The root[s] of the residual function as either a scalar (float) or a list of floats
    """
    if residual_kwargs is None:
        residual_kwargs = {}
    # Names recognised by scipy.optimize.root_scalar (univariate) and
    # scipy.optimize.root (multivariate) respectively.
    scipy_optimize_uni_solvers = ["bisect", "brentq", "brenth", "ridder", "toms748", "newton", "secant", "halley"]
    scipy_optimize_multi_solvers = ["hybr", "lm", "broyden1", "broyden2", "anderson", "linearmixing", "diagbroyden",
                                    "excitingmixing", "krylov", "df-sane"]
    # Wrap kwargs into the residual function
    residual_f = partial(residual, **residual_kwargs)
    if solver is None:
        raise RuntimeError("Must provide a numerical solver from the following set: brentq, broyden, solved")
    elif solver in scipy_optimize_uni_solvers:
        # One unknown: dispatch to scipy's scalar root finder.
        initial_values_or_bounds = extract_univariate_initial_values_or_bounds(unknowns)
        result = opt.root_scalar(residual_f, method=solver, xtol=tol,
                                 **initial_values_or_bounds, **solver_kwargs)
        if not result.converged:
            raise ValueError(f"Steady-state solver, {solver}, did not converge.")
        unknown_solutions = result.root
    elif solver in scipy_optimize_multi_solvers:
        # Several unknowns: dispatch to scipy's multivariate root finder,
        # wrapping the residual in a bounds-enforcing penalty when bounds exist.
        initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
        # If no bounds were provided
        if not bounds:
            result = opt.root(residual_f, initial_values,
                              method=solver, tol=tol, **solver_kwargs)
        else:
            constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
                                                                    method=constrained_method,
                                                                    **constrained_kwargs)
            result = opt.root(constrained_residual, initial_values,
                              method=solver, tol=tol, **solver_kwargs)
        if not result.success:
            raise ValueError(f"Steady-state solver, {solver}, did not converge."
                             f" The termination status is {result.status}.")
        unknown_solutions = list(result.x)
    # TODO: Implement a more general interface for custom solvers, so we don't need to add new elifs at this level
    # everytime a new custom solver is implemented.
    elif solver == "broyden_custom":
        # In-house Broyden implementation (see ...utilities.solvers).
        initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
        # If no bounds were provided
        if not bounds:
            unknown_solutions, _ = solvers.broyden_solver(residual_f, initial_values,
                                                          tol=tol, verbose=verbose, **solver_kwargs)
        else:
            constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
                                                                    method=constrained_method,
                                                                    **constrained_kwargs)
            unknown_solutions, _ = solvers.broyden_solver(constrained_residual, initial_values,
                                                          verbose=verbose, tol=tol, **solver_kwargs)
        unknown_solutions = list(unknown_solutions)
    elif solver == "newton_custom":
        # In-house Newton implementation (see ...utilities.solvers).
        initial_values, bounds = extract_multivariate_initial_values_and_bounds(unknowns)
        # If no bounds were provided
        if not bounds:
            unknown_solutions, _ = solvers.newton_solver(residual_f, initial_values,
                                                         tol=tol, verbose=verbose, **solver_kwargs)
        else:
            constrained_residual = constrained_multivariate_residual(residual_f, bounds, verbose=verbose,
                                                                    method=constrained_method,
                                                                    **constrained_kwargs)
            unknown_solutions, _ = solvers.newton_solver(constrained_residual, initial_values,
                                                         tol=tol, verbose=verbose, **solver_kwargs)
        unknown_solutions = list(unknown_solutions)
    elif solver == "solved":
        # If the model either doesn't require a numerical solution or is being evaluated at a candidate solution
        # simply call residual_f once to populate the `ss_values` dict
        residual_f(unknowns.values())
        unknown_solutions = unknowns.values()
    else:
        raise RuntimeError(f"steady_state is not yet compatible with {solver}.")
    return dict(misc.smart_zip(unknowns.keys(), unknown_solutions))
def extract_univariate_initial_values_or_bounds(unknowns):
    """Turn the single entry of `unknowns` into scipy root_scalar kwargs.

    A scalar becomes an initial guess (``x0``); a pair becomes a ``bracket``.
    """
    value = next(iter(unknowns.values()))
    if np.isscalar(value):
        return {"x0": value}
    return {"bracket": (value[0], value[1])}
def extract_multivariate_initial_values_and_bounds(unknowns, fragile=False):
    """Provided a dict mapping names of unknowns to initial values/bounds, return separate dicts of
    the initial values and bounds.

    Note: For one-sided bounds, simply put np.inf/-np.inf as the other side of the bounds, so there is
    no ambiguity about which is the unconstrained side.

    Args:
        unknowns: dict mapping each unknown's name to a scalar initial value,
            a (lower, upper) 2-tuple, or a (lower, initial, upper) 3-tuple.
        fragile: if True, treat 2-tuples as an error instead of warning and
            averaging them into an initial value.

    Returns:
        tuple: (np.ndarray of initial values, dict of name -> (lower, upper)).
    """
    def _size_error(n):
        # Shared error text (previously duplicated verbatim in two branches).
        return (f"{n} is an invalid size for the value of an unknown."
                f" the values of `unknowns` must either be a scalar, pertaining to a"
                f" single initial value for the root solver to begin from,"
                f" a length 2 tuple, pertaining to a lower bound and an upper bound,"
                f" or a length 3 tuple, pertaining to a lower bound, initial value, and upper bound.")

    initial_values = []
    multi_bounds = {}
    for k, v in unknowns.items():
        if np.isscalar(v):
            initial_values.append(v)
        elif len(v) == 2:
            if fragile:
                raise ValueError(_size_error(len(v)))
            warnings.warn("Interpreting values of `unknowns` from length 2 tuple as lower and upper bounds"
                          " and averaging them to get a scalar initial value to provide to the solver.")
            initial_values.append((v[0] + v[1])/2)
        elif len(v) == 3:
            lb, iv, ub = v
            # NOTE(review): kept as `assert` to preserve the exception type
            # callers may catch; it is stripped under `python -O`.
            assert lb < iv < ub
            initial_values.append(iv)
            multi_bounds[k] = (lb, ub)
        else:
            raise ValueError(_size_error(len(v)))
    return np.asarray(initial_values), multi_bounds
def residual_with_linear_continuation(residual, bounds, eval_at_boundary=False,
                                      boundary_epsilon=1e-4, penalty_scale=1e1,
                                      verbose=False):
    """Modify a residual function to implement bounds by an additive penalty for exceeding the boundaries
    provided, scaled by the amount the guess exceeds the boundary.

    e.g. For residual function f(x), desiring x in (0, 1) (so assuming eval_at_boundary = False)
    If the guess for x is 1.1 then we will censor to x_censored = 1 - boundary_epsilon, and return
    f(x_censored) + penalty (where the penalty does not require re-evaluating f() which may be costly)

    residual: `function`
        The function whose roots we want to solve for
    bounds: `dict`
        A dict mapping the names of the unknowns (`str`) to length two tuples corresponding to the lower and upper
        bounds.
    eval_at_boundary: `bool`
        Whether to allow the residual function to be evaluated at exactly the boundary values or not.
        Think of it as whether the solver will treat the bounds as creating a closed or open set for the search space.
    boundary_epsilon: `float`
        The amount to adjust the proposed guess, x, by to calculate the censored value of the residual function,
        when the proposed guess exceeds the boundaries.
    penalty_scale: `float`
        The linear scaling factor for adjusting the penalty for the proposed unknown values exceeding the boundary.
    verbose: `bool`
        Whether to print out additional information for how the constrained residual function is behaving during
        optimization. Useful for tuning the solver.

    Returns:
        `function`: the bounds-aware residual, taking the same x array.
    """
    # Bounds are aligned with the unknown vector by dict insertion order.
    lbs = np.asarray([v[0] for v in bounds.values()])
    ubs = np.asarray([v[1] for v in bounds.values()])
    def constr_residual(x, residual_cache=[]):
        """Implements a constrained residual function, where any attempts to evaluate x outside of the
        bounds provided will result in a linear penalty function scaled by `penalty_scale`.

        Note: We are purposefully using residual_cache as a mutable default argument to cache the most recent
        valid evaluation (maintain state between function calls) of the residual function to induce solvers
        to backstep if they encounter a region of the search space that returns nan values.
        See Hitchhiker's Guide to Python post on Mutable Default Arguments: "When the Gotcha Isn't a Gotcha"
        """
        # Clamp the guess into the feasible box (open or closed per eval_at_boundary).
        if eval_at_boundary:
            x_censored = np.where(x < lbs, lbs, x)
            x_censored = np.where(x > ubs, ubs, x_censored)
        else:
            x_censored = np.where(x < lbs, lbs + boundary_epsilon, x)
            x_censored = np.where(x > ubs, ubs - boundary_epsilon, x_censored)
        residual_censored = residual(x_censored)
        if verbose:
            print(f"Attempted x is {x}")
            print(f"Censored x is {x_censored}")
            print(f"The residual_censored is {residual_censored}")
        if np.any(np.isnan(residual_censored)):
            # Provide a scaled penalty to the solver when trying to evaluate residual() in an undefined region
            residual_censored = residual_cache[0] * penalty_scale
            if verbose:
                print(f"The new residual_censored is {residual_censored}")
        else:
            # Valid evaluation: remember it so a later nan region can be penalized
            # relative to the last good residual.
            if not residual_cache:
                residual_cache.append(residual_censored)
            else:
                residual_cache[0] = residual_censored
            if verbose:
                print(f"The residual_cache is {residual_cache[0]}")
        # Provide an additive, scaled penalty to the solver when trying to evaluate residual() outside of the boundary
        residual_with_boundary_penalty = residual_censored + \
                                         (x - x_censored) * penalty_scale * residual_censored
        return residual_with_boundary_penalty
    return constr_residual
def constrained_multivariate_residual(residual, bounds, method="linear_continuation", verbose=False,
                                      **constrained_kwargs):
    """Return a constrained version of the residual function, which accounts for bounds, using the specified method.
    See the docstring of the specific method of interest for further details."""
    # Guard clause: only one constraint method exists so far.
    # TODO: Implement logistic transform as another option for constrained multivariate residual
    if method != "linear_continuation":
        raise ValueError(f"Method {method} for constrained multivariate root-finding has not yet been implemented.")
    return residual_with_linear_continuation(residual, bounds, verbose=verbose, **constrained_kwargs)
|
nilq/baby-python
|
python
|
from django.db import models
from .book import Book
from .language import Language
class BookLanguage(models.Model):
    """Per-language metadata for a Book: its name and abbreviation in that language."""
    # Explicit surrogate primary key.
    id = models.AutoField(
        primary_key=True,
        editable=False)
    # Owning book; PROTECT prevents deleting a book that still has language rows.
    book = models.ForeignKey(
        Book,
        db_column='book_id',
        blank=False, null=False,
        on_delete=models.PROTECT)
    # Language this entry is written in; also protected against deletion.
    language = models.ForeignKey(
        Language,
        db_column='language_id',
        blank=False, null=False,
        on_delete=models.PROTECT
    )
    name = models.CharField(
        max_length=100,
        blank=False, null=False)
    # NOTE(review): 'abreviation' is misspelled, but renaming the field would
    # require a schema migration -- kept as-is for compatibility.
    abreviation = models.CharField(
        max_length=100,
        blank=True, null=True)
    class Meta:
        verbose_name = 'Book language'
        verbose_name_plural = 'Book language'
        db_table = 'believe_book_lang'
    def __str__(self):
        return self.name
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.